├── .github
│   └── FUNDING.yml
├── .gitignore
├── .travis.yml
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.rst
├── SECURITY.md
├── s3cache
│   └── __init__.py
├── setup.py
├── sitecustomize.py
└── tests
    └── tests.py

/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | tidelift: "pypi/django-s3-cache"
2 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.pyo
3 | tests/__pycache__/
4 | .coverage
5 | *.egg-info/
6 | *.json
7 | 
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | addons:
2 |   apt:
3 |     packages:
4 |       - bc
5 | after_success:
6 |   - coveralls
7 | before_install:
8 |   - pip install coveralls mock
9 |   - if [ -z "$_COMMAND" ]; then export _COMMAND=test; fi
10 |   - if [ -z "$_DJANGO" ]; then export _DJANGO=2.1.4; export _DJANGO_STORAGES=1.7.1; export _BOTO=2.49.0; fi
11 | env:
12 |   - _BOTO=2.49.0 _DJANGO=1.11.17 _DJANGO_STORAGES=1.7.1
13 |   - _BOTO=2.49.0 _DJANGO=2.0.9 _DJANGO_STORAGES=1.7.1
14 |   - _BOTO=2.49.0 _DJANGO=2.1.4 _DJANGO_STORAGES=1.7.1
15 | install:
16 |   - pip install django-nose
17 |   - pip install pylint
18 |   - if [ -n "$_BOTO" ]; then pip install boto==$_BOTO; fi
19 |   - if [ -n "$_BOTO3" ]; then pip install boto3==$_BOTO3; fi
20 |   - if [ -n "$_DJANGO" ]; then pip install Django==$_DJANGO django-storages==$_DJANGO_STORAGES; fi
21 | language: python
22 | matrix:
23 |   include:
24 |     - env: _COMMAND=pylint
25 |       python: 3.6
26 | notifications:
27 |   email:
28 |     on_failure: change
29 |     on_success: change
30 | python:
31 |   - 2.7
32 |   - 3.6
33 | script:
34 |   - make $_COMMAND
35 | sudo: false
36 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2012,2017,2019 Alexander Todorov
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include README.rst
2 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | test:
2 | 	PYTHONPATH=. nosetests -v --with-coverage --cover-branches --cover-erase \
3 | 		--cover-package s3cache tests/ && coverage report -m
4 | 
5 | mutation_test:
6 | 	PYTHONPATH=. cosmic-ray run --baseline=10 --test-runner=nose s3cache.json s3cache -- -v tests/*.py
7 | 	cosmic-ray report s3cache.json
8 | 	# allow 10% mutation test failures before reporting FAIL
9 | 	echo "$$(cosmic-ray survival-rate s3cache.json) > 10" | bc -l
10 | 
11 | pylint:
12 | 	pylint -rn *.py s3cache/ tests/*.py
13 | 
14 | build: test
15 | 	./setup.py sdist
16 | 
17 | upload: test
18 | 	./setup.py sdist upload
19 | 
20 | clean:
21 | 	./setup.py clean
22 | 	rm -rf django_s3_cache.egg-info/
23 | 	rm -f MANIFEST *.pyc s3cache/*.pyc
24 | 
25 | distclean: clean
26 | 	rm -rf dist/
27 | 	rm -rf tests/__pycache__/
28 | 
29 | help:
30 | 	@echo "Usage: make <target>"
31 | 	@echo " "
32 | 	@echo " test      - run the tests"
33 | 	@echo " pylint    - run PyLint"
34 | 	@echo " build     - build the package"
35 | 	@echo " upload    - upload to PyPI"
36 | 	@echo " clean     - remove all build files"
37 | 	@echo " distclean - remove all non git files"
38 | 	@echo " help      - show this help and exit"
39 | 
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Django S3 Cache
2 | ---------------
3 | 
4 | .. image:: https://travis-ci.org/atodorov/django-s3-cache.svg?branch=master
5 |    :target: https://travis-ci.org/atodorov/django-s3-cache
6 |    :alt: Build status
7 | 
8 | .. image:: https://coveralls.io/repos/github/atodorov/django-s3-cache/badge.svg?branch=master
9 |    :target: https://coveralls.io/github/atodorov/django-s3-cache?branch=master
10 |    :alt: Code coverage
11 | 
12 | .. image:: https://api.codeclimate.com/v1/badges/634faffb0ab0a5c81355/maintainability
13 |    :target: https://codeclimate.com/github/atodorov/django-s3-cache/maintainability
14 |    :alt: Maintainability
15 | 
16 | .. image:: https://tidelift.com/badges/package/pypi/django-s3-cache
17 |    :target: https://tidelift.com/subscription/pkg/pypi-django-s3-cache?utm_source=pypi-django-s3-cache&utm_medium=github&utm_campaign=readme
18 |    :alt: Tidelift
19 | 
20 | This is an Amazon Simple Storage Service (S3) cache backend for Django.
21 | It is based on the *django.core.cache.backends.filebased.FileBasedCache* backend
22 | and operates in a similar fashion. This backend uses
23 | `django-storages <https://github.com/jschneier/django-storages>`_ to read/write
24 | data to S3. It uses the *s3boto* storage backend.
25 | 
26 | All key/values passed to this backend are stored in a flat directory structure
27 | in your S3 bucket. It uses sha1 instead of md5 to create the file name.
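
For illustration, a minimal sketch of how a cache key becomes an S3 object
name. It mirrors ``_key_to_file()`` from *s3cache/__init__.py*; the sample key
is hypothetical and ``':1:my-key'`` is what Django's default ``make_key()``
produces for key ``'my-key'``, version 1:

::

    import hashlib

    def key_to_object_name(key):
        # flat namespace: the object name is the sha1 hexdigest of the full cache key
        return hashlib.sha1(key.encode('utf-8')).hexdigest()

    print(key_to_object_name(':1:my-key'))  # a 40-character hex string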
28 | 
29 | 
30 | **This project is looking for maintainers!**
31 | 
32 | 
33 | Installation
34 | ============
35 | 
36 | Use pip to install from PyPI:
37 | 
38 | ::
39 | 
40 |     pip install django-s3-cache
41 | 
42 | 
43 | Configure the use of this backend:
44 | 
45 | ::
46 | 
47 |     CACHES = {
48 |         'default': {
49 |             'BACKEND': 's3cache.AmazonS3Cache',
50 |             'OPTIONS': {
51 |                 'ACCESS_KEY' : 'Your AWS access key',
52 |                 'SECRET_KEY' : 'Your AWS secret access key',
53 |                 'BUCKET_NAME': 'Your AWS storage bucket name',
54 |                 'LOCATION'   : 'directory_prefix',
55 |             }
56 |         }
57 |     }
58 | 
59 | Changelog
60 | =========
61 | 
62 | * 1.4.3 (10 Nov 2019)
63 | 
64 |   * Switched license from BSD-3-Clause to MIT
65 |   * Switched to Travis CI
66 |   * Started testing with pylint
67 |   * Internal code refactoring and more testing
68 | 
69 | Configuration
70 | =============
71 | 
72 | Django S3 Cache supports many configuration options. They should be defined as
73 | keys of the *OPTIONS* dictionary in *settings.py* as shown above. If something
74 | is not defined explicitly, it follows the defaults of the *s3boto* backend from
75 | *django-storages*, which in turn reads them from *settings.py*.
76 | 
77 | **NOTE-1:** some values in *settings.py* may be used globally by *boto* and other AWS-aware
78 | Django components since they follow the format *AWS_XXXX*. It is always best to define your
79 | values as cache options explicitly if you don't want to run into problems.
80 | 
81 | **NOTE-2:** since version 1.2 Django S3 Cache is compatible with django-storages v1.1.8, which
82 | changed the names of its configuration variables. All new variables are expected to be lower
83 | case and the AWS key variables changed names. For the exact names see the S3BotoStorage class
84 | definition in *s3boto.py*. Django S3 Cache implements backward compatibility with its previous
85 | *OPTIONS* syntax to allow for easier upgrades. Older names are mapped to new ones and all
86 | options are lower-cased before being passed to S3BotoStorage. The example above shows the new syntax.
87 | 
88 | **NOTE-3:** before version 1.3 there is a **CRITICAL BUG** in the handling of the *LOCATION*
89 | option. If used, cache objects will be stored under the defined directory; however, culling
90 | and clearing the cache **did not** take this into account. cache.clear() or cache._cull()
91 | will **delete the entire bucket**. This has been fixed in version 1.3!
92 | 
93 | **NOTE-4:** in versions 1.2 to 1.3 there is a **BUG** in the backward compatibility handling
94 | of *OPTIONS*. If you have been using the new-style syntax for *ACCESS_KEY*, *SECRET_KEY* and
95 | *BUCKET_NAME*, they would be overridden and boto would crash due to missing authentication
96 | parameters. This has been fixed in version 1.4!
97 | 
98 | Some notable options are:
99 | 
100 | * *LOCATION* - the directory prefix under which to store cache files. Defaults to an empty string, which means the root directory;
101 | * *DEFAULT_ACL* == *private* - default ACL for created objects. Unlike the *s3boto* storage backend we set this to *private*;
102 | * *BUCKET_ACL* == *DEFAULT_ACL* - ACL for the bucket if auto-created. By default set to *private*. It is best to use a separate bucket for cache files;
103 | * *REDUCED_REDUNDANCY* - set to *True* if you want to save a few cents on storage costs;
104 | * *IS_GZIPPED* - set to *True* to enable Gzip compression. Used together with *GZIP_CONTENT_TYPES*. See the *django-storages* `documentation <https://django-storages.readthedocs.io/>`_.
105 | 
106 | 
107 | Django S3 Cache implements a culling strategy similar to that of the stock filesystem backend. It will honor the following options:
108 | 
109 | * *MAX_ENTRIES* - the maximum number of entries allowed in the cache before old values are deleted. If 0, culling is disabled. This argument defaults to 300;
110 | * *CULL_FREQUENCY* - the fraction of entries that are culled when *MAX_ENTRIES* is reached. The actual ratio is *1/CULL_FREQUENCY*, so set *CULL_FREQUENCY* to 2 to cull half of the entries when *MAX_ENTRIES* is reached.
111 | 
112 | 
113 | Contributing
114 | ============
115 | 
116 | Source code and issue tracker are at https://github.com/atodorov/django-s3-cache
117 | 
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 | 
3 | ## Supported Versions
4 | 
5 | | Version | Supported          |
6 | | ------- | ------------------ |
7 | | [latest](https://pypi.org/project/django-s3-cache/) | :heavy_check_mark: |
8 | 
9 | ## Reporting a Vulnerability
10 | 
11 | If you have found a security problem with *django-s3-cache*, **DO NOT** report
12 | it in GitHub Issues. Instead go to
13 | [https://tidelift.com/security](https://tidelift.com/security)
14 | and follow the instructions there.
15 | 
16 | At least one of the package maintainers ([@atodorov](http://github.com/atodorov))
17 | is a lifter at Tidelift and will be notified when you report the security
18 | problem to them!
19 | 
--------------------------------------------------------------------------------
/s3cache/__init__.py:
--------------------------------------------------------------------------------
1 | "Amazon S3 cache backend for Django"
2 | 
3 | # Copyright (c) 2012,2017 Alexander Todorov
4 | #
5 | # Taken directly from django.core.cache.backends.filebased.FileBasedCache
6 | # and adapted for S3.
7 | 
8 | import time
9 | import hashlib
10 | 
11 | try:
12 |     import cPickle as pickle
13 | except ImportError:
14 |     import pickle
15 | 
16 | from storages.backends import s3boto
17 | from django.core.files.base import ContentFile
18 | from django.core.cache.backends.base import BaseCache
19 | 
20 | def _key_to_file(key):
21 |     """
22 |     All files go into a single flat directory because it is not easy
23 |     to search for and delete empty directories in _delete().
24 | 
25 |     Plus Amazon S3 doesn't seem to have a problem with many files in one directory.
26 | 
27 |     NB: measuring sha1() with timeit shows it is a bit faster compared to md5()
28 |     http://stackoverflow.com/questions/2241013/is-there-a-significant-overhead-by-using-different-versions-of-sha-hashing-hash
29 | 
30 |     UPDATE: this is wrong, md5() is still faster, see:
31 |     http://atodorov.org/blog/2013/02/05/performance-test-md5-sha1-sha256-sha512/
32 |     """
33 |     return hashlib.sha1(key.encode('utf-8')).hexdigest()
34 | 
35 | class AmazonS3Cache(BaseCache):
36 |     """
37 |     Amazon S3 cache backend for Django
38 |     """
39 |     def __init__(self, _location, params):
40 |         """
41 |         The location parameter is not used, but Django crashes without it.
42 |         """
43 | 
44 |         BaseCache.__init__(self, params)
45 | 
46 |         # Amazon and boto have a maximum limit of 1000 for get_all_keys(). See:
47 |         # http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
48 |         # This implementation of the GET operation returns some or all (up to 1000)
49 |         # of the objects in a bucket....
50 | 
51 |         if self._max_entries > 1000:
52 |             self._max_entries = 1000
53 | 
54 |         self._options = params.get('OPTIONS', {})
55 | 
56 |         # backward compatible syntax for s3cache users before v1.2 for easy upgrades
57 |         # in v1.2 we updated to latest django-storages 1.1.8 which changes variable names
58 |         # in a non-backward compatible fashion
59 |         if 'ACCESS_KEY' not in self._options.keys():
60 |             self._options['ACCESS_KEY'] = self._options.get('ACCESS_KEY_ID', None)
61 |         if 'SECRET_KEY' not in self._options.keys():
62 |             self._options['SECRET_KEY'] = self._options.get('SECRET_ACCESS_KEY', None)
63 |         if 'BUCKET_NAME' not in self._options.keys():
64 |             self._options['BUCKET_NAME'] = self._options.get('STORAGE_BUCKET_NAME', None)
65 | 
66 |         # we use S3-compatible variable names while django-storages doesn't
67 |         _bucket_name = self._options.get('BUCKET_NAME', None)
68 |         _default_acl = self._options.get('DEFAULT_ACL', 'private')
69 |         _bucket_acl = self._options.get('BUCKET_ACL', _default_acl)
70 |         # in case it was not specified in OPTIONS default to 'private'
71 |         self._options['BUCKET_ACL'] = _bucket_acl
72 | 
73 | 
74 |         self._location = self._options.get('LOCATION', self._options.get('location', ''))
75 |         # sanitize location by removing leading and trailing slashes
76 |         self._options['LOCATION'] = self._location.strip('/')
77 | 
78 |         # S3BotoStorage wants lower case names
79 |         lowercase_options = []
80 |         for name, value in self._options.items():
81 |             if value:  # skip None values
82 |                 lowercase_options.append((name.lower(), value))
83 |         # this avoids RuntimeError: dictionary changed size during iteration
84 |         # with Python 3 if we assign to the dictionary directly
85 |         for _n, _v in lowercase_options:
86 |             self._options[_n] = _v
87 | 
88 |         self._storage = s3boto.S3BotoStorage(
89 |             acl=_default_acl,
90 |             bucket=_bucket_name,
91 |             **self._options
92 |         )
93 | 
94 | 
95 |     def add(self, key, value, timeout=None, version=None):
96 |         if self.has_key(key, version=version):
97 |             return False
98 | 
99 |         self.set(key, value, timeout, version=version)
100 |         return True
101 | 
102 |     def get(self, key, default=None, version=None):
103 |         key = self.make_key(key, version=version)
104 |         self.validate_key(key)
105 | 
106 |         fname = _key_to_file(key)
107 |         try:
108 |             fobj = self._storage.open(fname, 'rb')
109 |             try:
110 |                 if not self._is_expired(fobj, fname):
111 |                     return pickle.load(fobj)
112 |             finally:
113 |                 fobj.close()
114 |         except (IOError, OSError, EOFError, pickle.PickleError):
115 |             pass
116 |         return default
117 | 
118 |     def set(self, key, value, timeout=None, version=None):
119 |         key = self.make_key(key, version=version)
120 |         self.validate_key(key)
121 | 
122 |         fname = _key_to_file(key)
123 | 
124 |         self._cull()
125 | 
126 |         try:
127 |             content = self._dump_object(value, timeout)
128 |             self._storage.save(fname, ContentFile(content))
129 |         except (IOError, OSError, EOFError, pickle.PickleError):
130 |             pass
131 | 
132 |     def _dump_object(self, value, timeout=None):
133 |         if timeout is None:
134 |             timeout = self.default_timeout
135 | 
136 |         content = pickle.dumps(time.time() + timeout, pickle.HIGHEST_PROTOCOL)
137 |         content += pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
138 |         return content
139 | 
140 |     def delete(self, key, version=None):
141 |         key = self.make_key(key, version=version)
142 |         self.validate_key(key)
143 |         try:
144 |             self._delete(_key_to_file(key))
145 |         except (IOError, OSError):
146 |             pass
147 | 
148 |     def _delete(self, fname):
149 |         self._storage.delete(fname)
150 | 
151 |     def has_key(self, key, version=None):
152 |         key = self.make_key(key, version=version)
153 |         self.validate_key(key)
154 |         fname = _key_to_file(key)
155 |         try:
156 |             fobj = self._storage.open(fname, 'rb')
157 |             try:
158 |                 return not self._is_expired(fobj, fname)
159 |             finally:
160 |                 fobj.close()
161 |         except (IOError, OSError, EOFError, pickle.PickleError):
162 |             return False
163 | 
164 |     def _is_expired(self, fobj, fname):
165 |         """
166 |         Takes an open cache file and determines if it has expired;
167 |         deletes the file if it has passed its expiry time.
168 |         """
169 |         exp = pickle.load(fobj)
170 |         if exp < time.time():
171 |             self._delete(fname)
172 |             return True
173 | 
174 |         return False
175 | 
176 |     def _cull(self, frequency=None):
177 |         if frequency is None:
178 |             frequency = self._cull_frequency
179 | 
180 |         if not self._max_entries:
181 |             return
182 | 
183 |         if int(self._num_entries) < self._max_entries:
184 |             return
185 | 
186 |         try:
187 |             keylist = self._storage.bucket.get_all_keys(prefix=self._location)
188 |         except (IOError, OSError):
189 |             return
190 | 
191 |         if not frequency:
192 |             doomed = keylist
193 |         else:
194 |             doomed = [k for (i, k) in enumerate(keylist) if i % frequency == 0]
195 | 
196 |         try:
197 |             self._storage.bucket.delete_keys(doomed, quiet=True)
198 |         except (IOError, OSError):
199 |             pass
200 | 
201 | 
202 |     def _get_num_entries(self):
203 |         """
204 |         There seems to be an artificial limit of 1000
205 |         """
206 |         return len(self._storage.bucket.get_all_keys(prefix=self._location))
207 |     _num_entries = property(_get_num_entries)
208 | 
209 |     def clear(self):
210 |         # delete all keys
211 |         self._cull(0)
212 | 
213 | # For backwards compatibility
214 | class CacheClass(AmazonS3Cache):
215 |     """
216 |     Backward compatibility class definition
217 |     """
218 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # pylint: disable=missing-docstring,invalid-name
3 | 
4 | from __future__ import print_function
5 | 
6 | import sys
7 | from setuptools import setup, find_packages
8 | 
9 | with open('README.rst') as file:
10 |     long_description = file.read()
11 | 
12 | config = {
13 |     'name'             : 'django-s3-cache',
14 |     'version'          : '1.4.3',
15 |     'packages'         : find_packages(),
16 |     'author'           : 'Alexander Todorov',
17 |     'author_email'     : 'atodorov@MrSenko.com',
18 |     'license'          : 'MIT',
19 |     'description'      : 'Amazon Simple Storage Service (S3) cache backend for Django',
20 |     'long_description' : long_description,
21 |     'url'              : 'https://github.com/atodorov/django-s3-cache',
22 |     'keywords'         : ['Amazon', 'S3', 'Django', 'cache'],
23 |     'classifiers'      : [
24 |         'Development Status :: 5 - Production/Stable',
25 |         'Environment :: Web Environment',
26 |         'Intended Audience :: Developers',
27 |         'License :: OSI Approved :: MIT License',
28 |         'Operating System :: OS Independent',
29 |         'Programming Language :: Python',
30 |         'Framework :: Django',
31 |     ],
32 |     'zip_safe'         : False,
33 |     'install_requires' : ['boto', 'django-storages>=1.1.8', 'Django'],
34 | }
35 | 
36 | if (len(sys.argv) >= 2) and (sys.argv[1] == '--requires'):
37 |     for req in config['install_requires']:
38 |         print(req)
39 | else:
40 |     setup(**config)
41 | 
--------------------------------------------------------------------------------
/sitecustomize.py:
--------------------------------------------------------------------------------
1 | """
2 | This file is used to configure Django for testing.
3 | It is placed in the user's site-packages directory and
4 | is loaded before each Python process!
5 | """
6 | import os
7 | os.environ["DJANGO_SETTINGS_MODULE"] = "django.conf.global_settings"
8 | 
9 | # pylint: disable=wrong-import-position
10 | import django
11 | from django.conf import global_settings
12 | 
13 | global_settings.INSTALLED_APPS = ('django_nose',)
14 | global_settings.TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
15 | global_settings.NOSE_ARGS = [
16 |     '-x', '-v',
17 |     '--with-coverage',
18 |     '--cover-erase',
19 |     '--cover-branches',
20 |     '--cover-package=s3cache',
21 | ]
22 | global_settings.MIDDLEWARE_CLASSES = ()
23 | global_settings.SECRET_KEY = "not-very-secret"
24 | 
25 | global_settings.DATABASES = {
26 |     'default': {
27 |         'ENGINE': 'django.db.backends.sqlite3',
28 |         'NAME': ':memory:'
29 |     }
30 | }
31 | 
32 | # http://django.readthedocs.org/en/latest/releases/1.7.html#standalone-scripts
33 | if django.VERSION >= (1, 7):
34 |     django.setup()
35 | 
--------------------------------------------------------------------------------
/tests/tests.py:
--------------------------------------------------------------------------------
1 | # pylint: disable=missing-docstring,protected-access,invalid-name
2 | 
3 | from io import BytesIO
4 | try:
5 |     from unittest.mock import patch
6 | except ImportError:
7 |     from mock import patch
8 | import time
9 | from django.test import TestCase
10 | 
11 | from s3cache import AmazonS3Cache
12 | 
13 | class S3CacheTestCase(TestCase):
14 |     pass
15 | 
16 | class CacheConfigurationTest(S3CacheTestCase):
17 |     def test_old_style_options(self):
18 |         """
19 |         Test django-storages < 1.1.8 options syntax, which
20 |         needs to be translated to the latest syntax
21 |         """
22 |         cache = AmazonS3Cache(
23 |             None,  # location
24 |             {
25 |                 'BACKEND': 's3cache.AmazonS3Cache',
26 |                 'OPTIONS' : {
27 |                     'ACCESS_KEY_ID'      : 'access_key_old',
28 |                     'SECRET_ACCESS_KEY'  : 'secret_key_old',
29 |                     'STORAGE_BUCKET_NAME': 'bucket_old',
30 |                 }
31 |             }
32 |         )
33 | 
34 |         self.assertEqual(cache._options['access_key'], 'access_key_old')
35 |         self.assertEqual(cache._options['secret_key'], 'secret_key_old')
36 |         self.assertEqual(cache._options['bucket_name'], 'bucket_old')
37 | 
38 |     def test_new_style_options(self):
39 |         """
40 |         Test django-storages >= 1.1.8 options syntax
41 |         """
42 |         cache = AmazonS3Cache(
43 |             None,  # location
44 |             {
45 |                 'BACKEND': 's3cache.AmazonS3Cache',
46 |                 'OPTIONS' : {
47 |                     'ACCESS_KEY' : 'access_key_new',
48 |                     'SECRET_KEY' : 'secret_key_new',
49 |                     'BUCKET_NAME': 'bucket_new',
50 |                 }
51 |             }
52 |         )
53 | 
54 |         self.assertEqual(cache._options['access_key'], 'access_key_new')
55 |         self.assertEqual(cache._options['secret_key'], 'secret_key_new')
56 |         self.assertEqual(cache._options['bucket_name'], 'bucket_new')
57 | 
58 |     def test_mixed_style_options(self):
59 |         """
60 |         Test MIXED options syntax (upgrade leftovers)
61 |         """
62 |         cache = AmazonS3Cache(
63 |             None,  # location
64 |             {
65 |                 'BACKEND': 's3cache.AmazonS3Cache',
66 |                 'OPTIONS' : {
67 |                     'ACCESS_KEY_ID'      : 'access_key_mix',  # old
68 |                     'SECRET_KEY'         : 'secret_key_mix',
69 |                     'STORAGE_BUCKET_NAME': 'bucket_mix',      # old
70 |                 }
71 |             }
72 |         )
73 | 
74 |         self.assertEqual(cache._options['access_key'], 'access_key_mix')
75 |         self.assertEqual(cache._options['secret_key'], 'secret_key_mix')
76 |         self.assertEqual(cache._options['bucket_name'], 'bucket_mix')
77 | 
78 |     def test_lowercase_new_style_options(self):
79 |         """
80 |         Test django-storages >= 1.1.8 options syntax in lower case.
81 |         django-s3-cache v1.3 README was showing lowercase names for the options
82 |         but those were overridden by the backward compatibility code and
83 |         set to None. The issue comes from s3cache automatically converting
84 |         everything to lower case before passing it down to django-storages,
85 |         which uses only lower-case names for its class attributes.
86 | 
87 |         Documentation was updated in 3aaa0f254a2e0e389961b2112a00d3edf3e1ee90
88 | 
89 |         Real usage of lower case option names:
90 |         https://github.com/atodorov/django-s3-cache/issues/2#issuecomment-65423398
91 |         """
92 |         cache = AmazonS3Cache(
93 |             None,  # location
94 |             {
95 |                 'BACKEND': 's3cache.AmazonS3Cache',
96 |                 'OPTIONS' : {
97 |                     'access_key' : 'access_key_low',
98 |                     'secret_key' : 'secret_key_low',
99 |                     'bucket_name': 'bucket_low',
100 |                 }
101 |             }
102 |         )
103 | 
104 |         self.assertEqual(cache._options['access_key'], 'access_key_low')
105 |         self.assertEqual(cache._options['secret_key'], 'secret_key_low')
106 |         self.assertEqual(cache._options['bucket_name'], 'bucket_low')
107 | 
108 | # pylint: disable=no-member,too-many-public-methods
109 | class FunctionalTests(TestCase):
110 |     def _dump_object(self, value, timeout=None):
111 |         io_obj = BytesIO()
112 |         io_obj.write(self.cache._dump_object(value, timeout))
113 |         io_obj.seek(0)
114 |         return io_obj
115 | 
116 |     def setUp(self):
117 |         self.cache = AmazonS3Cache(None, {})
118 | 
119 |     def test_is_expired_with_expired_object(self):
120 |         obj = self._dump_object('TEST', -1)
121 |         with patch.object(AmazonS3Cache, '_delete'):
122 |             self.assertTrue(self.cache._is_expired(obj, 'dummy file name'))
123 | 
124 |     def test_is_expired_with_valid_object(self):
125 |         obj = self._dump_object('TEST', +10)
126 |         with patch.object(AmazonS3Cache, '_delete'):
127 |             self.assertFalse(self.cache._is_expired(obj, 'dummy file name'))
128 | 
129 |     def test_max_entries_greater_than_1000(self):
130 |         cache = AmazonS3Cache(None, {'OPTIONS': {'MAX_ENTRIES': 1001}})
131 |         self.assertEqual(cache._max_entries, 1000)
132 | 
133 |     def test_max_entries_less_than_1000(self):
134 |         cache = AmazonS3Cache(None, {'OPTIONS': {'MAX_ENTRIES': 200}})
135 |         self.assertEqual(cache._max_entries, 200)
136 | 
137 |     def test_has_key_with_valid_key_and_non_expired_object(self):
138 |         obj = self._dump_object('TEST', +10)
139 |         with patch.object(self.cache._storage, 'open', return_value=obj):
140 |             self.assertTrue(self.cache.has_key('my-key'))
141 | 
142 |     def test_has_key_with_valid_key_and_expired_object(self):
143 |         obj = self._dump_object('TEST', -1)
144 |         with patch.object(self.cache._storage, 'open', return_value=obj), \
145 |              patch.object(AmazonS3Cache, '_delete'):
146 |             self.assertFalse(self.cache.has_key('my-key'))
147 | 
148 |     def test_has_key_with_invalid_key(self):
149 |         with patch.object(self.cache._storage, 'open', side_effect=IOError):
150 |             self.assertFalse(self.cache.has_key('my-key'))
151 | 
152 |     def test_add_with_existing_key(self):
153 |         with patch.object(self.cache, 'has_key', return_value=True):
154 |             self.assertFalse(self.cache.add('my-key', 'TEST'))
155 | 
156 |     def test_add_with_non_existing_key(self):
157 |         with patch.object(self.cache, 'has_key', return_value=False), \
158 |              patch.object(self.cache, '_cull') as cull_mock, \
159 |              patch.object(self.cache._storage, 'save') as save_mock:
160 |             self.assertTrue(self.cache.add('my-key', 'TEST'))
161 |             self.assertEqual(cull_mock.call_count, 1)
162 |             self.assertEqual(save_mock.call_count, 1)
163 | 
164 |     def test_set_storage_raises_exception(self):
165 |         with patch.object(self.cache._storage, 'save', side_effect=IOError), \
166 |              patch.object(self.cache, '_cull'):
167 |             # doesn't raise an exception
168 |             self.cache.set('my-key', 'TEST')
169 | 
170 |     def test_get_valid_key_non_expired_object(self):
171 |         obj = self._dump_object('TEST', +10)
172 |         with patch.object(self.cache._storage, 'open', return_value=obj):
173 |             self.assertEqual(self.cache.get('my-key'), 'TEST')
174 | 
175 |     def test_get_valid_key_expired_object(self):
176 |         obj = self._dump_object('TEST', -1)
177 |         with patch.object(self.cache._storage, 'open', return_value=obj), \
178 |              patch.object(AmazonS3Cache, '_delete'):
179 |             self.assertEqual(self.cache.get('my-key'), None)
180 | 
181 |     def test_get_after_waiting_the_object_to_expire(self):
182 |         obj = self._dump_object('TEST', 2)
183 |         # wait for the object to expire:
184 |         # when mutation testing is used, the mutation
185 |         # time.time() * timeout (instead of time.time() + timeout)
186 |         # will set the expiration date in the future
187 |         time.sleep(3)
188 |         with patch.object(self.cache._storage, 'open', return_value=obj), \
189 |              patch.object(AmazonS3Cache, '_delete'):
190 |             self.assertEqual(self.cache.get('my-key'), None)
191 | 
192 |     def test_get_storage_raises_exception(self):
193 |         with patch.object(self.cache._storage, 'open', side_effect=IOError):
194 |             self.assertEqual(self.cache.get('my-key'), None)
195 | 
196 |     def test_delete(self):
197 |         with patch.object(self.cache._storage, 'delete') as delete_mock:
198 |             self.cache.delete('my-key')
199 |             self.assertEqual(delete_mock.call_count, 1)
200 | 
201 |     def test_delete_storage_raises_exception(self):
202 |         with patch.object(self.cache._storage, 'delete', side_effect=IOError):
203 |             # doesn't raise an exception
204 |             self.cache.delete('my-key')
205 | 
206 |     def test_clear(self):
207 |         cache = AmazonS3Cache(None, {})
208 |         cache._max_entries = 10
209 |         cache._cull_frequency = 3
210 |         key_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
211 |         with patch.object(cache._storage, '_bucket') as _bucket:
212 |             _bucket.configure_mock(**{
213 |                 'get_all_keys.return_value': key_list
214 |             })
215 |             cache.clear()
216 |             self.assertEqual(_bucket.get_all_keys.call_count, 2)
217 |             self.assertEqual(_bucket.delete_keys.call_count, 1)
218 |             # all keys were deleted
219 |             _bucket.delete_keys.assert_called_with(key_list, quiet=True)
220 | 
221 | 
222 |     def test_clear_bucket_raises_exception(self):
223 |         with patch.object(self.cache._storage, '_bucket') as _bucket:
224 |             _bucket.configure_mock(**{
225 |                 'get_all_keys.return_value': [1, 2, 3],
226 |                 'delete_keys.side_effect': OSError
227 |             })
228 |             self.cache.clear()
229 | 
230 |     def test_num_entries(self):
231 |         with patch.object(self.cache._storage, '_bucket') as _bucket:
232 |             _bucket.configure_mock(**{
233 |                 'get_all_keys.return_value': [1, 2, 3]
234 |             })
235 |             self.assertEqual(self.cache._num_entries, 3)
236 | 
237 |     def test_cull_without_max_entries(self):
238 |         cache = AmazonS3Cache(None, {})
239 |         cache._max_entries = 0
240 |         with patch.object(cache._storage, '_bucket') as _bucket:
241 |             cache._cull()
242 |             self.assertEqual(_bucket.get_all_keys.call_count, 0)
243 | 
244 |     def test_cull_with_num_entries_less_than_max_entries(self):
245 |         with patch.object(self.cache._storage, '_bucket') as _bucket:
246 |             _bucket.configure_mock(**{
247 |                 'get_all_keys.return_value': [1, 2, 3]
248 |             })
249 |             self.cache._cull()
250 |             self.assertEqual(_bucket.get_all_keys.call_count, 1)
251 |             self.assertEqual(_bucket.delete_keys.call_count, 0)
252 | 
253 |     def test_cull_with_num_entries_greater_than_max_entries_cull_frequency_0(self):
254 |         cache = AmazonS3Cache(None, {})
255 |         cache._max_entries = 5
256 |         cache._cull_frequency = 0
257 |         key_list = [1, 2, 3, 5, 6, 7, 8, 9, 0]
258 |         with patch.object(cache._storage, '_bucket') as _bucket:
259 |             _bucket.configure_mock(**{
260 |                 'get_all_keys.return_value': key_list
261 |             })
262 |             cache._cull()
263 |             self.assertEqual(_bucket.get_all_keys.call_count, 2)
264 |             self.assertEqual(_bucket.delete_keys.call_count, 1)
265 |             # when cull_frequency == 0 it means to delete all keys
266 |             _bucket.delete_keys.assert_called_with(key_list, quiet=True)
267 | 
268 |     def test_cull_with_num_entries_greater_than_max_entries_cull_frequency_3(self):
269 |         cache = AmazonS3Cache(None, {})
270 |         cache._max_entries = 5
271 |         cache._cull_frequency = 3
272 |         key_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
273 |         delete_list = [1, 4, 7, 0]
274 |         with patch.object(cache._storage, '_bucket') as _bucket:
275 |             _bucket.configure_mock(**{
276 |                 'get_all_keys.return_value': key_list
277 |             })
278 |             cache._cull()
279 |             self.assertEqual(_bucket.get_all_keys.call_count, 2)
280 |             self.assertEqual(_bucket.delete_keys.call_count, 1)
281 |             # when cull_frequency != 0 it means to delete every Nth key
282 |             _bucket.delete_keys.assert_called_with(delete_list, quiet=True)
283 | 
284 |     def test_cull_bucket_get_all_keys_raises_exception(self):
285 |         class MockAmazonS3Cache(AmazonS3Cache):
286 |             def _get_num_entries(self):
287 |                 return 10
288 |             _num_entries = property(_get_num_entries)
289 | 
290 |         cache = MockAmazonS3Cache(None, {})
291 |         cache._max_entries = 5
292 | 
293 |         with patch.object(cache._storage, '_bucket') as _bucket:
294 |             _bucket.configure_mock(**{
295 |                 'get_all_keys.side_effect': OSError
296 |             })
297 |             cache._cull()
298 |             self.assertEqual(_bucket.get_all_keys.call_count, 1)
299 |             self.assertEqual(_bucket.delete_keys.call_count, 0)
300 | 
301 |     def test_cull_bucket_delete_keys_raises_exception(self):
302 |         cache = AmazonS3Cache(None, {})
303 |         cache._max_entries = 5
304 |         key_list = [1, 2, 3, 5, 6, 7, 8, 9, 0]
305 |         with patch.object(cache._storage, '_bucket') as _bucket:
306 |             _bucket.configure_mock(**{
307 |                 'get_all_keys.return_value': key_list,
308 |                 'delete_keys.side_effect': OSError
309 |             })
310 |             cache._cull()
311 |             self.assertEqual(_bucket.get_all_keys.call_count, 2)
312 |             self.assertEqual(_bucket.delete_keys.call_count, 1)
313 | 
--------------------------------------------------------------------------------
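
For completeness, a minimal usage sketch, assuming a *CACHES* configuration like
the README.rst example is present in *settings.py* (here trimmed to the culling
knobs discussed above); the application code uses only Django's standard cache
API and all key/value literals are arbitrary examples:

::

    # settings.py
    CACHES = {
        'default': {
            'BACKEND': 's3cache.AmazonS3Cache',
            'OPTIONS': {
                'ACCESS_KEY' : '...',
                'SECRET_KEY' : '...',
                'BUCKET_NAME': '...',
                'MAX_ENTRIES': 300,   # cull once 300 objects accumulate under the prefix
                'CULL_FREQUENCY': 2,  # then delete 1/CULL_FREQUENCY == half of the entries
            }
        }
    }

    # application code
    from django.core.cache import cache  # resolves to s3cache.AmazonS3Cache

    cache.set('greeting', 'hello', timeout=300)  # pickled, stored as a sha1-named S3 object
    assert cache.get('greeting') == 'hello'      # read back from S3; returns None once expired
    cache.delete('greeting')                     # removes the object from the bucket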