├── tests ├── conf │ ├── seafile.conf │ └── storage │ │ ├── blocks │ │ └── 3f9e4aa5-d6ba-4066-a1d6-81824f422af1 │ │ │ ├── 12 │ │ │ └── 5f1e9dc9f3eca5a6819f9b4a2e17e53d7e2f78 │ │ │ ├── 15 │ │ │ └── 69cf662c7befe4c4891a22cc7a1c035bc8bfac │ │ │ ├── 29 │ │ │ └── 49afb5a9c351b9415b91c8f3d0d98991118c11 │ │ │ ├── 96 │ │ │ └── 9e42baa28e87991c2c2f99a10cc597f3c2cc9d │ │ │ ├── 5e │ │ │ └── 7996b6befd9ace3c1b4003d6db16f4150ff394 │ │ │ ├── c0 │ │ │ └── f6052bb9dca21ba6bc2f942bdc919285d50976 │ │ │ ├── 6c │ │ │ └── 686d18269fa3655998f3c256108ed074076398 │ │ │ ├── 0a │ │ │ └── e2c82d9ebd7e166ca0e24d52bba31fd64e3c8d │ │ │ ├── b3 │ │ │ └── fb3f1d7d7d3c7bad7837df7f53a2d8bd8472bd │ │ │ └── b7 │ │ │ └── 3b3cf6dc021d20c7a0e9bedf46a5b6a58bdd53 │ │ ├── fs │ │ └── 3f9e4aa5-d6ba-4066-a1d6-81824f422af1 │ │ │ ├── 13 │ │ │ └── a88b850fff0fc71a3773f910b460acf74a5a48 │ │ │ ├── 16 │ │ │ ├── 580b8b29769e8c0ffca0cca3ab3b437a789862 │ │ │ └── f1fbdecdee83a17c3a608fef2c853be21c33e7 │ │ │ ├── 18 │ │ │ └── 7c61bff4dfe27c011817e9ab00fb5c89223d8d │ │ │ ├── 24 │ │ │ ├── 75bcb9dbe92ad9728d07a96c02d00a374c55dd │ │ │ └── ce2befc450e4d60a7ea792bcfcf5d50393b712 │ │ │ ├── 33 │ │ │ └── eea09c1da3c3ad8de6313df62e5961574f786d │ │ │ ├── 37 │ │ │ └── 4cfe4e29d83317d52ef9709c859e9bb65e5b0e │ │ │ ├── 43 │ │ │ └── 001bd2de8f63b3adb42e19cebd4168a39adac7 │ │ │ ├── 51 │ │ │ └── 346ce17df5ed7c3b4492405086b5bea03c5247 │ │ │ ├── 53 │ │ │ └── 40f65b17b688040b287ac55b18709ee66d28d4 │ │ │ ├── 67 │ │ │ └── 619f6d87f2f232bb0a821649f9fd1358eaa58c │ │ │ ├── 72 │ │ │ └── 7b85252990036bbab6e0d0d03aa78edb5369f2 │ │ │ ├── 83 │ │ │ └── 0181201ee00133d13736a332b0e476cc4cac01 │ │ │ ├── 88 │ │ │ ├── 6d71ca196eab9a337ff25407fbb87e1a7b0a96 │ │ │ └── a2c6aa5217c9d47969b6c9aeff0620c9c67be7 │ │ │ ├── 97 │ │ │ └── 71cd218f1002e59c6f0dc6ee2dc57dc9dde698 │ │ │ ├── 00 │ │ │ ├── 2375292edc2489e119e875d919db785001d0d4 │ │ │ └── 6fb385c6bc952077b2488829895a4e78555e74 │ │ │ ├── 04 │ │ │ └── 5dfc08495b5c6cbc1a4dc347f5e2987fd809f4 │ │ │ ├── 05 │ │ │ └── 
a6f0455d1f11ecfc202f5e218274b092fd3dbc │ │ │ ├── 07 │ │ │ └── b17efa3cc443c9ee121aff523122c2c64071c5 │ │ │ ├── 0e │ │ │ └── 01eb14c626bc33670ae5df2ebe8ed22bedcf3e │ │ │ ├── 1c │ │ │ ├── 63b08bea7ccb2adfb7752cf1adc6af89f4ed91 │ │ │ ├── 97ac7765d75136e876a8618f4ad35c635936c7 │ │ │ └── b9aee9557fcb76465dcbbe1fdb078f16e96d26 │ │ │ ├── 3a │ │ │ └── d0a00df678b537c928513c05890c1f3bc50266 │ │ │ ├── 3c │ │ │ ├── 1000329fcb5a51a9d1860d9b86ce7f4c7ceb5f │ │ │ └── 8ab7d86e67138b1aa48824866986522c011391 │ │ │ ├── 3e │ │ │ ├── ea84c126b9a71c4dc817cec2e524501e96c0f7 │ │ │ └── f00cc2e396b7492eaee6e7d0fc6e23279569c5 │ │ │ ├── 6e │ │ │ └── bef09e10dcfc0b7e8440cc58bb61ef8a02c805 │ │ │ ├── 9a │ │ │ └── e6c35f473c582d032d0e28460e670a52bfeda1 │ │ │ ├── 9e │ │ │ └── 768ed89b40dbadffdc73194575fa1851004171 │ │ │ ├── a4 │ │ │ └── 0bdbb2bad9ea3de9c5848d5c7d059fbf69408b │ │ │ ├── a6 │ │ │ └── 8ded33eb116a785f93f03cc5e395ba8f9252ef │ │ │ ├── ad │ │ │ └── 535adf8feb3a28110605da7f880245802d0736 │ │ │ ├── b2 │ │ │ └── 7235c5278ccba69b392fa1734138bf4100693c │ │ │ ├── b6 │ │ │ └── 55d8292dde39841d226d3e614d8896bff8ab8f │ │ │ ├── b7 │ │ │ └── c5c9e9684a7907916878832fceef1252d47b1d │ │ │ ├── ba │ │ │ └── 84b0b6e3e58d558933cb504bd7c0fee777ebbc │ │ │ ├── be │ │ │ └── 4f5a12b2d5a9e853ed2aa34a71c5eefdc94522 │ │ │ ├── cc │ │ │ ├── 2aa776bd97661ce82523ba779718332dced917 │ │ │ └── a3625387a7bcae54d06c7e2211e3ca09548d77 │ │ │ ├── cd │ │ │ └── 9e02d554601b5e5a6fce90507e750b7a3f4639 │ │ │ ├── ce │ │ │ └── a847dc95a500689006ac655e9188fab65d15c4 │ │ │ ├── d9 │ │ │ └── d1cd4f7281f194a226ebe1727de5351b0f6238 │ │ │ ├── da │ │ │ └── 6dc358f3acb1ece3757b3e392acd51846d70e7 │ │ │ ├── df │ │ │ └── c1a4d7c66f2d76739140132b437195cd5d1d1e │ │ │ ├── e0 │ │ │ └── 3ba3543f3e96e20ffa2bb086f7a79194311724 │ │ │ ├── ea │ │ │ └── cb722df281748facbae4d6ca9ade44f20fee1b │ │ │ ├── f1 │ │ │ └── 85a9b682960f1de13d135c0c63f40765236f2a │ │ │ ├── f4 │ │ │ ├── 7bf2d9da74c4cf77fc107f23391b397671b528 │ │ │ └── 
d6632f562881e1b847d592ec924d8e6010ceda │ │ │ ├── f8 │ │ │ ├── 452a24cf7557319fb8cc36a42edbf4f69a0c65 │ │ │ └── d2d17acad4d83188bee1c9863827b82c593e1f │ │ │ └── fc │ │ │ └── 9d6f213586a57ba2c7fab2ceabdb6ff5ddf846 │ │ └── commits │ │ └── 3f9e4aa5-d6ba-4066-a1d6-81824f422af1 │ │ ├── 13 │ │ └── fbc1e153107678f501e1c807b5797269cbe601 │ │ ├── 15 │ │ └── c11b3e7643a9164cf4daf39b790421d1e934b3 │ │ ├── 17 │ │ └── abc6246d4b213dc8e7c92e94b321af7708670a │ │ ├── 23 │ │ └── f58ab01d4c6d79b8b2ea011ef1614e4097b935 │ │ ├── 25 │ │ ├── 2795fc32563b965a28f72e991af55ab8318f3e │ │ └── 8fbe2382a9887ba9815aad8757afd4b5ce8d3d │ │ ├── 35 │ │ └── b0b82a81825c0200fc508c7d1c3d02c30dc1fe │ │ ├── 44 │ │ └── 05b7234b1e9dd74fe7c4f6a844ce79198e0e19 │ │ ├── 76 │ │ └── b617eddf61c9e8768bf8e93a0c473c971e9790 │ │ ├── 78 │ │ └── 78ea888e60937b73e039461c456364f1aa138f │ │ ├── 92 │ │ └── 8fc8c308d441b224b9238d47938cc6ac944950 │ │ ├── 94 │ │ └── 84c79dc0f6f50dcb3116ecfa56c145bf7d113d │ │ ├── 5e │ │ └── 3f290cbd811849f5697b10bd67502605f5f4c2 │ │ ├── 7a │ │ └── 51f20b9efd471b8fe74ad66214e652594f9b92 │ │ ├── 3a │ │ └── 0a8e3a9a2b1bc834dc1cc4b372a6401d1e8e10 │ │ ├── 3d │ │ └── 4659a6cbe13d0c2111455f590fa1537a6ec1e4 │ │ ├── 9e │ │ └── 4705d102d86756eb8ed9d8d16922ee3212c7c5 │ │ ├── 07 │ │ └── 418435d577e814a94a42a35eeb0f6f93cf578d │ │ ├── 5a │ │ └── 4b25345346cb19bad61d801061c6795d55aa4f │ │ ├── 5b │ │ └── 56f5470f6a55492585714c78b8e461875284d6 │ │ ├── b9 │ │ └── dbd0c72d8a0ccfd9110d638397c52d1540c114 │ │ ├── bb │ │ └── f5382d4c63f58fc647b416e6c059f948e13277 │ │ ├── d4 │ │ └── 797f387be27a9f3bf210441dd02fd715dbd454 │ │ ├── eb │ │ └── d1fdb77760f23d2ab29e1d2e121484260adac9 │ │ ├── 03 │ │ └── d5a857ccc2764e69a124cd8f3360aea1d5595f │ │ ├── 0a │ │ └── 3d3867a4ae7470485f2d591b9981ff885f59e3 │ │ ├── c3 │ │ └── 9a521ab58e216103a8c668dda697a51f870426 │ │ ├── c4 │ │ └── c0925d53b61d173ddeeac9b45c34aa0e19608f │ │ ├── d1 │ │ └── b33fde55b05171aad287bdb87a43f2252a41d6 │ │ ├── dc │ │ └── 
8cc4557a310f668bb81c3f44d90b1ff6270f38 │ │ ├── 7e │ │ └── d2a485f96121f5d336363e8d61d9ef2911beeb │ │ ├── e3 │ │ ├── eea2da33dedda26690739c1ca7b7da2022fa40 │ │ └── e0c6f57baa1a122af75bb395719d9725169982 │ │ ├── ff │ │ └── c32568c059e9532cb426f19f8138c624c5cdd4 │ │ └── a2 │ │ └── 4bf4385e5df18922337390e757c4b7789d853d ├── test_objwrapper │ ├── utils.py │ └── test_objwrapper.py ├── test_blocks │ └── test_blocks.py ├── test_commits │ └── test_commits.py └── test_fs │ └── test_fs.py ├── seafobj ├── backends │ ├── __init__.py │ ├── alioss.py │ ├── s3.py │ ├── base.py │ ├── filesystem.py │ ├── ceph.py │ └── swift.py ├── utils │ ├── __init__.py │ ├── ceph_utils.py │ └── crypto.py ├── __init__.py ├── redis_cache.py ├── exceptions.py ├── mc.py ├── blocks.py ├── commits.py ├── db.py ├── commit_differ.py ├── fs.py └── objstore_factory.py ├── requirements.txt ├── test-requirements.txt ├── .pylint ├── .gitignore ├── ci ├── requirements.txt ├── install-deps.sh ├── run.py └── utils.py ├── .functests ├── run_test.sh ├── objwrapper ├── exceptions.py ├── alioss.py └── s3.py ├── Makefile ├── do-pycscope.sh ├── README.md ├── .unittests ├── .github └── workflows │ └── ci.yml ├── copy_data.py ├── pylintrc └── LICENSE.txt /tests/conf/seafile.conf: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seafobj/backends/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seafobj/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.28.12 2 | 
-------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | coverage 2 | nose 3 | mock>=0.8.0 4 | -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/5e/7996b6befd9ace3c1b4003d6db16f4150ff394: -------------------------------------------------------------------------------- 1 | thisi s deleted folder. -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/c0/f6052bb9dca21ba6bc2f942bdc919285d50976: -------------------------------------------------------------------------------- 1 | this is added folder. -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/6c/686d18269fa3655998f3c256108ed074076398: -------------------------------------------------------------------------------- 1 | this is moved folder. 2 | -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/29/49afb5a9c351b9415b91c8f3d0d98991118c11: -------------------------------------------------------------------------------- 1 | this is new file. 2 | 3 | 4 | -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/15/69cf662c7befe4c4891a22cc7a1c035bc8bfac: -------------------------------------------------------------------------------- 1 | this is moved file. 
2 | 3 | kldqoejvys -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/0a/e2c82d9ebd7e166ca0e24d52bba31fd64e3c8d: -------------------------------------------------------------------------------- 1 | this is deleted file. 2 | 3 | wntifyazgg -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/96/9e42baa28e87991c2c2f99a10cc597f3c2cc9d: -------------------------------------------------------------------------------- 1 | this is modified file. 2 | 3 | ayexqkushe. -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b3/fb3f1d7d7d3c7bad7837df7f53a2d8bd8472bd: -------------------------------------------------------------------------------- 1 | this is renamed folder. 2 | 3 | aofsnmayhu -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b7/3b3cf6dc021d20c7a0e9bedf46a5b6a58bdd53: -------------------------------------------------------------------------------- 1 | this is renamed file. 2 | 3 | crmqzuhsed. -------------------------------------------------------------------------------- /seafobj/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from .commits import commit_mgr 3 | from .fs import fs_mgr 4 | from .blocks import block_mgr 5 | from .commit_differ import CommitDiffer 6 | -------------------------------------------------------------------------------- /tests/conf/storage/blocks/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/12/5f1e9dc9f3eca5a6819f9b4a2e17e53d7e2f78: -------------------------------------------------------------------------------- 1 | this is modified file. 2 | 3 | ayexqkushe. 4 | 5 | 6 | modified. 
-------------------------------------------------------------------------------- /.pylint: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TOP_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))") 4 | 5 | cd $TOP_DIR 6 | pylint --rcfile=pylintrc seafobj test 7 | cd - 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *# 3 | *.pyc 4 | .DS_Store 5 | *.log 6 | cscope.* 7 | *.gz 8 | run.sh 9 | build 10 | dist 11 | cover 12 | .coverage 13 | .#* 14 | pkg.ini 15 | .emacs* 16 | /.idea 17 | -------------------------------------------------------------------------------- /ci/requirements.txt: -------------------------------------------------------------------------------- 1 | termcolor>=2.3.0 2 | requests>=2.31.0 3 | pytest>=7.4.0 4 | pytest-instafail 5 | boto3==1.28.12 6 | oss2==2.18.4 7 | sqlalchemy==2.0.18 8 | pylibmc==1.6.3 9 | redis==5.0.8 10 | lxml==5.3.1 11 | -------------------------------------------------------------------------------- /.functests: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))") 4 | 5 | cd ${SRC_DIR}/test/functional 6 | nosetests $@ 7 | rvalue=$? 8 | cd - 9 | 10 | exit $rvalue 11 | -------------------------------------------------------------------------------- /ci/install-deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -x 4 | 5 | SCRIPT=${BASH_SOURCE[0]} 6 | TESTS_DIR=$(dirname "${SCRIPT}")/.. 
7 | SETUP_DIR=${TESTS_DIR}/ci 8 | 9 | cd $SETUP_DIR 10 | 11 | pip install -r requirements.txt 12 | -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/00/2375292edc2489e119e875d919db785001d0d4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/00/2375292edc2489e119e875d919db785001d0d4 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/00/6fb385c6bc952077b2488829895a4e78555e74: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/00/6fb385c6bc952077b2488829895a4e78555e74 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/04/5dfc08495b5c6cbc1a4dc347f5e2987fd809f4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/04/5dfc08495b5c6cbc1a4dc347f5e2987fd809f4 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/05/a6f0455d1f11ecfc202f5e218274b092fd3dbc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/05/a6f0455d1f11ecfc202f5e218274b092fd3dbc -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/07/b17efa3cc443c9ee121aff523122c2c64071c5: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/07/b17efa3cc443c9ee121aff523122c2c64071c5 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/0e/01eb14c626bc33670ae5df2ebe8ed22bedcf3e: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/0e/01eb14c626bc33670ae5df2ebe8ed22bedcf3e -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/13/a88b850fff0fc71a3773f910b460acf74a5a48: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/13/a88b850fff0fc71a3773f910b460acf74a5a48 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/16/580b8b29769e8c0ffca0cca3ab3b437a789862: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/16/580b8b29769e8c0ffca0cca3ab3b437a789862 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/16/f1fbdecdee83a17c3a608fef2c853be21c33e7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/16/f1fbdecdee83a17c3a608fef2c853be21c33e7 -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/18/7c61bff4dfe27c011817e9ab00fb5c89223d8d: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/18/7c61bff4dfe27c011817e9ab00fb5c89223d8d -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/63b08bea7ccb2adfb7752cf1adc6af89f4ed91: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/63b08bea7ccb2adfb7752cf1adc6af89f4ed91 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/97ac7765d75136e876a8618f4ad35c635936c7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/97ac7765d75136e876a8618f4ad35c635936c7 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/b9aee9557fcb76465dcbbe1fdb078f16e96d26: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/1c/b9aee9557fcb76465dcbbe1fdb078f16e96d26 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/24/75bcb9dbe92ad9728d07a96c02d00a374c55dd: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/24/75bcb9dbe92ad9728d07a96c02d00a374c55dd -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/24/ce2befc450e4d60a7ea792bcfcf5d50393b712: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/24/ce2befc450e4d60a7ea792bcfcf5d50393b712 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/33/eea09c1da3c3ad8de6313df62e5961574f786d: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/33/eea09c1da3c3ad8de6313df62e5961574f786d -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/37/4cfe4e29d83317d52ef9709c859e9bb65e5b0e: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/37/4cfe4e29d83317d52ef9709c859e9bb65e5b0e -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3a/d0a00df678b537c928513c05890c1f3bc50266: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3a/d0a00df678b537c928513c05890c1f3bc50266 -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3c/1000329fcb5a51a9d1860d9b86ce7f4c7ceb5f: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3c/1000329fcb5a51a9d1860d9b86ce7f4c7ceb5f -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3c/8ab7d86e67138b1aa48824866986522c011391: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3c/8ab7d86e67138b1aa48824866986522c011391 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3e/ea84c126b9a71c4dc817cec2e524501e96c0f7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3e/ea84c126b9a71c4dc817cec2e524501e96c0f7 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3e/f00cc2e396b7492eaee6e7d0fc6e23279569c5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3e/f00cc2e396b7492eaee6e7d0fc6e23279569c5 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/43/001bd2de8f63b3adb42e19cebd4168a39adac7: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/43/001bd2de8f63b3adb42e19cebd4168a39adac7 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/51/346ce17df5ed7c3b4492405086b5bea03c5247: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/51/346ce17df5ed7c3b4492405086b5bea03c5247 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/53/40f65b17b688040b287ac55b18709ee66d28d4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/53/40f65b17b688040b287ac55b18709ee66d28d4 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/67/619f6d87f2f232bb0a821649f9fd1358eaa58c: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/67/619f6d87f2f232bb0a821649f9fd1358eaa58c -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/6e/bef09e10dcfc0b7e8440cc58bb61ef8a02c805: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/6e/bef09e10dcfc0b7e8440cc58bb61ef8a02c805 -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/72/7b85252990036bbab6e0d0d03aa78edb5369f2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/72/7b85252990036bbab6e0d0d03aa78edb5369f2 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/83/0181201ee00133d13736a332b0e476cc4cac01: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/83/0181201ee00133d13736a332b0e476cc4cac01 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/88/6d71ca196eab9a337ff25407fbb87e1a7b0a96: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/88/6d71ca196eab9a337ff25407fbb87e1a7b0a96 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/88/a2c6aa5217c9d47969b6c9aeff0620c9c67be7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/88/a2c6aa5217c9d47969b6c9aeff0620c9c67be7 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/97/71cd218f1002e59c6f0dc6ee2dc57dc9dde698: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/97/71cd218f1002e59c6f0dc6ee2dc57dc9dde698 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/9a/e6c35f473c582d032d0e28460e670a52bfeda1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/9a/e6c35f473c582d032d0e28460e670a52bfeda1 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/9e/768ed89b40dbadffdc73194575fa1851004171: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/9e/768ed89b40dbadffdc73194575fa1851004171 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/a4/0bdbb2bad9ea3de9c5848d5c7d059fbf69408b: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/a4/0bdbb2bad9ea3de9c5848d5c7d059fbf69408b -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/a6/8ded33eb116a785f93f03cc5e395ba8f9252ef: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/a6/8ded33eb116a785f93f03cc5e395ba8f9252ef -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ad/535adf8feb3a28110605da7f880245802d0736: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ad/535adf8feb3a28110605da7f880245802d0736 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b2/7235c5278ccba69b392fa1734138bf4100693c: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b2/7235c5278ccba69b392fa1734138bf4100693c -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b6/55d8292dde39841d226d3e614d8896bff8ab8f: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b6/55d8292dde39841d226d3e614d8896bff8ab8f -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b7/c5c9e9684a7907916878832fceef1252d47b1d: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b7/c5c9e9684a7907916878832fceef1252d47b1d -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ba/84b0b6e3e58d558933cb504bd7c0fee777ebbc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ba/84b0b6e3e58d558933cb504bd7c0fee777ebbc -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/be/4f5a12b2d5a9e853ed2aa34a71c5eefdc94522: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/be/4f5a12b2d5a9e853ed2aa34a71c5eefdc94522 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cc/2aa776bd97661ce82523ba779718332dced917: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cc/2aa776bd97661ce82523ba779718332dced917 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cc/a3625387a7bcae54d06c7e2211e3ca09548d77: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cc/a3625387a7bcae54d06c7e2211e3ca09548d77 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cd/9e02d554601b5e5a6fce90507e750b7a3f4639: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/cd/9e02d554601b5e5a6fce90507e750b7a3f4639 -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ce/a847dc95a500689006ac655e9188fab65d15c4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ce/a847dc95a500689006ac655e9188fab65d15c4 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/d9/d1cd4f7281f194a226ebe1727de5351b0f6238: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/d9/d1cd4f7281f194a226ebe1727de5351b0f6238 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/da/6dc358f3acb1ece3757b3e392acd51846d70e7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/da/6dc358f3acb1ece3757b3e392acd51846d70e7 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/df/c1a4d7c66f2d76739140132b437195cd5d1d1e: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/df/c1a4d7c66f2d76739140132b437195cd5d1d1e -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/e0/3ba3543f3e96e20ffa2bb086f7a79194311724: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/e0/3ba3543f3e96e20ffa2bb086f7a79194311724 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ea/cb722df281748facbae4d6ca9ade44f20fee1b: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ea/cb722df281748facbae4d6ca9ade44f20fee1b -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f1/85a9b682960f1de13d135c0c63f40765236f2a: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f1/85a9b682960f1de13d135c0c63f40765236f2a -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f4/7bf2d9da74c4cf77fc107f23391b397671b528: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f4/7bf2d9da74c4cf77fc107f23391b397671b528 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f4/d6632f562881e1b847d592ec924d8e6010ceda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f4/d6632f562881e1b847d592ec924d8e6010ceda -------------------------------------------------------------------------------- 
/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f8/452a24cf7557319fb8cc36a42edbf4f69a0c65: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f8/452a24cf7557319fb8cc36a42edbf4f69a0c65 -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f8/d2d17acad4d83188bee1c9863827b82c593e1f: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/f8/d2d17acad4d83188bee1c9863827b82c593e1f -------------------------------------------------------------------------------- /tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/fc/9d6f213586a57ba2c7fab2ceabdb6ff5ddf846: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/haiwen/seafobj/HEAD/tests/conf/storage/fs/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/fc/9d6f213586a57ba2c7fab2ceabdb6ff5ddf846 -------------------------------------------------------------------------------- /run_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SCRIPT=${BASH_SOURCE[0]} 6 | PROJECT_DIR=$(dirname "${SCRIPT}") 7 | 8 | cd $PROJECT_DIR 9 | 10 | export PYTHONPATH=$PROJECT_DIR:$PYTHONPATH 11 | export SEAFILE_CONF_DIR=$PROJECT_DIR/tests/conf 12 | 13 | ci/run.py --test-only 14 | -------------------------------------------------------------------------------- /ci/run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | 5 | from utils import setup_logging, shell 6 | from os.path import abspath, join 7 | 8 | TOPDIR = abspath(join(os.getcwd())) 9 | 10 | 11 | def main(): 
12 | shell("py.test", env=dict(os.environ)) 13 | 14 | if __name__ == "__main__": 15 | setup_logging() 16 | main() 17 | -------------------------------------------------------------------------------- /objwrapper/exceptions.py: -------------------------------------------------------------------------------- 1 | #coding: UTF-8 2 | 3 | class ObjWrapperException(Exception): 4 | def __init__(self, msg): 5 | Exception.__init__(self) 6 | self.msg = str(msg) 7 | 8 | def __str__(self): 9 | return self.msg 10 | 11 | class InvalidConfigError(ObjWrapperException): 12 | '''This Exception is rasied when error happens during parsing 13 | seafile.conf 14 | 15 | ''' 16 | pass 17 | -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/5e/3f290cbd811849f5697b10bd67502605f5f4c2: -------------------------------------------------------------------------------- 1 | {"commit_id": "5e3f290cbd811849f5697b10bd67502605f5f4c2", "ctime": 1517210869, "description": "", "repo_desc": "", "root_id": "0000000000000000000000000000000000000000", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": null, "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | @echo 3 | @echo 'Usage: make ' 4 | @echo 5 | @echo ' unittest run unit tests' 6 | @echo ' functest run funtional tests' 7 | @echo 8 | 9 | dist: 10 | tar czv \ 11 | --exclude='*.git*' \ 12 | --exclude='*.log' \ 13 | --exclude='*~' \ 14 | --exclude='*#' \ 15 | --exclude='*.gz' \ 16 | --exclude='*.pyc' \ 17 | -f seafobj.tar.gz seafobj 18 | 19 | unittest: 20 | @bash .unittests 21 | 22 | functest: 23 | @bash .functests 24 | 25 
| pylint: 26 | @bash .pylint 27 | 28 | .PHONY: all unittest functest pylint 29 | -------------------------------------------------------------------------------- /do-pycscope.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | printf "\033[1;32m[ rm ]\033[m deleting the following old files\n" 4 | 5 | ls -lh cscope.* 6 | 7 | rm cscope.* 8 | 9 | printf "\033[1;32m[ find ]\033[m write source file list to cscope.files\n" 10 | 11 | find . -name '*.py' > cscope.files 12 | 13 | printf "\033[1;32m[ wc ]\033[m \033[0;40;32m`wc -l cscope.files | awk '{print $1;}'`\033[m files found\n" 14 | 15 | printf "\033[1;32m[cscope]\033[m now index source files\n" 16 | 17 | pycscope -i cscope.files 18 | 19 | du -h cscope.* 20 | 21 | printf "\033[1;32m[cscope]\033[m done\n" 22 | 23 | -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/7a/51f20b9efd471b8fe74ad66214e652594f9b92: -------------------------------------------------------------------------------- 1 | {"commit_id": "7a51f20b9efd471b8fe74ad66214e652594f9b92", "parent_id": "252795fc32563b965a28f72e991af55ab8318f3e", "version": 1, "root_id": "cc2aa776bd97661ce82523ba779718332dced917", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added \"tmp_folder\"", "ctime": 1517211578, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/35/b0b82a81825c0200fc508c7d1c3d02c30dc1fe: -------------------------------------------------------------------------------- 1 | {"commit_id": "35b0b82a81825c0200fc508c7d1c3d02c30dc1fe", "parent_id": "5b56f5470f6a55492585714c78b8e461875284d6", 
"version": 1, "root_id": "1c63b08bea7ccb2adfb7752cf1adc6af89f4ed91", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added \"renamed_folder\"", "ctime": 1517211612, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3a/0a8e3a9a2b1bc834dc1cc4b372a6401d1e8e10: -------------------------------------------------------------------------------- 1 | {"commit_id": "3a0a8e3a9a2b1bc834dc1cc4b372a6401d1e8e10", "parent_id": "7a51f20b9efd471b8fe74ad66214e652594f9b92", "version": 1, "root_id": "b7c5c9e9684a7907916878832fceef1252d47b1d", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Deleted \"tmp_folder\"", "ctime": 1517211582, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/3d/4659a6cbe13d0c2111455f590fa1537a6ec1e4: -------------------------------------------------------------------------------- 1 | {"commit_id": "3d4659a6cbe13d0c2111455f590fa1537a6ec1e4", "parent_id": "d4797f387be27a9f3bf210441dd02fd715dbd454", "version": 1, "root_id": "f47bf2d9da74c4cf77fc107f23391b397671b528", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added \"moved_folder.md\"", "ctime": 1517211672, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} 
-------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/76/b617eddf61c9e8768bf8e93a0c473c971e9790: -------------------------------------------------------------------------------- 1 | {"commit_id": "76b617eddf61c9e8768bf8e93a0c473c971e9790", "parent_id": "b9dbd0c72d8a0ccfd9110d638397c52d1540c114", "version": 1, "root_id": "727b85252990036bbab6e0d0d03aa78edb5369f2", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added \"added_folder\"", "ctime": 1517211693, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/9e/4705d102d86756eb8ed9d8d16922ee3212c7c5: -------------------------------------------------------------------------------- 1 | {"commit_id": "9e4705d102d86756eb8ed9d8d16922ee3212c7c5", "parent_id": "76b617eddf61c9e8768bf8e93a0c473c971e9790", "version": 1, "root_id": "be4f5a12b2d5a9e853ed2aa34a71c5eefdc94522", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Renamed \"added_folder\"", "ctime": 1517211703, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/07/418435d577e814a94a42a35eeb0f6f93cf578d: -------------------------------------------------------------------------------- 1 | {"commit_id": "07418435d577e814a94a42a35eeb0f6f93cf578d", "parent_id": "5a4b25345346cb19bad61d801061c6795d55aa4f", "version": 1, "root_id": 
"3ad0a00df678b537c928513c05890c1f3bc50266", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added \"deleted_folder.md\"", "ctime": 1517211654, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/17/abc6246d4b213dc8e7c92e94b321af7708670a: -------------------------------------------------------------------------------- 1 | {"commit_id": "17abc6246d4b213dc8e7c92e94b321af7708670a", "parent_id": "35b0b82a81825c0200fc508c7d1c3d02c30dc1fe", "version": 1, "root_id": "f8452a24cf7557319fb8cc36a42edbf4f69a0c65", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Renamed \"renamed_folder\"", "ctime": 1517211616, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/44/05b7234b1e9dd74fe7c4f6a844ce79198e0e19: -------------------------------------------------------------------------------- 1 | {"commit_id": "4405b7234b1e9dd74fe7c4f6a844ce79198e0e19", "parent_id": "9e4705d102d86756eb8ed9d8d16922ee3212c7c5", "version": 1, "root_id": "24ce2befc450e4d60a7ea792bcfcf5d50393b712", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"added_folder.md\"", "ctime": 1517211712, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} 
-------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/5a/4b25345346cb19bad61d801061c6795d55aa4f: -------------------------------------------------------------------------------- 1 | {"commit_id": "5a4b25345346cb19bad61d801061c6795d55aa4f", "parent_id": "17abc6246d4b213dc8e7c92e94b321af7708670a", "version": 1, "root_id": "16580b8b29769e8c0ffca0cca3ab3b437a789862", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"renamed_folder.md\"", "ctime": 1517211641, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/5b/56f5470f6a55492585714c78b8e461875284d6: -------------------------------------------------------------------------------- 1 | {"commit_id": "5b56f5470f6a55492585714c78b8e461875284d6", "parent_id": "3a0a8e3a9a2b1bc834dc1cc4b372a6401d1e8e10", "version": 1, "root_id": "ba84b0b6e3e58d558933cb504bd7c0fee777ebbc", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Added directory \"tmp_folder\"", "ctime": 1517211587, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/92/8fc8c308d441b224b9238d47938cc6ac944950: -------------------------------------------------------------------------------- 1 | {"commit_id": "928fc8c308d441b224b9238d47938cc6ac944950", "ctime": 1517210894, "description": "Added \"create_moved_file.md\"", "repo_desc": "", "root_id": 
"2475bcb9dbe92ad9728d07a96c02d00a374c55dd", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "9484c79dc0f6f50dcb3116ecfa56c145bf7d113d", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/b9/dbd0c72d8a0ccfd9110d638397c52d1540c114: -------------------------------------------------------------------------------- 1 | {"commit_id": "b9dbd0c72d8a0ccfd9110d638397c52d1540c114", "parent_id": "3d4659a6cbe13d0c2111455f590fa1537a6ec1e4", "version": 1, "root_id": "fc9d6f213586a57ba2c7fab2ceabdb6ff5ddf846", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"moved_folder.md\"", "ctime": 1517211682, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/bb/f5382d4c63f58fc647b416e6c059f948e13277: -------------------------------------------------------------------------------- 1 | {"commit_id": "bbf5382d4c63f58fc647b416e6c059f948e13277", "parent_id": "4405b7234b1e9dd74fe7c4f6a844ce79198e0e19", "version": 1, "root_id": "0e01eb14c626bc33670ae5df2ebe8ed22bedcf3e", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Moved \"create_moved_file.md\"", "ctime": 1517211856, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- 
/tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/d4/797f387be27a9f3bf210441dd02fd715dbd454: -------------------------------------------------------------------------------- 1 | {"commit_id": "d4797f387be27a9f3bf210441dd02fd715dbd454", "parent_id": "07418435d577e814a94a42a35eeb0f6f93cf578d", "version": 1, "root_id": "a40bdbb2bad9ea3de9c5848d5c7d059fbf69408b", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"deleted_folder.md\"", "ctime": 1517211663, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/eb/d1fdb77760f23d2ab29e1d2e121484260adac9: -------------------------------------------------------------------------------- 1 | {"commit_id": "ebd1fdb77760f23d2ab29e1d2e121484260adac9", "ctime": 1517210882, "description": "Added \"create_new_file.md\"", "repo_desc": "", "root_id": "830181201ee00133d13736a332b0e476cc4cac01", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "5e3f290cbd811849f5697b10bd67502605f5f4c2", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/03/d5a857ccc2764e69a124cd8f3360aea1d5595f: -------------------------------------------------------------------------------- 1 | {"commit_id": "03d5a857ccc2764e69a124cd8f3360aea1d5595f", "ctime": 1517210898, "description": "Added \"create_deleted_file.md\"", "repo_desc": "", "root_id": "d9d1cd4f7281f194a226ebe1727de5351b0f6238", "repo_id": 
"3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "928fc8c308d441b224b9238d47938cc6ac944950", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/0a/3d3867a4ae7470485f2d591b9981ff885f59e3: -------------------------------------------------------------------------------- 1 | {"commit_id": "0a3d3867a4ae7470485f2d591b9981ff885f59e3", "parent_id": "c4c0925d53b61d173ddeeac9b45c34aa0e19608f", "version": 1, "root_id": "3c1000329fcb5a51a9d1860d9b86ce7f4c7ceb5f", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"create_renamed_file.md\"", "ctime": 1517211510, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/13/fbc1e153107678f501e1c807b5797269cbe601: -------------------------------------------------------------------------------- 1 | {"commit_id": "13fbc1e153107678f501e1c807b5797269cbe601", "parent_id": "0a3d3867a4ae7470485f2d591b9981ff885f59e3", "version": 1, "root_id": "51346ce17df5ed7c3b4492405086b5bea03c5247", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"create_moved_file.md\"", "ctime": 1517211537, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- 
/tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/78/78ea888e60937b73e039461c456364f1aa138f: -------------------------------------------------------------------------------- 1 | {"commit_id": "7878ea888e60937b73e039461c456364f1aa138f", "parent_id": "13fbc1e153107678f501e1c807b5797269cbe601", "version": 1, "root_id": "3c8ab7d86e67138b1aa48824866986522c011391", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"create_deleted_file.md\"", "ctime": 1517211552, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/94/84c79dc0f6f50dcb3116ecfa56c145bf7d113d: -------------------------------------------------------------------------------- 1 | {"commit_id": "9484c79dc0f6f50dcb3116ecfa56c145bf7d113d", "ctime": 1517210887, "description": "Added \"create_renamed_file.md\"", "repo_desc": "", "root_id": "cca3625387a7bcae54d06c7e2211e3ca09548d77", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "ebd1fdb77760f23d2ab29e1d2e121484260adac9", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/c3/9a521ab58e216103a8c668dda697a51f870426: -------------------------------------------------------------------------------- 1 | {"commit_id": "c39a521ab58e216103a8c668dda697a51f870426", "parent_id": "23f58ab01d4c6d79b8b2ea011ef1614e4097b935", "version": 1, "root_id": "886d71ca196eab9a337ff25407fbb87e1a7b0a96", "creator_name": "admin@admin.com", "repo_id": 
"3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Deleted \"create_deleted_file.md\"", "ctime": 1517211894, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/c4/c0925d53b61d173ddeeac9b45c34aa0e19608f: -------------------------------------------------------------------------------- 1 | {"commit_id": "c4c0925d53b61d173ddeeac9b45c34aa0e19608f", "root_id": "5340f65b17b688040b287ac55b18709ee66d28d4", "version": 1, "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "repo_name": "obj_test", "creator_name": "admin@admin.com", "parent_id": "e3e0c6f57baa1a122af75bb395719d9725169982", "ctime": 1517211270, "creator": "0000000000000000000000000000000000000000", "description": "Modified \"create_new_file.md\"", "second_parent_id": null, "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/d1/b33fde55b05171aad287bdb87a43f2252a41d6: -------------------------------------------------------------------------------- 1 | {"commit_id": "d1b33fde55b05171aad287bdb87a43f2252a41d6", "ctime": 1517210903, "description": "Added \"create_modified_files.md\"", "repo_desc": "", "root_id": "a68ded33eb116a785f93f03cc5e395ba8f9252ef", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "03d5a857ccc2764e69a124cd8f3360aea1d5595f", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- 
/tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/dc/8cc4557a310f668bb81c3f44d90b1ff6270f38: -------------------------------------------------------------------------------- 1 | {"commit_id": "dc8cc4557a310f668bb81c3f44d90b1ff6270f38", "parent_id": "bbf5382d4c63f58fc647b416e6c059f948e13277", "version": 1, "root_id": "88a2c6aa5217c9d47969b6c9aeff0620c9c67be7", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Renamed \"create_renamed_file.md\"", "ctime": 1517211865, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/23/f58ab01d4c6d79b8b2ea011ef1614e4097b935: -------------------------------------------------------------------------------- 1 | {"commit_id": "23f58ab01d4c6d79b8b2ea011ef1614e4097b935", "parent_id": "dc8cc4557a310f668bb81c3f44d90b1ff6270f38", "version": 1, "root_id": "f185a9b682960f1de13d135c0c63f40765236f2a", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"create_modified_files.md\"", "ctime": 1517211880, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/25/2795fc32563b965a28f72e991af55ab8318f3e: -------------------------------------------------------------------------------- 1 | {"commit_id": "252795fc32563b965a28f72e991af55ab8318f3e", "parent_id": "7878ea888e60937b73e039461c456364f1aa138f", "version": 1, "root_id": "b7c5c9e9684a7907916878832fceef1252d47b1d", "creator_name": "admin@admin.com", "repo_id": 
"3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Modified \"create_modified_files.md\"", "ctime": 1517211571, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/7e/d2a485f96121f5d336363e8d61d9ef2911beeb: -------------------------------------------------------------------------------- 1 | {"commit_id": "7ed2a485f96121f5d336363e8d61d9ef2911beeb", "ctime": 1517210909, "description": "Added directory \"create_added_folder\"", "repo_desc": "", "root_id": "cd9e02d554601b5e5a6fce90507e750b7a3f4639", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "d1b33fde55b05171aad287bdb87a43f2252a41d6", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/e3/eea2da33dedda26690739c1ca7b7da2022fa40: -------------------------------------------------------------------------------- 1 | {"commit_id": "e3eea2da33dedda26690739c1ca7b7da2022fa40", "ctime": 1517210914, "description": "Added directory \"create_moved_folder\"", "repo_desc": "", "root_id": "9ae6c35f473c582d032d0e28460e670a52bfeda1", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "7ed2a485f96121f5d336363e8d61d9ef2911beeb", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- 
/tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/ff/c32568c059e9532cb426f19f8138c624c5cdd4: -------------------------------------------------------------------------------- 1 | {"commit_id": "ffc32568c059e9532cb426f19f8138c624c5cdd4", "parent_id": "258fbe2382a9887ba9815aad8757afd4b5ce8d3d", "version": 1, "root_id": "b655d8292dde39841d226d3e614d8896bff8ab8f", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Moved directory \"create_moved_folder\"", "ctime": 1517211903, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/15/c11b3e7643a9164cf4daf39b790421d1e934b3: -------------------------------------------------------------------------------- 1 | {"commit_id": "15c11b3e7643a9164cf4daf39b790421d1e934b3", "ctime": 1517210919, "description": "Added directory \"create_deleted_folder\"", "repo_desc": "", "root_id": "16f1fbdecdee83a17c3a608fef2c853be21c33e7", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "e3eea2da33dedda26690739c1ca7b7da2022fa40", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/25/8fbe2382a9887ba9815aad8757afd4b5ce8d3d: -------------------------------------------------------------------------------- 1 | {"commit_id": "258fbe2382a9887ba9815aad8757afd4b5ce8d3d", "parent_id": "c39a521ab58e216103a8c668dda697a51f870426", "version": 1, "root_id": "3eea84c126b9a71c4dc817cec2e524501e96c0f7", "creator_name": "admin@admin.com", 
"repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Removed directory \"create_deleted_folder\"", "ctime": 1517211896, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/a2/4bf4385e5df18922337390e757c4b7789d853d: -------------------------------------------------------------------------------- 1 | {"commit_id": "a24bf4385e5df18922337390e757c4b7789d853d", "parent_id": "ffc32568c059e9532cb426f19f8138c624c5cdd4", "version": 1, "root_id": "e03ba3543f3e96e20ffa2bb086f7a79194311724", "creator_name": "admin@admin.com", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "second_parent_id": null, "description": "Renamed directory \"create_renamed_folder\"", "ctime": 1517211913, "repo_name": "obj_test", "repo_desc": "", "repo_category": null, "no_local_history": 1} -------------------------------------------------------------------------------- /tests/conf/storage/commits/3f9e4aa5-d6ba-4066-a1d6-81824f422af1/e3/e0c6f57baa1a122af75bb395719d9725169982: -------------------------------------------------------------------------------- 1 | {"commit_id": "e3e0c6f57baa1a122af75bb395719d9725169982", "ctime": 1517210924, "description": "Added directory \"create_renamed_folder\"", "repo_desc": "", "root_id": "002375292edc2489e119e875d919db785001d0d4", "repo_id": "3f9e4aa5-d6ba-4066-a1d6-81824f422af1", "creator": "0000000000000000000000000000000000000000", "repo_category": null, "creator_name": "admin@admin.com", "parent_id": "15c11b3e7643a9164cf4daf39b790421d1e934b3", "second_parent_id": null, "repo_name": "obj_test", "no_local_history": 1, "version": 1} -------------------------------------------------------------------------------- /README.md: 
def ioctx_set_namespace(ioctx, namespace):
    '''Set the object namespace on a rados ioctx.

    The upstream python-rados client only gained a ``set_namespace``
    binding in later releases; for older versions (<= 0.94.x) we fall
    back to invoking the C-level ``rados_ioctx_set_namespace`` directly.

    :param ioctx: an open rados ioctx.
    :param namespace: the namespace name; must be a str.
    :raises TypeError: if namespace is not a string.
    '''
    ioctx.require_ioctx_open()
    if not isinstance(namespace, str):
        raise TypeError('namespace must be a string')

    if hasattr(ioctx, 'set_namespace'):
        ioctx.set_namespace(namespace)
    else:
        # A hack to set namespace for older version (<= 0.94.x) rados
        # python lib. c_char_p requires bytes under Python 3, so the
        # namespace must be encoded before handing it to the C call;
        # passing the str directly raises TypeError.
        rados.run_in_thread(ioctx.librados.rados_ioctx_set_namespace,
                            (ioctx.io, c_char_p(namespace.encode('utf-8'))))
class RedisCache(object):
    """Object cache backed by a Redis server.

    Values live under the key ``"<repo_id>-<obj_id>"`` and expire after
    ``expiry`` seconds. All cache failures are swallowed so a broken
    cache never breaks object reads.
    """

    def __init__(self, host, port, expiry, max_connections, passwd):
        self.expiry = expiry
        conn_pool = redis.ConnectionPool(host=host, port=port,
                                         max_connections=max_connections,
                                         password=passwd)
        self.client = redis.StrictRedis(connection_pool=conn_pool)

    def set_obj(self, repo_id, obj_id, value):
        """Best-effort write; errors leave the cache cold."""
        try:
            cache_key = '%s-%s' % (repo_id, obj_id)
            self.client.set(cache_key, value, ex=self.expiry)
        except Exception:
            return

    def get_obj(self, repo_id, obj_id):
        """Return the cached value, or None on a miss or any error."""
        try:
            cache_key = '%s-%s' % (repo_id, obj_id)
            return self.client.get(cache_key)
        except Exception:
            return None

def get_redis_cache(host, port, expiry, max_connections, passwd):
    """Factory helper for RedisCache."""
    return RedisCache(host, port, expiry, max_connections, passwd)
class SeafObjException(Exception):
    """Base class for every error raised by seafobj."""

    def __init__(self, msg):
        Exception.__init__(self)
        self.msg = str(msg)

    def __str__(self):
        return self.msg


class InvalidConfigError(SeafObjException):
    """Raised when an error happens while parsing seafile.conf."""


class ObjectFormatError(SeafObjException):
    """Raised when an error happens while parsing an object's format."""


class GetObjectError(SeafObjException):
    """Raised when reading an object from the backend fails."""


class SwiftAuthenticateError(SeafObjException):
    """Raised when authenticating against swift fails."""


class SeafCryptoException(SeafObjException):
    """Raised when a crypto related operation fails."""
class McCache(object):
    """Object cache backed by memcached (pylibmc).

    Keys take the form ``"<repo_id>-<obj_id>"``; entries expire after
    ``expiry`` seconds. A ClientPool is used because pylibmc clients are
    not safe to share across threads.
    """

    def __init__(self, mc_options, expiry):
        # Default server; may be overridden via --SERVER=<host:port> in
        # mc_options.
        self.server = 'localhost:11211'
        self.parse_mc_options(mc_options)
        self.expiry = expiry
        client = pylibmc.Client([self.server], behaviors={"tcp_nodelay": True})
        self.pool = ClientPool(client, 20)

    def parse_mc_options(self, mc_options):
        """Extract the server address from a memcached option string."""
        pattern = re.compile(r'--SERVER\s*=\s*(\S+)')
        match = pattern.match(mc_options)
        if match:
            self.server = match.group(1)

    def set_obj(self, repo_id, obj_id, value):
        """Best-effort write; all errors are ignored."""
        try:
            key = '%s-%s' % (repo_id, obj_id)
            with self.pool.reserve() as client:
                client.set(key, value, time=self.expiry)
        except Exception:
            return

    def get_obj(self, repo_id, obj_id):
        """Return the cached value, or None on a miss or any error."""
        try:
            key = '%s-%s' % (repo_id, obj_id)
            with self.pool.reserve() as client:
                # Return directly from inside the with block; the old
                # trailing "return None" after it was unreachable dead
                # code and has been removed.
                return client.get(key)
        except Exception:
            return None

def get_mc_cache(mc_options, expiry):
    """Factory helper for McCache."""
    return McCache(mc_options, expiry)
class SeafBlockManager(object):
    """Loads and stats seafile block objects from the configured backend."""

    def __init__(self):
        if not objstore_factory.enable_storage_classes:
            self.obj_store = objstore_factory.get_obj_store('blocks')
        else:
            self.obj_stores = objstore_factory.get_obj_stores('blocks')
        self._counter = 0

    def read_count(self):
        """Number of load_block() calls made so far."""
        return self._counter

    def _store_for(self, repo_id):
        # Resolve the backend serving this repo: the single store when
        # storage classes are disabled, otherwise the repo's mapped
        # store, falling back to the default one.
        if not objstore_factory.enable_storage_classes:
            return self.obj_store
        storage_id = get_repo_storage_id(repo_id)
        if storage_id:
            return self.obj_stores[storage_id]
        return self.obj_stores['__default__']

    def load_block(self, repo_id, version, obj_id):
        """Read a block's data from its backend."""
        self._counter += 1
        return self._store_for(repo_id).read_obj(repo_id, version, obj_id)

    def stat_block(self, repo_id, version, obj_id):
        """Return the size of a block."""
        return self._store_for(repo_id).stat(repo_id, version, obj_id)
import threading
import pytest
from seafobj import block_mgr

class BlockFixture(object):
    """Holds the repo and block ids used by the block loading tests.

    Deliberately not named with a ``Test`` prefix: pytest attempts to
    collect ``Test*`` classes and emits a cannot-collect warning because
    this class defines ``__init__``.
    """
    def __init__(self):
        self.repo_id = '3f9e4aa5-d6ba-4066-a1d6-81824f422af1'
        self.modified_bkid = '125f1e9dc9f3eca5a6819f9b4a2e17e53d7e2f78'
        self.new_bkid = '2949afb5a9c351b9415b91c8f3d0d98991118c11'
        self.renamed_bkid = 'b73b3cf6dc021d20c7a0e9bedf46a5b6a58bdd53'
        self.moved_bkid = '1569cf662c7befe4c4891a22cc7a1c035bc8bfac'

def load_block():
    """Load every known block once and verify its content."""
    fixture = BlockFixture()
    seafblk = block_mgr.load_block(fixture.repo_id, 1, fixture.new_bkid)
    assert b'this is new file.' in seafblk
    seafblk = block_mgr.load_block(fixture.repo_id, 1, fixture.modified_bkid)
    assert b'this is modified file' in seafblk
    seafblk = block_mgr.load_block(fixture.repo_id, 1, fixture.renamed_bkid)
    assert b'this is renamed file.' in seafblk
    seafblk = block_mgr.load_block(fixture.repo_id, 1, fixture.moved_bkid)
    assert b'this is moved file.' in seafblk

def test_load_block():
    for _ in range(100):
        load_block()

Success = True
def catch_with_load_block():
    """Record worker-thread failures in the Success flag so the spawning
    thread can assert on them (exceptions raised in worker threads are
    otherwise invisible to the main thread)."""
    try:
        test_load_block()
    except Exception as e:
        global Success
        Success = False
        raise e

def test_load_block_with_multi_thread():
    ths = []
    for _ in range(20):
        th = threading.Thread(target=catch_with_load_block)
        ths.append(th)
        th.start()
    for th in ths:
        th.join()
    assert Success == True
import threading
import pytest
from seafobj import commit_mgr
from seafobj.commits import SeafCommit

class CommitFixture(object):
    """Holds the repo and commit ids used by the commit loading tests.

    Deliberately not named with a ``Test`` prefix: pytest attempts to
    collect ``Test*`` classes and emits a cannot-collect warning because
    this class defines ``__init__``.
    """
    def __init__(self):
        self.repo_id = '3f9e4aa5-d6ba-4066-a1d6-81824f422af1'
        self.last_commit = '4405b7234b1e9dd74fe7c4f6a844ce79198e0e19'
        self.head_commit = 'a24bf4385e5df18922337390e757c4b7789d853d'

def load_commits():
    """Load the head commit and an older commit, verifying their fields."""
    fixture = CommitFixture()
    seafcmt = commit_mgr.load_commit(fixture.repo_id, 1, fixture.head_commit)
    assert isinstance(seafcmt, SeafCommit) == True
    assert 'Renamed directory "create_renamed_folder"' == seafcmt.description
    assert 'ffc32568c059e9532cb426f19f8138c624c5cdd4' == seafcmt.parent_id
    assert 'obj_test' == seafcmt.repo_name
    assert 1517211913 == seafcmt.ctime
    seafcmt = commit_mgr.load_commit(fixture.repo_id, 1, fixture.last_commit)
    assert 'Modified "added_folder.md"' == seafcmt.description
    assert '9e4705d102d86756eb8ed9d8d16922ee3212c7c5' == seafcmt.parent_id
    assert 'obj_test' == seafcmt.repo_name
    assert 1517211712 == seafcmt.ctime

def test_load_commit():
    for _ in range(100):
        load_commits()

Success = True
def catch_with_load_commits():
    """Record worker-thread failures in Success so the main thread sees them."""
    try:
        test_load_commit()
    except Exception as e:
        global Success
        Success = False
        raise e

def test_load_commit_with_multi_thread():
    ths = []
    for _ in range(20):
        th = threading.Thread(target=catch_with_load_commits)
        ths.append(th)
        th.start()
    for th in ths:
        th.join()
    assert Success == True
from .base import AbstractObjStore

from seafobj.exceptions import GetObjectError
from objwrapper.alioss import SeafOSSClient

class SeafObjStoreOSS(AbstractObjStore):
    '''Aliyun OSS backend for seafile objects.

    Objects are stored under keys of the form "<repo_id>/<obj_id>".
    '''
    def __init__(self, compressed, oss_conf, crypto=None, cache=None):
        AbstractObjStore.__init__(self, compressed, crypto, cache)
        self.oss_client = SeafOSSClient(oss_conf)
        self.bucket_name = oss_conf.bucket_name

    def _key(self, repo_id, obj_id):
        # OSS object key layout: "<repo_id>/<obj_id>".
        return '%s/%s' % (repo_id, obj_id)

    def read_obj_raw(self, repo_id, version, obj_id):
        '''Read the raw (possibly compressed/encrypted) object bytes.'''
        return self.oss_client.read_obj(self._key(repo_id, obj_id))

    def get_name(self):
        return 'OSS storage backend'

    def list_objs(self, repo_id=None):
        '''Yield [repo_id, obj_id, size] triples for every stored object.'''
        for entry in self.oss_client.list_objs(repo_id):
            parts = entry[0].split('/', 1)
            if len(parts) == 2:
                yield [parts[0], parts[1], entry[1]]

    def obj_exists(self, repo_id, obj_id):
        return self.oss_client.obj_exists(self._key(repo_id, obj_id))

    def write_obj(self, data, repo_id, obj_id):
        self.oss_client.write_obj(data, self._key(repo_id, obj_id))

    def remove_obj(self, repo_id, obj_id):
        self.oss_client.remove_obj(self._key(repo_id, obj_id))

    def stat_raw(self, repo_id, obj_id):
        return self.oss_client.stat_obj(self._key(repo_id, obj_id))

    def get_container_name(self):
        return self.bucket_name
import boto3
from botocore.exceptions import ClientError
from objwrapper.s3 import SeafS3Client

from .base import AbstractObjStore

class SeafObjStoreS3(AbstractObjStore):
    """S3 backend for seafile objects.

    Objects are stored under keys of the form "<repo_id>/<obj_id>".
    """
    def __init__(self, compressed, s3_conf, crypto=None, cache=None):
        AbstractObjStore.__init__(self, compressed, crypto, cache)
        self.s3_client = SeafS3Client(s3_conf)
        self.bucket_name = s3_conf.bucket_name
        # Prefer an explicitly configured endpoint host; otherwise derive
        # the regional AWS endpoint.
        if s3_conf.host:
            self.domain = s3_conf.host
        else:
            self.domain = "s3." + s3_conf.aws_region + ".amazonaws.com"

    def _key(self, repo_id, obj_id):
        # S3 object key layout: "<repo_id>/<obj_id>".
        return '%s/%s' % (repo_id, obj_id)

    def read_obj_raw(self, repo_id, version, obj_id):
        """Read the raw (possibly compressed/encrypted) object bytes."""
        return self.s3_client.read_obj(self._key(repo_id, obj_id))

    def get_name(self):
        return 'S3 storage backend'

    def list_objs(self, repo_id=None):
        """Yield [repo_id, obj_id, size] triples for every stored object."""
        for entry in self.s3_client.list_objs(repo_id):
            parts = entry[0].split('/', 1)
            if len(parts) == 2:
                yield [parts[0], parts[1], entry[1]]

    def obj_exists(self, repo_id, obj_id):
        return self.s3_client.obj_exists(self._key(repo_id, obj_id))

    def write_obj(self, data, repo_id, obj_id):
        self.s3_client.write_obj(data, self._key(repo_id, obj_id))

    def remove_obj(self, repo_id, obj_id):
        self.s3_client.remove_obj(self._key(repo_id, obj_id))

    def stat_raw(self, repo_id, obj_id):
        return self.s3_client.stat_obj(self._key(repo_id, obj_id))

    def get_container_name(self):
        """Return "<endpoint>/<bucket>" for display purposes."""
        return self.domain + "/" + self.bucket_name
import http.client
import logging

import oss2

from objwrapper.exceptions import InvalidConfigError

# set log level to WARNING
# the api set_file_logger exists after oss2 2.6.0, which has a lot of
# 'INFO' log. Note: `logging` was previously not imported, so this call
# always raised NameError inside the except and the log level was never
# actually lowered.
try:
    log_file_path = "log.log"
    oss2.set_file_logger(log_file_path, 'oss2', logging.WARNING)
except Exception:
    # Best effort: older oss2 releases lack set_file_logger.
    pass
class OSSConf(object):
    '''Holds Aliyun OSS connection settings.

    When no endpoint host is configured, an internal endpoint is derived
    from the region: "oss-cn-<region>-internal.aliyuncs.com".
    '''
    def __init__(self, key_id, key, bucket_name, host, region, use_https):
        if not host and not region:
            raise InvalidConfigError('endpoint and region are not configured')
        self.host = host
        if not host:
            self.host = 'oss-cn-%s-internal.aliyuncs.com' % region
        self.key_id = key_id
        self.key = key
        self.bucket_name = bucket_name
        self.region = region
        self.use_https = use_https

class SeafOSSClient(object):
    '''Wraps a oss connection and a bucket'''
    def __init__(self, conf):
        self.conf = conf
        if conf.use_https:
            host = 'https://%s' % conf.host
        else:
            host = 'http://%s' % conf.host
        # Due to a bug in httplib we can't use https
        # NOTE(review): the `host` value built above is never used; the
        # oss2 objects below take conf.host directly, so use_https seems
        # to have no effect -- confirm intent before changing.
        self.auth = oss2.Auth(conf.key_id, conf.key)
        self.service = oss2.Service(self.auth, conf.host)
        self.bucket = oss2.Bucket(self.auth, conf.host, conf.bucket_name)

    def read_obj(self, obj_id):
        '''Return the full content of the object.'''
        return self.bucket.get_object(obj_id).read()

    def get_name(self):
        return 'OSS storage backend'

    def list_objs(self, prefix=None):
        '''Yield [key, size] pairs for every object under prefix.'''
        for entry in oss2.ObjectIterator(self.bucket, prefix=prefix):
            if entry.key:
                yield [entry.key, entry.size]

    def obj_exists(self, key):
        return self.bucket.object_exists(key)

    def write_obj(self, data, key, ctime=-1):
        '''Store data under key; a non-negative ctime is kept as metadata.'''
        headers = {'x-oss-meta-ctime': str(ctime)} if ctime >= 0 else None
        self.bucket.put_object(key, data, headers=headers)

    def remove_obj(self, key):
        self.bucket.delete_object(key)

    def stat_obj(self, key):
        '''Return the object's size in bytes.'''
        return int(self.bucket.get_object_meta(key).headers['Content-Length'])

    def get_ctime(self, key):
        '''Return the stored ctime metadata, or 0 if absent or invalid.'''
        headers = self.bucket.head_object(key).headers
        ctime = headers.get('x-oss-meta-ctime', '')
        try:
            return float(ctime)
        except:
            return 0
import threading
import pytest
from seafobj import fs_mgr

class TestFSManager():
    """Holds the repo, dir and file ids used by the fs loading tests."""
    def __init__(self):
        self.repo_id = '3f9e4aa5-d6ba-4066-a1d6-81824f422af1'
        self.first_dir_id = 'b27235c5278ccba69b392fa1734138bf4100693c'
        self.second_dir_id = '3ef00cc2e396b7492eaee6e7d0fc6e23279569c5'
        self.first_file_id = '9771cd218f1002e59c6f0dc6ee2dc57dc9dde698'
        self.second_file_id = '67619f6d87f2f232bb0a821649f9fd1358eaa58c'

def load_seafdir():
    """Load two known directories and verify their dirents."""
    fixture = TestFSManager()
    seafdir = fs_mgr.load_seafdir(fixture.repo_id, 1, fixture.first_dir_id)
    assert fixture.first_dir_id == seafdir.obj_id
    names = list(seafdir.dirents.keys())
    assert 'create_moved_folder' in names
    assert 'create_moved_file.md' in names
    moved_file = seafdir.dirents.get('create_moved_file.md', None)
    assert moved_file.name == 'create_moved_file.md'
    assert '045dfc08495b5c6cbc1a4dc347f5e2987fd809f4' == seafdir.dirents['create_moved_file.md'].id
    moved_folder = seafdir.dirents.get('create_moved_folder', None)
    assert moved_folder.name == 'create_moved_folder'
    assert '05a6f0455d1f11ecfc202f5e218274b092fd3dbc' == seafdir.dirents['create_moved_folder'].id
    seafdir = fs_mgr.load_seafdir(fixture.repo_id, 1, fixture.second_dir_id)
    assert 'added_folder.md' in list(seafdir.dirents.keys())
    assert fixture.second_dir_id == seafdir.obj_id


def test_load_seafdir():
    for _ in range(100):
        load_seafdir()

Success = True
def catch_with_load_seafdir():
    """Record worker-thread failures in Success so the main thread sees them."""
    try:
        test_load_seafdir()
    except Exception as e:
        global Success
        Success = False
        raise e

def test_load_seafdir_with_mutli_thread():
    global Success
    Success = True
    workers = []
    for _ in range(20):
        worker = threading.Thread(target=catch_with_load_seafdir)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    assert Success == True


def load_seafile():
    """Load two known files and verify their block lists."""
    fixture = TestFSManager()
    seafile = fs_mgr.load_seafile(fixture.repo_id, 1, fixture.first_file_id)
    assert len(seafile.blocks) > 0
    assert '2949afb5a9c351b9415b91c8f3d0d98991118c11' == seafile.blocks[0]
    second_seafile = fs_mgr.load_seafile(fixture.repo_id, 1, fixture.second_file_id)
    assert 1 == len(second_seafile.blocks)
    assert '125f1e9dc9f3eca5a6819f9b4a2e17e53d7e2f78' == second_seafile.blocks[0]

def test_load_seafile():
    for _ in range(100):
        load_seafile()

def catch_with_load_seafile():
    """Record worker-thread failures in Success so the main thread sees them."""
    try:
        test_load_seafile()
    except Exception as e:
        global Success
        Success = False
        raise e

def test_load_seafile_with_multi_thread():
    global Success
    Success = True
    workers = []
    for _ in range(20):
        worker = threading.Thread(target=catch_with_load_seafile)
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    assert Success == True
class AbstractObjStore(object):
    '''Base class of seafile object backends.

    Subclasses implement the *_raw methods; this class layers optional
    caching, decryption (crypto) and zlib decompression (for version-1
    repos when the backend stores compressed data) on top of them.
    '''

    def __init__(self, compressed, crypto=None, cache=None):
        # compressed: whether stored objects are zlib-compressed (v1 repos).
        self.compressed = compressed
        self.crypto = crypto
        self.cache = cache

    def _decode(self, data, version, decompress):
        '''Apply decryption and (optionally) decompression to raw bytes.

        Shared by read_obj and read_decrypted; previously this logic was
        duplicated four times.
        '''
        if self.crypto:
            data = self.crypto.dec_data(data)
        if decompress and self.compressed and version == 1:
            data = zlib.decompress(data)
        return data

    def read_obj(self, repo_id, version, obj_id):
        '''Return the decrypted, decompressed content of an object.

        Raises GetObjectError on any failure.
        '''
        try:
            if self.cache:
                data = self.cache.get_obj(repo_id, obj_id)
                if data:
                    return self._decode(data, version, True)
            data = self.read_obj_raw(repo_id, version, obj_id)
            if self.cache and data:
                # The cache stores the raw (still encrypted/compressed) bytes.
                self.cache.set_obj(repo_id, obj_id, data)
            data = self._decode(data, version, True)
        except Exception as e:
            raise GetObjectError('Failed to read object %s/%s: %s' % (repo_id, obj_id, e))

        return data

    def read_decrypted(self, repo_id, version, obj_id):
        '''Return the decrypted content of an object, without decompressing.

        Raises GetObjectError on any failure.
        '''
        try:
            if self.cache:
                data = self.cache.get_obj(repo_id, obj_id)
                if data:
                    return self._decode(data, version, False)
            data = self.read_obj_raw(repo_id, version, obj_id)
            if self.cache and data:
                self.cache.set_obj(repo_id, obj_id, data)
            data = self._decode(data, version, False)
        except Exception as e:
            raise GetObjectError('Failed to read decrypted object %s/%s: %s' % (repo_id, obj_id, e))

        return data

    def read_obj_raw(self, repo_id, version, obj_id):
        '''Read the raw content of the object from the backend. Each backend
        subclass should have their own implementation.

        '''
        raise NotImplementedError

    def get_name(self):
        '''Get the backend name for display in the log'''
        raise NotImplementedError

    def list_objs(self, repo_id=None):
        '''List all objects'''
        raise NotImplementedError

    def obj_exists(self, repo_id, obj_id):
        raise NotImplementedError

    def write_obj(self, data, repo_id, obj_id):
        '''Write data to destination backend'''
        raise NotImplementedError

    def stat(self, repo_id, version, obj_id):
        '''Return the logical (decoded) size of an object in bytes.

        When the object is encrypted or compressed, the stored size differs
        from the logical size, so the object has to be read and decoded.
        BUG FIX: the parameter was misspelled `verison`; positional callers
        are unaffected. The no-op `try/except: raise` wrapper was removed.
        '''
        if self.crypto or self.compressed:
            return len(self.read_obj(repo_id, version, obj_id))
        return self.stat_raw(repo_id, obj_id)

    def stat_raw(self, repo_id, obj_id):
        raise NotImplementedError

    def get_container_name(self):
        raise NotImplementedError
import pytest
import uuid
from utils import get_s3_client, get_oss_client

# Payloads written to three distinct object keys across two "repos".
data1 = 'test file content'
data2 = "second test file content"
data3 = "third test file content"

repo_id1 = str(uuid.uuid4())
repo_id2 = str(uuid.uuid4())

def read_and_write_client(client):
    """Exercise the full write/stat/list/read/remove cycle of a client."""
    key1 = f'{repo_id1}/249408dcc7aaba6e0948cb2d1950aaf4c86078b0'
    key2 = f'{repo_id1}/8228f1d3877efa3395475ccccd065f87d7727e29'
    key3 = f'{repo_id2}/97c5a757b1aa4de4a9d7c07f3d66648e43c562e7'
    pairs = ((key1, data1), (key2, data2), (key3, data3))

    for key, data in pairs:
        client.write_obj(data, key)
    for key, data in pairs:
        assert client.obj_exists(key)
        assert client.stat_obj(key) == len(data)
        # No ctime metadata was supplied, so the backend reports 0.
        assert client.get_ctime(key) == 0

    # A full listing must contain exactly our three objects; the bucket may
    # hold unrelated keys, so filter by our two repo ids.
    matched = 0
    for obj in client.list_objs():
        parts = obj[0].split('/', 1)
        if len(parts) != 2:
            continue
        if parts[0] == repo_id1 or parts[0] == repo_id2:
            matched += 1
    assert matched == 3

    # Listing with a repo prefix must only return that repo's objects.
    matched = 0
    for obj in client.list_objs(repo_id1):
        parts = obj[0].split('/', 1)
        if len(parts) != 2:
            continue
        assert parts[0] == repo_id1
        assert parts[1] in ('249408dcc7aaba6e0948cb2d1950aaf4c86078b0',
                            '8228f1d3877efa3395475ccccd065f87d7727e29')
        matched += 1
    assert matched == 2

    matched = 0
    for obj in client.list_objs(repo_id2):
        parts = obj[0].split('/', 1)
        if len(parts) != 2:
            continue
        assert parts[0] == repo_id2
        assert parts[1] == '97c5a757b1aa4de4a9d7c07f3d66648e43c562e7'
        matched += 1
    assert matched == 1

    # Contents round-trip as UTF-8 bytes.
    for key, data in pairs:
        assert client.read_obj(key) == data.encode("utf-8")

    for key, _ in pairs:
        client.remove_obj(key)
        assert not client.obj_exists(key)

    # Writing with an explicit ctime stores it in the object metadata.
    ctime = 1729934744
    for key, data in pairs:
        client.write_obj(data, key, ctime)
    for key, _ in pairs:
        assert client.get_ctime(key) == ctime

    for key, _ in pairs:
        client.remove_obj(key)
        assert not client.obj_exists(key)

@pytest.mark.parametrize(
    "sse_c_key",
    [None, 'I6gkJwqfGiXRNx7wW7au23iznCy///Q='],
)
def test_obj_wrapper_s3(sse_c_key):
    client = get_s3_client(sse_c_key)
    assert client.get_name() == "S3 storage backend"
    read_and_write_client(client)

def test_obj_wrapper_oss():
    client = get_oss_client()
    assert client.get_name() == "OSS storage backend"
    read_and_write_client(client)
AbstractObjStore.__init__(self, compressed, crypto, cache) 15 | self.obj_dir = obj_dir 16 | self.compressed = compressed 17 | 18 | def read_obj_raw(self, repo_id, version, obj_id): 19 | path = id_to_path(os.path.join(self.obj_dir, repo_id), obj_id) 20 | 21 | with open(path, 'rb') as fp: 22 | data = fp.read() 23 | 24 | return data 25 | 26 | def get_name(self): 27 | return 'filesystem storage backend' 28 | 29 | def list_objs(self, repo_id=None): 30 | top_path = self.obj_dir 31 | if repo_id: 32 | repo_path = os.path.join(top_path, repo_id) 33 | if not os.path.exists(repo_path): 34 | return 35 | for spath in os.listdir(repo_path): 36 | obj_path = os.path.join(repo_path, spath) 37 | if not os.path.exists(obj_path): 38 | continue 39 | for lpath in os.listdir(obj_path): 40 | obj_id = spath + lpath 41 | obj = [repo_id, obj_id, 0] 42 | yield obj 43 | else: 44 | if not os.path.exists(top_path): 45 | return 46 | for _repo_id in os.listdir(top_path): 47 | repo_path = os.path.join(top_path, _repo_id) 48 | if not os.path.exists(repo_path): 49 | return 50 | for spath in os.listdir(repo_path): 51 | obj_path = os.path.join(repo_path, spath) 52 | if not os.path.exists(obj_path): 53 | continue 54 | for lpath in os.listdir(obj_path): 55 | obj_id = spath + lpath 56 | obj = [_repo_id, obj_id, 0] 57 | yield obj 58 | 59 | def obj_exists(self, repo_id, obj_id): 60 | dirname = self.obj_dir 61 | filepath = os.path.join(dirname, repo_id, obj_id[:2], obj_id[2:]) 62 | 63 | return os.path.exists(filepath) 64 | 65 | def write_obj(self, data, repo_id, obj_id): 66 | path = os.path.join(self.obj_dir, repo_id, obj_id[:2]) 67 | if not os.path.exists(path): 68 | try: 69 | os.makedirs(path) 70 | except OSError as e: 71 | if e.errno != errno.EEXIST: 72 | raise 73 | 74 | with NamedTemporaryFile(mode='w+b', dir=path, delete=False) as fp: 75 | fp.write(data) 76 | 77 | filename = os.path.join(path, obj_id[2:]) 78 | os.rename(fp.name, filename) 79 | 80 | def remove_obj(self, repo_id, obj_id): 81 | path = 
os.path.join(self.obj_dir, repo_id, obj_id[:2], obj_id[2:]) 82 | 83 | if os.path.exists(path): 84 | try: 85 | os.remove(path) 86 | except OSError as e: 87 | raise 88 | 89 | def stat_raw(self, repo_id, obj_id): 90 | path = os.path.join(self.obj_dir, repo_id, obj_id[:2], obj_id[2:]) 91 | 92 | stat_info = None 93 | if os.path.exists(path): 94 | try: 95 | stat_info = os.stat(path) 96 | except OSError as e: 97 | raise 98 | if not stat_info: 99 | return -1 100 | return stat_info.st_size 101 | 102 | def get_container_name(self): 103 | return self.obj_dir 104 | -------------------------------------------------------------------------------- /seafobj/backends/ceph.py: -------------------------------------------------------------------------------- 1 | import queue 2 | import threading 3 | 4 | import rados 5 | 6 | from .base import AbstractObjStore 7 | 8 | from seafobj.utils.ceph_utils import ioctx_set_namespace 9 | from rados import LIBRADOS_ALL_NSPACES 10 | 11 | class CephConf(object): 12 | def __init__(self, ceph_conf_file, pool_name, ceph_client_id=None): 13 | self.pool_name = pool_name 14 | self.ceph_conf_file = ceph_conf_file 15 | self.ceph_client_id = ceph_client_id 16 | 17 | class IoCtxPool(object): 18 | '''since we need to set the namespace before read the object, we need to 19 | use a different ioctx per thread. 
class SeafObjStoreCeph(AbstractObjStore):
    '''Ceph backend for seafile objects.

    All rados access goes through self.ceph_client's ioctx pool; every
    method must return its ioctx to the pool on every code path.
    '''

    def __init__(self, compressed, ceph_conf, crypto=None, cache=None):
        AbstractObjStore.__init__(self, compressed, crypto, cache)
        self.ceph_client = SeafCephClient(ceph_conf)
        self.pool_name = ceph_conf.pool_name

    def read_obj_raw(self, repo_id, version, obj_id):
        '''Read the full raw content of one object.'''
        return self.ceph_client.read_object_content(repo_id, obj_id)

    def get_name(self):
        return 'Ceph storage backend'

    def list_objs(self, repo_id=None):
        '''Yield [namespace, key, 0] for each object.

        BUG FIX: the ioctx is now returned to the pool in a finally block,
        so it is no longer leaked when the consumer stops iterating early
        (generator close) or when listing raises.
        '''
        if repo_id is None:
            ioctx = self.ceph_client.ioctx_pool.get_ioctx(LIBRADOS_ALL_NSPACES)
        else:
            ioctx = self.ceph_client.ioctx_pool.get_ioctx(repo_id)
        try:
            for obj in ioctx.list_objects():
                yield [obj.nspace, obj.key, 0]
        finally:
            self.ceph_client.ioctx_pool.return_ioctx(ioctx)

    def obj_exists(self, repo_id, obj_id):
        ioctx = self.ceph_client.ioctx_pool.get_ioctx(repo_id)
        try:
            ioctx.stat(obj_id)
        except rados.ObjectNotFound:
            return False
        finally:
            self.ceph_client.ioctx_pool.return_ioctx(ioctx)

        return True

    def write_obj(self, data, repo_id, obj_id):
        # BUG FIX: acquire the ioctx *outside* the try block. If get_ioctx()
        # itself failed, the old finally clause referenced an unbound name
        # and masked the real error with an UnboundLocalError.
        ioctx = self.ceph_client.ioctx_pool.get_ioctx(repo_id)
        try:
            ioctx.write_full(obj_id, data)
        finally:
            self.ceph_client.ioctx_pool.return_ioctx(ioctx)

    def remove_obj(self, repo_id, obj_id):
        # Same acquire-before-try fix as write_obj.
        ioctx = self.ceph_client.ioctx_pool.get_ioctx(repo_id)
        try:
            ioctx.remove_object(obj_id)
        finally:
            self.ceph_client.ioctx_pool.return_ioctx(ioctx)

    def stat_raw(self, repo_id, obj_id):
        '''Return object size in bytes; propagates rados.ObjectNotFound.'''
        ioctx = self.ceph_client.ioctx_pool.get_ioctx(repo_id)
        try:
            # stat() returns (size, mtime); removed the no-op
            # `except rados.ObjectNotFound: raise` wrapper.
            return ioctx.stat(obj_id)[0]
        finally:
            self.ceph_client.ioctx_pool.return_ioctx(ioctx)

    def get_container_name(self):
        return self.pool_name
*EVP_CIPHER_CTX_new(void); 17 | EVP_CIPHER_CTX_new = dl.EVP_CIPHER_CTX_new 18 | EVP_CIPHER_CTX_new.restype = c_void_p 19 | EVP_CIPHER_CTX_new.argtypes = [] 20 | 21 | EVP_aes_256_cbc = dl.EVP_aes_256_cbc 22 | EVP_aes_256_cbc.restype = c_void_p 23 | EVP_aes_256_cbc.argtypes = [] 24 | 25 | # int EVP_EncryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *cipher, ENGINE *impl, const unsigned char *key, const unsigned char *iv); 26 | EVP_EncryptInit_ex = dl.EVP_EncryptInit_ex 27 | EVP_EncryptInit_ex.restype = c_int 28 | EVP_EncryptInit_ex.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_char_p] 29 | 30 | # int EVP_EncryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, const unsigned char *in, int inl); 31 | EVP_EncryptUpdate = dl.EVP_EncryptUpdate 32 | EVP_EncryptUpdate.restype = c_int 33 | EVP_EncryptUpdate.argtypes = [c_void_p, c_char_p, POINTER(c_int), c_char_p, c_int] 34 | 35 | # int EVP_EncryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl); 36 | EVP_EncryptFinal_ex = dl.EVP_EncryptFinal_ex 37 | EVP_EncryptFinal_ex.restype = c_int 38 | EVP_EncryptFinal_ex.argtypes = [c_void_p, c_char_p, POINTER(c_int)] 39 | 40 | # int EVP_DecryptInit_ex(EVP_CIPHER_CTX *ctx, const EVP_CIPHER *type, ENGINE *impl, unsigned char *key, unsigned char *iv); 41 | EVP_DecryptInit_ex = dl.EVP_DecryptInit_ex 42 | EVP_DecryptInit_ex.restype = c_int 43 | EVP_DecryptInit_ex.argtypes = [c_void_p, c_void_p, c_void_p, c_char_p, c_char_p] 44 | 45 | # int EVP_DecryptUpdate(EVP_CIPHER_CTX *ctx, unsigned char *out, int *outl, unsigned char *in, int inl); 46 | EVP_DecryptUpdate = dl.EVP_DecryptUpdate 47 | EVP_DecryptUpdate.restype = c_int 48 | EVP_DecryptUpdate.argtypes = [c_void_p, c_char_p, POINTER(c_int), c_char_p, c_int] 49 | 50 | # int EVP_DecryptFinal_ex(EVP_CIPHER_CTX *ctx, unsigned char *outm, int *outl); 51 | EVP_DecryptFinal_ex = dl.EVP_DecryptFinal_ex 52 | EVP_DecryptFinal_ex.restype = c_int 53 | EVP_DecryptFinal_ex.argtypes = [c_void_p, c_char_p, POINTER(c_int)] 54 
class SeafCrypto(object):
    '''AES-256-CBC encryption/decryption via the OpenSSL EVP API (ctypes).'''

    def __init__(self, key, iv):
        # key: AES-256 key bytes; iv: CBC initialization vector bytes.
        # Passed straight to EVP_*Init_ex — presumably 32 and 16 bytes
        # respectively (TODO confirm with the key-derivation caller).
        self.key = key
        self.iv = iv

    def _run_cipher(self, data, init_fn, update_fn, final_fn, op, out_size):
        '''Drive one EVP init/update/final cycle and return the output.

        Shared by enc_data and dec_data (the two bodies were duplicates).
        op is 'encrypt' or 'decrypt' and is used only in error messages;
        out_size is the worst-case output size for the update() call.
        '''
        ctx = EVP_CIPHER_CTX_new()
        if not ctx:
            raise SeafCryptoException('Failed to create cipher ctx')

        try:
            if init_fn(ctx, EVP_aes_256_cbc(), None,
                       self.key, self.iv) == 0:
                raise SeafCryptoException('Failed to init cipher ctx')

            out = create_string_buffer(out_size)
            out_len = c_int(0)
            if update_fn(ctx, out, byref(out_len),
                         data, len(data)) == 0:
                raise SeafCryptoException('Failed to %s update' % op)

            # The final CBC step emits at most one 16-byte block.
            out_final = create_string_buffer(16)
            out_final_len = c_int(0)
            if final_fn(ctx, out_final,
                        byref(out_final_len)) == 0:
                raise SeafCryptoException('Failed to %s final' % op)

            return out.raw[:out_len.value] + out_final.raw[:out_final_len.value]
        finally:
            # Free the ctx on every path; the old code already did this.
            EVP_CIPHER_CTX_free(ctx)

    def enc_data(self, data):
        '''Encrypt data; raises SeafCryptoException on empty input or failure.'''
        if not data:
            raise SeafCryptoException('Invalid encrypted data')
        # PKCS padding can add up to one extra 16-byte block.
        return self._run_cipher(data, EVP_EncryptInit_ex, EVP_EncryptUpdate,
                                EVP_EncryptFinal_ex, 'encrypt', len(data) + 16)

    def dec_data(self, data):
        '''Decrypt data; input must be a non-empty multiple of 16 bytes.'''
        if not data or len(data) % 16 != 0:
            raise SeafCryptoException('Invalid decrypted data')
        # Plaintext is never longer than the ciphertext.
        return self._run_cipher(data, EVP_DecryptInit_ex, EVP_DecryptUpdate,
                                EVP_DecryptFinal_ex, 'decrypt', len(data))
def init_db_session_class(config):
    """Configure and return a SQLAlchemy Session class for the seafile DB.

    Raises RuntimeError when the config's database section is malformed.
    """
    try:
        engine = create_engine_from_conf(config)
    except (configparser.NoOptionError, configparser.NoSectionError) as e:
        # BUG FIX: a botched 2to3 conversion previously *assigned* the caught
        # exception to configparser.NoSectionError ("configparser.NoSectionError
        # = xxx_todo_changeme") instead of catching both exception types.
        raise RuntimeError("invalid seafile config.") from e

    # Reflect the existing tables onto the automap Base.
    Base.prepare(autoload_with=engine)

    Session = sessionmaker(bind=engine)
    return Session

# This is used to fix the problem of "MySQL has gone away" that happens when
# mysql server is restarted or the pooled connections are closed by the mysql
# server beacause being idle for too long.
#
# See http://stackoverflow.com/a/17791117/1467959
def ping_connection(dbapi_connection, connection_record, connection_proxy): # pylint: disable=unused-argument
    """Pool 'checkout' listener: verify the checked-out connection is alive.

    Issues a trivial query; on failure, disposes the pool's cached
    connections and raises DisconnectionError so the pool reconnects.
    """
    cursor = dbapi_connection.cursor()
    try:
        cursor.execute("SELECT 1")
        cursor.close()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        logging.info('fail to ping database server, disposing all cached connections')
        connection_proxy._pool.dispose() # pylint: disable=protected-access

        # Raise DisconnectionError so the pool would create a new connection
        raise DisconnectionError()
CommitDiffer(object): 19 | def __init__(self, repo_id, version, root1, root2, handle_rename=False, fold_dirs=False): 20 | self.repo_id = repo_id 21 | self.version = version 22 | self.root1 = root1 23 | self.root2 = root2 24 | self.handle_rename = handle_rename 25 | self.fold_dirs = fold_dirs 26 | 27 | def diff_to_unicode(self): 28 | # you can also do this by overwriting key points 29 | res = [] 30 | diff_res = self.diff() 31 | for dirents in diff_res: 32 | for dirent in dirents: 33 | for key in list(dirent.__dict__.keys()): 34 | v = dirent.__dict__[key] 35 | if isinstance(v, str): 36 | dirent.__dict__[key] = v.decode('utf8') 37 | res.append(dirents) 38 | return tuple(res) 39 | 40 | def diff(self): 41 | added_files = [] 42 | deleted_files = [] 43 | deleted_dirs = [] 44 | modified_files = [] 45 | added_dirs = [] 46 | renamed_files = [] 47 | renamed_dirs = [] 48 | moved_files = [] 49 | moved_dirs = [] 50 | 51 | new_dirs = [] 52 | del_dirs = [] 53 | queued_dirs = [] # (path, dir_id1, dir_id2) 54 | 55 | if self.root1 == self.root2: 56 | return (added_files, deleted_files, added_dirs, deleted_dirs, 57 | modified_files, renamed_files, moved_files, 58 | renamed_dirs, moved_dirs) 59 | else: 60 | queued_dirs.append(('/', self.root1, self.root2)) 61 | 62 | while True: 63 | path = old_id = new_id = None 64 | try: 65 | path, old_id, new_id = queued_dirs.pop(0) 66 | except IndexError: 67 | break 68 | 69 | dir1 = fs_mgr.load_seafdir(self.repo_id, self.version, old_id) 70 | dir2 = fs_mgr.load_seafdir(self.repo_id, self.version, new_id) 71 | 72 | for dent in dir1.get_files_list(): 73 | new_dent = dir2.lookup_dent(dent.name) 74 | if not new_dent or new_dent.type != dent.type: 75 | deleted_files.append(DiffEntry(make_path(path, dent.name), dent.id, dent.size, modifier=dent.modifier, mtime=dent.mtime)) 76 | else: 77 | dir2.remove_entry(dent.name) 78 | if new_dent.id == dent.id: 79 | pass 80 | else: 81 | modified_files.append(DiffEntry(make_path(path, dent.name), new_dent.id, 
new_dent.size, modifier=new_dent.modifier, mtime=new_dent.mtime)) 82 | 83 | added_files.extend([DiffEntry(make_path(path, dent.name), dent.id, dent.size, modifier=dent.modifier, mtime=dent.mtime) for dent in dir2.get_files_list()]) 84 | 85 | for dent in dir1.get_subdirs_list(): 86 | new_dent = dir2.lookup_dent(dent.name) 87 | if not new_dent or new_dent.type != dent.type: 88 | del_dirs.append(DiffEntry(make_path(path, dent.name), dent.id, mtime=dent.mtime)) 89 | else: 90 | dir2.remove_entry(dent.name) 91 | if new_dent.id == dent.id: 92 | pass 93 | else: 94 | queued_dirs.append((make_path(path, dent.name), dent.id, new_dent.id)) 95 | 96 | new_dirs.extend([DiffEntry(make_path(path, dent.name), dent.id, mtime=dent.mtime) for dent in dir2.get_subdirs_list()]) 97 | 98 | if not self.fold_dirs: 99 | while True: 100 | # Process newly added dirs and its sub-dirs, all files under 101 | # these dirs should be marked as added. 102 | try: 103 | dir_dent = new_dirs.pop(0) 104 | added_dirs.append(DiffEntry(dir_dent.path, dir_dent.obj_id, mtime=dir_dent.mtime)) 105 | except IndexError: 106 | break 107 | d = fs_mgr.load_seafdir(self.repo_id, self.version, dir_dent.obj_id) 108 | added_files.extend([DiffEntry(make_path(dir_dent.path, dent.name), dent.id, dent.size, modifier=dent.modifier, mtime=dent.mtime) for dent in d.get_files_list()]) 109 | 110 | new_dirs.extend([DiffEntry(make_path(dir_dent.path, dent.name), dent.id, mtime=dent.mtime) for dent in d.get_subdirs_list()]) 111 | 112 | while True: 113 | try: 114 | dir_dent = del_dirs.pop(0) 115 | deleted_dirs.append(DiffEntry(dir_dent.path, dir_dent.obj_id, mtime=dir_dent.mtime)) 116 | except IndexError: 117 | break 118 | d = fs_mgr.load_seafdir(self.repo_id, self.version, dir_dent.obj_id) 119 | deleted_files.extend([DiffEntry(make_path(dir_dent.path, dent.name), dent.id, dent.size, modifier=dent.modifier, mtime=dent.mtime) for dent in d.get_files_list()]) 120 | 121 | del_dirs.extend([DiffEntry(make_path(dir_dent.path, dent.name), 
dent.id, mtime=dent.mtime) for dent in d.get_subdirs_list()]) 122 | 123 | else: 124 | deleted_dirs = del_dirs 125 | added_dirs = new_dirs 126 | 127 | if self.handle_rename: 128 | ret_added_files = [] 129 | ret_added_dirs = [] 130 | 131 | # If an empty file or dir is generated from renaming or moving, just add it into both added_files 132 | # and deleted_files, because we can't know where it actually come from. 133 | del_file_dict = {} 134 | for de in deleted_files: 135 | if de.obj_id != ZERO_OBJ_ID: 136 | del_file_dict[de.obj_id] = de 137 | 138 | for de in added_files: 139 | if de.obj_id in del_file_dict: 140 | del_de = del_file_dict[de.obj_id] 141 | if os.path.dirname(de.path) == os.path.dirname(del_de.path): 142 | # it's a rename operation if add and del are in the same dir 143 | renamed_files.append(DiffEntry(del_de.path, de.obj_id, de.size, de.path, modifier=de.modifier, mtime=de.mtime)) 144 | else: 145 | moved_files.append(DiffEntry(del_de.path, de.obj_id, de.size, de.path, modifier=de.modifier, mtime=de.mtime)) 146 | del del_file_dict[de.obj_id] 147 | else: 148 | ret_added_files.append(de) 149 | 150 | del_dir_dict = {} 151 | for de in deleted_dirs: 152 | if de.obj_id != ZERO_OBJ_ID: 153 | del_dir_dict[de.obj_id] = de 154 | 155 | for de in added_dirs: 156 | if de.obj_id in del_dir_dict: 157 | del_de = del_dir_dict[de.obj_id] 158 | if os.path.dirname(de.path) == os.path.dirname(del_de.path): 159 | renamed_dirs.append(DiffEntry(del_de.path, de.obj_id, -1, de.path, mtime=de.mtime)) 160 | else: 161 | moved_dirs.append(DiffEntry(del_de.path, de.obj_id, -1, de.path, mtime=de.mtime)) 162 | del del_dir_dict[de.obj_id] 163 | else: 164 | ret_added_dirs.append(de) 165 | 166 | ret_deleted_files = list(del_file_dict.values()) 167 | ret_deleted_dirs = list(del_dir_dict.values()) 168 | for de in deleted_files: 169 | if de.obj_id == ZERO_OBJ_ID: 170 | ret_deleted_files.append(de) 171 | for de in deleted_dirs: 172 | if de.obj_id == ZERO_OBJ_ID: 173 | 
import boto3
from botocore.exceptions import ClientError
from objwrapper.exceptions import InvalidConfigError
import requests
from datetime import datetime
import hmac
import hashlib
import base64
from lxml import etree

class S3Conf(object):
    """Configuration for an S3 (or S3-compatible) object storage backend."""
    def __init__(self, key_id, key, bucket_name, host, port, use_v4_sig, aws_region, use_https, path_style_request, sse_c_key, use_iam_role):
        # An endpoint can be derived either from an explicit host
        # (self-hosted S3-compatible service) or from an AWS region.
        if not host and not aws_region:
            raise InvalidConfigError('aws_region and host are not configured')
        self.key_id = key_id
        self.key = key
        self.bucket_name = bucket_name
        self.host = host
        self.port = port
        self.use_v4_sig = use_v4_sig
        self.aws_region = aws_region
        self.use_https = use_https
        self.path_style_request = path_style_request
        # SSE-C key; when set, every object operation sends it with AES256.
        self.sse_c_key = sse_c_key
        # When true, credentials come from the instance profile instead of keys.
        self.use_iam_role = use_iam_role


class SeafS3Client(object):
    """Wraps a s3 connection and a bucket"""
    def __init__(self, conf):
        self.conf = conf
        self.client = None
        self.bucket = None
        # Fixed: this was misspelled "enpoint_url", so self.endpoint_url was
        # never initialized; on the IAM-role path do_connect() does not set it
        # and list_bucket_v2() would raise AttributeError reading it.
        self.endpoint_url = None
        self.do_connect()

    def do_connect(self):
        """Create the boto3 client and remember the bucket/endpoint.

        See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
        """
        addressing_style = 'virtual'
        if self.conf.path_style_request:
            addressing_style = 'path'
        if self.conf.use_v4_sig:
            config = boto3.session.Config(signature_version='s3v4', s3={'addressing_style': addressing_style})
        else:
            config = boto3.session.Config(signature_version='s3', s3={'addressing_style': addressing_style})

        if self.conf.use_iam_role:
            # Credentials are resolved from the environment/instance profile.
            self.client = boto3.client('s3',
                                       use_ssl=self.conf.use_https,
                                       config=config)
        elif self.conf.host is None:
            # Plain AWS: derive the regional endpoint from the region name.
            scheme = 'https' if self.conf.use_https else 'http'
            self.endpoint_url = f'{scheme}://s3.{self.conf.aws_region}.amazonaws.com'
            self.client = boto3.client('s3',
                                       region_name=self.conf.aws_region,
                                       aws_access_key_id=self.conf.key_id,
                                       aws_secret_access_key=self.conf.key,
                                       use_ssl=self.conf.use_https,
                                       config=config)
        else:
            # S3-compatible service with an explicit host (and optional port).
            # https://github.com/boto/boto3/blob/master/boto3/session.py#L265
            endpoint_url = 'https://%s' % self.conf.host if self.conf.use_https else 'http://%s' % self.conf.host
            if self.conf.port:
                endpoint_url = '%s:%s' % (endpoint_url, self.conf.port)
            self.endpoint_url = endpoint_url
            self.client = boto3.client('s3',
                                       aws_access_key_id=self.conf.key_id,
                                       aws_secret_access_key=self.conf.key,
                                       endpoint_url=endpoint_url,
                                       config=config)

        self.bucket = self.conf.bucket_name

    def read_obj(self, obj_id):
        """Return the full content (bytes) of the object with key obj_id."""
        if self.conf.sse_c_key:
            obj = self.client.get_object(Bucket=self.bucket, Key=obj_id, SSECustomerKey=self.conf.sse_c_key, SSECustomerAlgorithm='AES256')
        else:
            obj = self.client.get_object(Bucket=self.bucket, Key=obj_id)
        return obj.get('Body').read()

    def get_name(self):
        """Human-readable backend name."""
        return 'S3 storage backend'

    def list_objs(self, prefix=None):
        """Yield [key, size] pairs for every object (optionally under prefix)."""
        if not self.conf.use_v4_sig and self.conf.path_style_request:
            # When using the S3 v2 protocol and path-style requests, boto3 is unable to list objects properly.
            # We manually sign the requests and then use the list-objects API to list the objects in the bucket.
            yield from self.list_objs_v2(prefix)
            return
        paginator = self.client.get_paginator('list_objects_v2')
        if prefix:
            iterator = paginator.paginate(Bucket=self.bucket, Prefix=prefix)
        else:
            iterator = paginator.paginate(Bucket=self.bucket)
        for page in iterator:
            for content in page.get('Contents', []):
                tokens = content.get('Key', '')
                if tokens:
                    obj = [tokens, content.get('Size', 0)]
                    yield obj

    def obj_exists(self, key):
        """True if a HEAD on key succeeds; any client error counts as absent."""
        bucket = self.bucket
        try:
            if self.conf.sse_c_key:
                self.client.head_object(Bucket=bucket, Key=key, SSECustomerKey=self.conf.sse_c_key, SSECustomerAlgorithm='AES256')
            else:
                self.client.head_object(Bucket=bucket, Key=key)
            exists = True
        except ClientError:
            exists = False

        return exists

    def write_obj(self, data, key, ctime=-1):
        """Store data under key; ctime >= 0 is recorded as object metadata."""
        metadata = {}
        if ctime >= 0:
            metadata = {"Ctime": str(ctime)}
        bucket = self.bucket
        if self.conf.sse_c_key:
            self.client.put_object(Bucket=bucket, Key=key, Body=data, Metadata=metadata, SSECustomerKey=self.conf.sse_c_key, SSECustomerAlgorithm='AES256')
        else:
            self.client.put_object(Bucket=bucket, Key=key, Body=data, Metadata=metadata)

    def remove_obj(self, key):
        """Delete the object with the given key."""
        bucket = self.bucket
        self.client.delete_object(Bucket=bucket, Key=key)

    def stat_obj(self, key):
        """Return the object's size in bytes (via HEAD)."""
        bucket = self.bucket
        if self.conf.sse_c_key:
            obj = self.client.head_object(Bucket=bucket, Key=key, SSECustomerKey=self.conf.sse_c_key, SSECustomerAlgorithm='AES256')
        else:
            obj = self.client.head_object(Bucket=bucket, Key=key)
        size = int(obj.get('ContentLength'))
        return size

    def get_ctime(self, key):
        """Return the Ctime metadata stored on the object, or 0 when missing/invalid."""
        bucket = self.bucket
        if self.conf.sse_c_key:
            obj = self.client.head_object(Bucket=bucket, Key=key, SSECustomerKey=self.conf.sse_c_key, SSECustomerAlgorithm='AES256')
        else:
            obj = self.client.head_object(Bucket=bucket, Key=key)
        metadata = obj.get('Metadata')
        ctime = metadata.get('ctime', '')
        try:
            return float(ctime)
        except (TypeError, ValueError):
            # Metadata absent or not a number; was a bare except before.
            return 0

    def get_signature(self, date):
        """Compute an AWS v2 (HMAC-SHA1) signature for a GET on the bucket root."""
        sign_str = f"GET\n\n\n{date}\n/{self.bucket}/"

        hmac_object = hmac.new(self.conf.key.encode('utf-8'), sign_str.encode('utf-8'), hashlib.sha1)
        hmac_bytes = hmac_object.digest()
        signature = base64.b64encode(hmac_bytes).decode('utf-8')
        return signature

    def list_bucket_v2(self, marker, prefix):
        """Perform one manually-signed v2 list-objects request.

        Returns the raw XML response body, or None on a non-200 status.
        """
        now = datetime.utcnow()
        date = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
        signature = self.get_signature(date)

        headers = {'Date': date,
                   'Authorization': f"AWS {self.conf.key_id}:{signature}",
                   }

        endpoint_url = self.endpoint_url
        bucket = self.bucket

        if marker and prefix:
            url = f'{endpoint_url}/{bucket}/?marker={marker}&prefix={prefix}/'
        elif marker:
            url = f'{endpoint_url}/{bucket}/?marker={marker}'
        elif prefix:
            url = f'{endpoint_url}/{bucket}/?prefix={prefix}/'
        else:
            url = f'{endpoint_url}/{bucket}/'

        response = requests.get(url, headers=headers, timeout=300)
        if response.status_code != 200:
            return None
        return response.text

    def list_objs_v2(self, prefix):
        """Yield [key, size] pairs by paging the v2 list API with a marker."""
        is_truncated = True
        marker = None
        while is_truncated:
            rsp = self.list_bucket_v2(marker, prefix)
            if not rsp:
                break

            root = etree.fromstring(rsp.encode('utf-8'))
            if "ListBucketResult" not in root.tag:
                break
            for child in root:
                if "IsTruncated" in child.tag:
                    if child.text == "true":
                        is_truncated = True
                    else:
                        is_truncated = False
                if "Contents" in child.tag:
                    obj = []
                    for contents in child:
                        if "Key" in contents.tag:
                            # The last key seen becomes the marker for the next page.
                            marker = contents.text
                            obj.append(contents.text)
                        if "Size" in contents.tag:
                            obj.append(int(contents.text))
                    if obj:
                        yield obj
36 | # 37 | # These warnings are annoying, disable them: 38 | # C0111: Missing docstring 39 | # C0301: line too long 40 | # C0302: Too many lines in module (NN) 41 | # I0011: Locally disabling W0000 42 | # R0201: Method could be a function 43 | # R0801: Similar lines in N files 44 | # R0902: Too many instance attributes (N/7) 45 | # R0903: Too few public methods (N/2) 46 | # R0904: Too many public methods (N/20) 47 | # R0911: Too many return statements (N/6) 48 | # R0912: Too many branches (NN/12) 49 | # R0913: Too many arguments (NN/5) 50 | # R0914: Too many local variables (NN/15) 51 | # R0915: Too many statements (NN/50) 52 | # W0511: TODO 53 | # W0401: Wildcard import FOO 54 | # W0141: Used builtin function 'map' 55 | # W0142: Used * or ** magic 56 | # W0232: Class has no __init__ method 57 | # W0603: Using the global statement 58 | # W0614: Unused import FOO from wildcard import 59 | # W0703: Catch "Exception" 60 | # W1201: Specify string format arguments as logging function parameters 61 | # E1121: Too many positional arguments for function call 62 | # 63 | # These warnings are genuine, we should add them back, by potentially only 64 | # disabling at the lines generating a false positive: 65 | # C0103: Invalid name "FOO" (should match [a-z_][a-z0-9_]{2,30}$) 66 | # E1103: Instance of 'FOO' has no 'BAR' member (but some types could not be inferred) 67 | # W0621: Redefining name 'FOO' from outer scope (line NN) 68 | # W0622: Redefining built-in 'FOO' 69 | # W0702: No exception type(s) specified 70 | # 71 | disable=C0103,C0111,C0301,C0302,E1103,I0011,R0201,R0801,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,W0141,W0142,W0232,W0401,W0603,W0614,W0621,W0622,W0702,W0703,W1201,E1121, W0611 72 | 73 | 74 | [REPORTS] 75 | 76 | # Set the output format. 
Available formats are text, parseable, colorized, msvs 77 | # (visual studio) and html 78 | output-format=text 79 | 80 | # Include message's id in output 81 | include-ids=yes 82 | 83 | # Put messages in a separate file for each module / package specified on the 84 | # command line instead of printing them on stdout. Reports (if any) will be 85 | # written in a file name "pylint_global.[txt|html]". 86 | files-output=no 87 | 88 | # Tells whether to display a full report or only the messages 89 | reports=no 90 | 91 | # Python expression which should return a note less than 10 (10 is the highest 92 | # note). You have access to the variables errors warning, statement which 93 | # respectively contain the number of errors / warnings messages and the total 94 | # number of statements analyzed. This is used by the global evaluation report 95 | # (RP0004). 96 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 97 | 98 | # Add a comment according to your evaluation note. This is used by the global 99 | # evaluation report (RP0004). 100 | comment=no 101 | 102 | 103 | [VARIABLES] 104 | 105 | # Tells whether we should check for unused import in __init__ files. 106 | init-import=no 107 | 108 | # A regular expression matching the beginning of the name of dummy variables 109 | # (i.e. not used). 110 | dummy-variables-rgx=_|dummy 111 | 112 | # List of additional names supposed to be defined in builtins. Remember that 113 | # you should avoid to define new builtins when possible. 114 | additional-builtins= 115 | 116 | 117 | [TYPECHECK] 118 | 119 | # Tells whether missing members accessed in mixin class should be ignored. A 120 | # mixin class is detected if its name ends with "mixin" (case insensitive). 121 | ignore-mixin-members=yes 122 | 123 | # List of classes names for which member attributes should not be checked 124 | # (useful for classes with attributes dynamically set). 
125 | ignored-classes=django.db.models.Model,django.forms.Form,seahub.avatar.models.AvatarBase 126 | 127 | # When zope mode is activated, add a predefined set of Zope acquired attributes 128 | # to generated-members. 129 | zope=no 130 | 131 | # List of members which are set dynamically and missed by pylint inference 132 | # system, and so shouldn't trigger E0201 when accessed. Python regular 133 | # expressions are accepted. 134 | generated-members=objects,DoesNotExist,cleaned_data,is_valid,errors 135 | 136 | 137 | [MISCELLANEOUS] 138 | 139 | # List of note tags to take in consideration, separated by a comma. 140 | notes=FIXME,XXX,TODO 141 | 142 | 143 | [SIMILARITIES] 144 | 145 | # Minimum lines number of a similarity. 146 | min-similarity-lines=4 147 | 148 | # Ignore comments when computing similarities. 149 | ignore-comments=yes 150 | 151 | # Ignore docstrings when computing similarities. 152 | ignore-docstrings=yes 153 | 154 | 155 | [FORMAT] 156 | 157 | # Maximum number of characters on a single line. 158 | max-line-length=80 159 | 160 | # Maximum number of lines in a module 161 | max-module-lines=1000 162 | 163 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 164 | # tab). 165 | # In rietveld, 2 spaces indents are used. 
166 | indent-string=' ' 167 | 168 | 169 | [BASIC] 170 | 171 | # Required attributes for module, separated by a comma 172 | required-attributes= 173 | 174 | # List of builtins function names that should not be used, separated by a comma 175 | bad-functions=map,filter,apply,input 176 | 177 | # Regular expression which should only match correct module names 178 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 179 | 180 | # Regular expression which should only match correct module level names 181 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 182 | 183 | # Regular expression which should only match correct class names 184 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 185 | 186 | # Regular expression which should only match correct function names 187 | function-rgx=[a-z_][a-z0-9_]{2,30}$ 188 | 189 | # Regular expression which should only match correct method names 190 | method-rgx=[a-z_][a-z0-9_]{2,30}$ 191 | 192 | # Regular expression which should only match correct instance attribute names 193 | attr-rgx=[a-z_][a-z0-9_]{2,30}$ 194 | 195 | # Regular expression which should only match correct argument names 196 | argument-rgx=[a-z_][a-z0-9_]{2,30}$ 197 | 198 | # Regular expression which should only match correct variable names 199 | variable-rgx=[a-z_][a-z0-9_]{2,30}$ 200 | 201 | # Regular expression which should only match correct list comprehension / 202 | # generator expression variable names 203 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 204 | 205 | # Good variable names which should always be accepted, separated by a comma 206 | good-names=i,j,k,ex,Run,_ 207 | 208 | # Bad variable names which should always be refused, separated by a comma 209 | bad-names=foo,bar,baz,toto,tutu,tata 210 | 211 | # Regular expression which should only match functions or classes name which do 212 | # not require a docstring 213 | no-docstring-rgx=__.*__ 214 | 215 | 216 | [DESIGN] 217 | 218 | # Maximum number of arguments for function / method 219 | max-args=5 220 | 221 | # Argument names that match 
this expression will be ignored. Default to name 222 | # with leading underscore 223 | ignored-argument-names=_.* 224 | 225 | # Maximum number of locals for function / method body 226 | max-locals=15 227 | 228 | # Maximum number of return / yield for function / method body 229 | max-returns=6 230 | 231 | # Maximum number of branch for function / method body 232 | max-branchs=12 233 | 234 | # Maximum number of statements in function / method body 235 | max-statements=50 236 | 237 | # Maximum number of parents for a class (see R0901). 238 | max-parents=7 239 | 240 | # Maximum number of attributes for a class (see R0902). 241 | max-attributes=7 242 | 243 | # Minimum number of public methods for a class (see R0903). 244 | min-public-methods=2 245 | 246 | # Maximum number of public methods for a class (see R0904). 247 | max-public-methods=20 248 | 249 | 250 | [CLASSES] 251 | 252 | # List of interface methods to ignore, separated by a comma. This is used for 253 | # instance to not check methods defines in Zope's Interface base class. 254 | ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by 255 | 256 | # List of method names used to declare (i.e. assign) instance attributes. 257 | defining-attr-methods=__init__,__new__,setUp 258 | 259 | # List of valid names for the first argument in a class method. 260 | valid-classmethod-first-arg=cls 261 | 262 | 263 | [IMPORTS] 264 | 265 | # Deprecated modules which should not be used, separated by a comma 266 | deprecated-modules=regsub,string,TERMIOS,Bastion,rexec 267 | 268 | # Create a graph of every (i.e. 
internal and external) dependencies in the 269 | # given file (report RP0402 must not be disabled) 270 | import-graph= 271 | 272 | # Create a graph of external dependencies in the given file (report RP0402 must 273 | # not be disabled) 274 | ext-import-graph= 275 | 276 | # Create a graph of internal dependencies in the given file (report RP0402 must 277 | # not be disabled) 278 | int-import-graph= 279 | 280 | 281 | [EXCEPTIONS] 282 | 283 | # Exceptions that will emit a warning when being caught. Defaults to 284 | # "Exception" 285 | overgeneral-exceptions=Exception 286 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
class SeafDirent(object):
    '''An entry in a SeafDir'''
    DIR = 0
    FILE = 1

    def __init__(self, name, type, id, mtime, size, modifier):
        self.name = name
        self.type = type
        self.id = id
        self.mtime = mtime
        self.size = size
        self.modifier = modifier

    def is_file(self):
        '''True when this entry describes a regular file.'''
        return self.type == SeafDirent.FILE

    def is_dir(self):
        '''True when this entry describes a sub-directory.'''
        return self.type == SeafDirent.DIR

    def __str__(self):
        kind = 'dir' if self.type == SeafDirent.DIR else 'file'
        return ('SeafDirent(type=%s, name=%s, size=%s, id=%s, mtime=%s, modifier=%s)'
                % (kind, self.name, self.size, self.id, self.mtime, self.modifier))

    @staticmethod
    def fromV0(name, type, id, modifier):
        '''Build an entry from v0 on-disk data (bytes name/id, no mtime or size).'''
        return SeafDirent(name.decode(encoding='utf-8'), type,
                          id.decode(encoding='utf-8'), -1, -1, modifier)

    @staticmethod
    def fromV1(name, type, id, mtime, size, modifier):
        '''Build an entry from v1 data, which already carries mtime and size.'''
        return SeafDirent(name, type, id, mtime, size, modifier)


class SeafDir(object):
    '''A directory object: maps entry names to SeafDirent instances.'''
    def __init__(self, store_id, version, obj_id, dirents):
        self.version = version
        self.store_id = store_id
        self.obj_id = obj_id

        self.dirents = dirents

        # Lazily-built caches of the file / sub-directory entry lists.
        self._cached_files_list = None
        self._cached_dirs_list = None

    def get_files_list(self):
        '''Return (and cache) the entries that are regular files.'''
        if self._cached_files_list is None:
            self._cached_files_list = [dent for dent in self.dirents.values()
                                       if dent.type == SeafDirent.FILE]
        return self._cached_files_list

    def get_subdirs_list(self):
        '''Return (and cache) the entries that are sub-directories.'''
        if self._cached_dirs_list is None:
            self._cached_dirs_list = [dent for dent in self.dirents.values()
                                      if dent.type == SeafDirent.DIR]
        return self._cached_dirs_list

    def lookup_dent(self, name):
        '''Return the raw dirent for *name*, or None when absent.'''
        return self.dirents.get(name)

    def lookup(self, name):
        '''Load and return the child object (SeafDir or SeafFile) named *name*.'''
        dent = self.dirents.get(name)
        if dent is None:
            return None
        loader = fs_mgr.load_seafdir if dent.is_dir() else fs_mgr.load_seafile
        return loader(self.store_id, self.version, dent.id)

    def remove_entry(self, name):
        '''Drop *name* from the entry map; unknown names are ignored.'''
        self.dirents.pop(name, None)
class SeafileStream(object):
    '''Implements basic file-like interface'''
    def __init__(self, file_obj):
        self.file_obj = file_obj
        # Content of the block currently being consumed, its index in the
        # file's block list, and the read cursor inside that block.
        self.block = None
        self.block_idx = 0
        self.block_offset = 0

    def read(self, size):
        '''Read up to *size* bytes, loading content blocks on demand.'''
        remaining = size
        block_ids = self.file_obj.blocks
        chunks = []

        while True:
            # Load the next block once the current one is exhausted (or at start).
            if not self.block or self.block_offset == len(self.block):
                if self.block_idx == len(block_ids):
                    break  # no more blocks: end of file
                self.block = block_mgr.load_block(self.file_obj.store_id,
                                                  self.file_obj.version,
                                                  block_ids[self.block_idx])
                self.block_idx += 1
                self.block_offset = 0

            available = len(self.block) - self.block_offset
            if remaining >= available:
                # Consume everything left in the current block.
                chunks.append(self.block[self.block_offset:])
                remaining -= available
                self.block_offset = len(self.block)
            else:
                # The request ends inside the current block.
                end = self.block_offset + remaining
                chunks.append(self.block[self.block_offset:end])
                self.block_offset = end
                remaining = 0

            if remaining == 0:
                break

        return b''.join(chunks)

    def close(self):
        '''Nothing to release; present for file-like compatibility.'''
        pass
def load_seafdir(self, store_id, version, dir_id):
    '''Fetch and parse the directory object *dir_id* from the backing
    object store and return a SeafDir.  The zero-object sentinel id
    yields an empty directory.'''
    self._dir_counter += 1

    dirents = {}
    if dir_id != ZERO_OBJ_ID:
        # Pick the store: a per-repo storage class when enabled (falling
        # back to the default class), otherwise the single configured
        # store.
        if objstore_factory.enable_storage_classes:
            storage_id = get_repo_storage_id(store_id)
            if storage_id:
                store = self.obj_stores[storage_id]
            else:
                store = self.obj_stores['__default__']
        else:
            store = self.obj_store
        data = store.read_obj(store_id, version, dir_id)

        if version == 0:
            dirents = self.parse_dirents_v0(data, dir_id)
        elif version == 1:
            dirents = self.parse_dirents_v1(data, dir_id)
        else:
            raise RuntimeError('invalid fs version ' + str(version))

    return SeafDir(store_id, version, dir_id, dirents)
def parse_blocks_v0(self, data, obj_id):
    '''Parse a version-0 (binary format) file object.

    Layout: a "!iq" header (metadata type, file size) followed by a
    sequence of raw 20-byte block SHA1 digests.  Returns
    (block_id_list, size) with the block ids as lowercase hex strings.

    Raises ObjectFormatError when the header type is not a file.
    '''
    blocks = []

    fmt = '!iq'
    mode, size = struct.unpack_from(fmt, data, offset=0)
    if mode != SEAF_METADATA_TYPE_FILE:
        raise ObjectFormatError('corrupt file object ' + obj_id)

    off = struct.calcsize(fmt)
    while True:
        fmt = "!20s"
        bid, = struct.unpack_from(fmt, data, offset=off)
        # Hex-encode the raw digest in one step.  The previous per-element
        # loop broke on Python 3: iterating bytes yields ints, which
        # binascii.b2a_hex rejects, and ''.join cannot join bytes.
        blk_id = binascii.b2a_hex(bid).decode('utf-8')
        blocks.append(blk_id)

        off += struct.calcsize(fmt)
        if off > len(data) - 20:
            break

    return blocks, size
def format_dir_path(self, path):
    '''Normalize *path*: guarantee a leading "/" and strip trailing
    slashes, leaving the root path "/" unchanged.'''
    normalized = path if path.startswith('/') else '/' + path
    if normalized.endswith('/') and normalized != '/':
        normalized = normalized.rstrip('/')
    return normalized
class SeafSwiftClient(object):
    '''Minimal OpenStack Swift client for the seafobj Swift backend.

    Supports TempAuth (v1.0) and Keystone (v2.0 / v3) authentication.
    The auth token and storage URL are cached; a 401 response clears
    them and the operation is retried up to MAX_RETRY times.
    '''

    MAX_RETRY = 2

    def __init__(self, swift_conf):
        self.swift_conf = swift_conf
        self.token = None
        self.storage_url = None
        if swift_conf.use_https:
            self.base_url = 'https://%s' % swift_conf.auth_host
        else:
            self.base_url = 'http://%s' % swift_conf.auth_host

    def authenticated(self):
        '''True when both a token and a storage URL are cached.'''
        return self.token is not None and self.storage_url is not None

    def authenticate(self):
        '''Authenticate using the scheme selected by conf.auth_ver.'''
        if self.swift_conf.auth_ver == 'v1.0':
            self.authenticate_v1()
        elif self.swift_conf.auth_ver == "v2.0":
            self.authenticate_v2()
        else:
            self.authenticate_v3()

    def authenticate_v1(self):
        '''TempAuth: credentials in request headers, token and storage URL
        returned in response headers.'''
        url = '%s/auth/%s' % (self.base_url, self.swift_conf.auth_ver)

        hdr = {'X-Storage-User': self.swift_conf.user_name,
               'X-Storage-Pass': self.swift_conf.password}
        req = urllib.request.Request(url, None, hdr)
        try:
            resp = urllib.request.urlopen(req)
        except urllib.error.HTTPError as e:
            # BUGFIX: the old messages applied a single-placeholder format
            # string to a (MAX_RETRY, code) tuple, which raised TypeError
            # instead of the intended SwiftAuthenticateError.
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %d.' %
                                         e.getcode())
        except urllib.error.URLError as e:
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %s.' %
                                         e.reason)

        ret_code = resp.getcode()
        if ret_code == http.client.OK or ret_code == http.client.NON_AUTHORITATIVE_INFORMATION:
            self.storage_url = resp.headers['x-storage-url']
            self.token = resp.headers['x-auth-token']
        else:
            raise SwiftAuthenticateError('[swift] Unexpected code when authenticate: %d' %
                                         ret_code)
        if self.storage_url is None:
            raise SwiftAuthenticateError('[swift] Failed to authenticate.')

    def authenticate_v2(self):
        '''Keystone v2.0: POST JSON credentials, then pick the object-store
        endpoint out of the service catalog.'''
        url = '%s/%s/tokens' % (self.base_url, self.swift_conf.auth_ver)
        hdr = {'Content-Type': 'application/json'}
        auth_data = {'auth': {'passwordCredentials': {'username': self.swift_conf.user_name,
                                                      'password': self.swift_conf.password},
                              'tenantName': self.swift_conf.tenant}}

        req = urllib.request.Request(url, json.dumps(auth_data).encode('utf8'), hdr)
        try:
            resp = urllib.request.urlopen(req)
        except urllib.error.HTTPError as e:
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %d.' %
                                         e.getcode())
        except urllib.error.URLError as e:
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %s.' %
                                         e.reason)

        ret_code = resp.getcode()
        ret_data = resp.read()

        if ret_code == http.client.OK or ret_code == http.client.NON_AUTHORITATIVE_INFORMATION:
            data_json = json.loads(ret_data)
            self.token = data_json['access']['token']['id']
            catalogs = data_json['access']['serviceCatalog']
            for catalog in catalogs:
                if catalog['type'] == 'object-store':
                    if self.swift_conf.region:
                        # Only accept the endpoint of the configured region.
                        for endpoint in catalog['endpoints']:
                            if endpoint['region'] == self.swift_conf.region:
                                self.storage_url = endpoint['publicURL']
                                return
                    else:
                        self.storage_url = catalog['endpoints'][0]['publicURL']
                        return
        else:
            raise SwiftAuthenticateError('[swift] Unexpected code when authenticate: %d' %
                                         ret_code)
        if self.swift_conf.region and self.storage_url is None:
            raise SwiftAuthenticateError('[swift] Region \'%s\' not found.' % self.swift_conf.region)

    def authenticate_v3(self):
        '''Keystone v3: password-scoped token.  The token arrives in the
        X-Subject-Token header; the catalog lists per-interface endpoints
        and only "public" ones are used.'''
        url = '%s/v3/auth/tokens' % self.base_url
        hdr = {'Content-Type': 'application/json'}

        if self.swift_conf.domain:
            domain_value = self.swift_conf.domain
        else:
            domain_value = 'default'
        auth_data = {'auth': {'identity': {'methods': ['password'],
                                           'password': {'user': {'domain': {'id': domain_value},
                                                                 'name': self.swift_conf.user_name,
                                                                 'password': self.swift_conf.password}}},
                              'scope': {'project': {'domain': {'id': domain_value},
                                                    'name': self.swift_conf.tenant}}}}

        req = urllib.request.Request(url, json.dumps(auth_data).encode('utf8'), hdr)
        try:
            resp = urllib.request.urlopen(req)
        except urllib.error.HTTPError as e:
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %d.' %
                                         e.getcode())
        except urllib.error.URLError as e:
            raise SwiftAuthenticateError('[swift] Failed to authenticate: %s.' %
                                         e.reason)

        ret_code = resp.getcode()
        ret_data = resp.read()

        if ret_code == http.client.OK or ret_code == http.client.NON_AUTHORITATIVE_INFORMATION or ret_code == http.client.CREATED:
            self.token = resp.headers['X-Subject-Token']
            data_json = json.loads(ret_data)
            catalogs = data_json['token']['catalog']
            for catalog in catalogs:
                if catalog['type'] == 'object-store':
                    if self.swift_conf.region:
                        for endpoint in catalog['endpoints']:
                            if endpoint['region'] == self.swift_conf.region and endpoint['interface'] == 'public':
                                self.storage_url = endpoint['url']
                                return
                    else:
                        for endpoint in catalog['endpoints']:
                            if endpoint['interface'] == 'public':
                                self.storage_url = endpoint['url']
                                return
        else:
            raise SwiftAuthenticateError('[swift] Unexpected code when authenticate: %d' %
                                         ret_code)
        if self.swift_conf.region and self.storage_url is None:
            raise SwiftAuthenticateError('[swift] Region \'%s\' not found.' % self.swift_conf.region)

    def read_object_content(self, obj_id):
        '''GET one object and return its raw bytes; re-authenticates and
        retries on 401.'''
        i = 0
        while i <= SeafSwiftClient.MAX_RETRY:
            if not self.authenticated():
                self.authenticate()

            url = '%s/%s/%s' % (self.storage_url, self.swift_conf.container, obj_id)
            hdr = {'X-Auth-Token': self.token}
            req = urllib.request.Request(url, headers=hdr)
            try:
                resp = urllib.request.urlopen(req)
            except urllib.error.HTTPError as e:
                err_code = e.getcode()
                if err_code == http.client.UNAUTHORIZED:
                    # Token expired: force re-authentication and retry.
                    self.token = None
                    self.storage_url = None
                    i += 1
                    continue
                else:
                    raise GetObjectError('[swift] Failed to read %s: %d' % (obj_id, err_code))
            except urllib.error.URLError as e:
                raise GetObjectError('[swift] Failed to read %s: %s' % (obj_id, e.reason))

            ret_code = resp.getcode()
            ret_data = resp.read()

            if ret_code == http.client.OK:
                return ret_data
            else:
                raise GetObjectError('[swift] Unexpected code when read %s: %d' %
                                     (obj_id, ret_code))
        raise GetObjectError('[swift] Failed to read %s: quit after %d unauthorized retries.' %
                             (obj_id, SeafSwiftClient.MAX_RETRY))

    def list_objs(self):
        '''GET the container listing (newline-separated object names).'''
        i = 0
        while i <= SeafSwiftClient.MAX_RETRY:
            if not self.authenticated():
                self.authenticate()

            url = '%s/%s' % (self.storage_url, self.swift_conf.container)
            hdr = {'X-Auth-Token': self.token}
            req = urllib.request.Request(url, headers=hdr)
            try:
                resp = urllib.request.urlopen(req)
            except urllib.error.HTTPError as e:
                err_code = e.getcode()
                if err_code == http.client.UNAUTHORIZED:
                    self.token = None
                    self.storage_url = None
                    i += 1
                    continue
                else:
                    raise GetObjectError('[swift] Failed to list objs %s' % err_code)
            except urllib.error.URLError as e:
                # BUGFIX: the old message had no placeholder, so applying
                # the % operator raised TypeError instead of reporting the
                # real network error.
                raise GetObjectError('[swift] Failed to list objs: %s' % e.reason)

            ret_code = resp.getcode()
            ret_data = resp.read()

            if ret_code == http.client.OK:
                return ret_data
            else:
                raise GetObjectError('[swift] Unexpected code when list objs %s' % ret_code)
        raise GetObjectError('[swift] Failed to list objs: quit after %d unauthorized retries.' %
                             SeafSwiftClient.MAX_RETRY)

    def remove_obj(self, obj_id):
        '''DELETE one object; returns the HTTP status code.'''
        i = 0
        while i <= SeafSwiftClient.MAX_RETRY:
            if not self.authenticated():
                self.authenticate()

            url = '%s/%s/%s' % (self.storage_url, self.swift_conf.container, obj_id)
            hdr = {'X-Auth-Token': self.token}
            req = urllib.request.Request(url, headers=hdr, method='DELETE')
            try:
                resp = urllib.request.urlopen(req)
            except urllib.error.HTTPError as e:
                err_code = e.getcode()
                if err_code == http.client.UNAUTHORIZED:
                    self.token = None
                    self.storage_url = None
                    i += 1
                    continue
                else:
                    raise GetObjectError('[swift] Failed to remove %s: %d' % (obj_id, err_code))
            except urllib.error.URLError as e:
                raise GetObjectError('[swift] Failed to remove %s: %s' % (obj_id, e.reason))

            return resp.getcode()
        raise GetObjectError('[swift] Failed to remove obj %s: quit after %d unauthorized retries.' %
                             (obj_id, SeafSwiftClient.MAX_RETRY))

    def stat_obj(self, obj_id):
        '''HEAD one object and return its Content-Length as an int.'''
        i = 0
        while i <= SeafSwiftClient.MAX_RETRY:
            if not self.authenticated():
                self.authenticate()

            url = '%s/%s/%s' % (self.storage_url, self.swift_conf.container, obj_id)
            hdr = {'X-Auth-Token': self.token}
            req = urllib.request.Request(url, headers=hdr, method='HEAD')
            try:
                resp = urllib.request.urlopen(req)
            except urllib.error.HTTPError as e:
                err_code = e.getcode()
                if err_code == http.client.UNAUTHORIZED:
                    self.token = None
                    self.storage_url = None
                    i += 1
                    continue
                else:
                    # BUGFIX: these messages previously said "remove"
                    # (copy-paste from remove_obj).
                    raise GetObjectError('[swift] Failed to stat %s: %d' % (obj_id, err_code))
            except urllib.error.URLError as e:
                raise GetObjectError('[swift] Failed to stat %s: %s' % (obj_id, e.reason))

            for k, v in resp.headers.items():
                if k == 'Content-Length':
                    return int(v)
            # BUGFIX: a response without Content-Length previously spun in
            # this loop forever (i was never incremented); fail explicitly.
            raise GetObjectError('[swift] No Content-Length when stat %s' % obj_id)
        raise GetObjectError('[swift] Failed to stat obj %s: quit after %d unauthorized retries.' %
                             (obj_id, SeafSwiftClient.MAX_RETRY))
class SeafObjStoreSwift(AbstractObjStore):
    '''Swift backend for seafile objects.'''

    def __init__(self, compressed, swift_conf, crypto=None, cache=None):
        AbstractObjStore.__init__(self, compressed, crypto, cache)
        self.swift_client = SeafSwiftClient(swift_conf)
        self.container = swift_conf.container

    def read_obj_raw(self, repo_id, version, obj_id):
        '''Fetch the raw (still compressed/encrypted) object bytes.'''
        return self.swift_client.read_object_content('%s/%s' % (repo_id, obj_id))

    def get_name(self):
        return 'Swift storage backend'

    def list_objs(self, repo_id=None):
        '''Yield [repo_id, obj_id, 0] for every stored object, optionally
        restricted to a single repo.'''
        listing = self.swift_client.list_objs().decode('utf8')
        for entry in listing.split('\n'):
            parts = entry.split('/')
            if len(parts) != 2:
                continue
            if repo_id is None or parts[0] == repo_id:
                yield [parts[0], parts[1], 0]

    def remove_obj(self, repo_id, obj_id):
        '''Delete one object from the container.'''
        self.swift_client.remove_obj('%s/%s' % (repo_id, obj_id))

    def stat_raw(self, repo_id, obj_id):
        '''Return the stored (raw) size of one object in bytes.'''
        return self.swift_client.stat_obj('%s/%s' % (repo_id, obj_id))

    def get_container_name(self):
        return self.container
def get_ceph_conf_from_json(cfg):
    '''Build a CephConf from one backend entry of the storage-classes
    JSON file.  "ceph_client_id" is optional and defaults to "".'''
    from seafobj.backends.ceph import CephConf

    client_id = cfg.get('ceph_client_id', '')
    return CephConf(cfg['ceph_config'], cfg['pool'], client_id)
def get_s3_conf_from_env(obj_type):
    '''Assemble an S3Conf from the S3_* environment variables.

    *obj_type* selects the bucket variable: "fs" -> S3_FS_BUCKET,
    "commits" -> S3_COMMIT_BUCKET, anything else -> S3_BLOCK_BUCKET.
    '''
    envs = os.environ

    def flag(name):
        # Boolean env options are enabled by the literal string "true".
        return envs.get(name) == "true"

    key_id = envs.get("S3_KEY_ID")
    key = envs.get("S3_SECRET_KEY")

    if obj_type == "fs":
        bucket = envs.get("S3_FS_BUCKET")
    elif obj_type == "commits":
        bucket = envs.get("S3_COMMIT_BUCKET")
    else:
        bucket = envs.get("S3_BLOCK_BUCKET")

    host = None
    port = None
    addr = envs.get("S3_HOST")
    if addr:
        segs = addr.split(':')
        host = segs[0]
        if len(segs) > 1:
            port = int(segs[1])

    aws_region = envs.get("S3_AWS_REGION")
    sse_c_key = envs.get("S3_SSE_C_KEY") or None

    from objwrapper.s3 import S3Conf
    return S3Conf(key_id, key, bucket, host, port,
                  flag("S3_USE_V4_SIGNATURE"), aws_region,
                  flag("S3_USE_HTTPS"), flag("S3_PATH_STYLE_REQUEST"),
                  sse_c_key, flag("S3_USE_IAM_ROLE"))
def get_oss_conf_from_json(cfg):
    '''Build an OSSConf (Alibaba OSS) from one backend entry of the
    storage-classes JSON file.'''
    from objwrapper.alioss import OSSConf

    endpoint = cfg.get('endpoint', '')
    region = cfg.get('region', '')
    # Accept either a JSON boolean or a "true"/"false" string.
    use_https = str(cfg.get('use_https', '')).lower().strip() == 'true'
    return OSSConf(cfg['key_id'], cfg['key'], cfg['bucket'],
                   endpoint, region, use_https)
def get_swift_conf_from_json(cfg):
    '''Build a SwiftConf from one backend entry of the storage-classes
    JSON file.

    "auth_ver" defaults to "v2.0"; "tenant" is only required for
    v2.0/v3; "domain" defaults to "default".
    '''
    user_name = cfg['user_name']
    password = cfg['password']
    container = cfg['container']
    auth_host = cfg['auth_host']
    auth_ver = cfg.get('auth_ver', 'v2.0')
    # TempAuth (v1.0) has no tenant concept.
    tenant = cfg['tenant'] if auth_ver != 'v1.0' else None
    # BUGFIX/consistency: accept a JSON boolean or a "true"/"false" string,
    # like the s3/oss JSON parsers do.  Previously ANY non-empty string --
    # including "false" -- enabled https.
    use_https = str(cfg.get('use_https', '')).lower().strip() == 'true'
    region = cfg.get('region', None)
    domain = cfg.get('domain', 'default')

    from seafobj.backends.swift import SwiftConf
    return SwiftConf(user_name, password, container, auth_host, auth_ver,
                     tenant, use_https, region, domain)
def get_seaf_cache(self):
    '''Return the configured object cache (redis or memcached), or None.

    Environment configuration (CACHE_PROVIDER=redis|memcached) takes
    precedence over the [redis]/[memcached] sections of seafile.conf.

    NOTE(review): assumes get_config_parser() has already populated
    self.cfg -- confirm against callers.
    '''
    envs = os.environ
    cache_provider = envs.get("CACHE_PROVIDER")
    if cache_provider in ("redis", "memcached"):
        return self.load_cache_from_env(envs)

    if self.cfg.has_option('redis', 'redis_host'):
        host = self.cfg.get('redis', 'redis_host')
        # BUGFIX/consistency: ConfigParser.get always returns strings;
        # cast the numeric options, as load_cache_from_env and the
        # memcached path below already do.
        if self.cfg.has_option('redis', 'redis_port'):
            port = int(self.cfg.get('redis', 'redis_port'))
        else:
            port = 6379
        if self.cfg.has_option('redis', 'redis_expiry'):
            expiry = int(self.cfg.get('redis', 'redis_expiry'))
        else:
            expiry = 24 * 3600
        if self.cfg.has_option('redis', 'max_connections'):
            max_connections = int(self.cfg.get('redis', 'max_connections'))
        else:
            max_connections = 20
        if self.cfg.has_option('redis', 'redis_password'):
            passwd = self.cfg.get('redis', 'redis_password')
        else:
            passwd = None
        return get_redis_cache(host, port, expiry, max_connections, passwd)

    if self.cfg.has_option('memcached', 'memcached_options'):
        mc_options = self.cfg.get('memcached', 'memcached_options')
        if self.cfg.has_option('memcached', 'memcached_expiry'):
            expiry = int(self.cfg.get('memcached', 'memcached_expiry'))
        else:
            expiry = 24 * 3600

        return get_mc_cache(mc_options, expiry)
    return None
def __init__(self, cfg=None):
    '''Set up the object-store factory.

    Detects whether multiple storage classes are enabled -- either via
    the SEAF_SERVER_STORAGE_TYPE=multiple environment variable or via
    [storage] enable_storage_classes in seafile.conf -- and, when they
    are, loads the storage-classes JSON file and a DB session factory.
    '''
    self.seafile_cfg = cfg or SeafileConfig()
    self.json_cfg = None
    self.enable_storage_classes = False
    # Lazily-filled maps of storage_id -> backend store, one per type.
    self.obj_stores = {'commits': {}, 'fs': {}, 'blocks': {}}

    env_storage_type = os.environ.get("SEAF_SERVER_STORAGE_TYPE")
    cfg = self.seafile_cfg.get_config_parser()
    if env_storage_type == 'multiple' or (not env_storage_type and cfg.has_option('storage', 'enable_storage_classes')):
        # BUGFIX: only read the config option when the env var did not
        # already decide -- previously cfg.get() ran unconditionally and
        # raised NoOptionError when SEAF_SERVER_STORAGE_TYPE=multiple was
        # set without the seafile.conf entry.
        if env_storage_type == 'multiple':
            enable_storage_classes = 'true'
        else:
            enable_storage_classes = cfg.get('storage', 'enable_storage_classes')
        if enable_storage_classes.lower() == 'true':
            from seafobj.db import init_db_session_class
            self.enable_storage_classes = True
            self.session = init_db_session_class(cfg)
            try:
                json_file = cfg.get('storage', 'storage_classes_file')
                # BUGFIX: close the file handle (it used to leak).
                with open(json_file) as f:
                    self.json_cfg = json.load(f)
            except Exception:
                logging.warning('Failed to load json file')
                raise
452 | 453 | if bend[obj_type]['backend'] == 'fs': 454 | obj_dir = os.path.join(bend[obj_type]['dir'], 'storage', obj_type) 455 | self.obj_stores[obj_type][storage_id] = SeafObjStoreFS(compressed, obj_dir, crypto) 456 | elif bend[obj_type]['backend'] == 'swift': 457 | from seafobj.backends.swift import SeafObjStoreSwift 458 | swift_conf = get_swift_conf_from_json(bend[obj_type]) 459 | self.obj_stores[obj_type][storage_id] = SeafObjStoreSwift(compressed, swift_conf, crypto, cache) 460 | elif bend[obj_type]['backend'] == 's3': 461 | from seafobj.backends.s3 import SeafObjStoreS3 462 | s3_conf = get_s3_conf_from_json(bend[obj_type]) 463 | self.obj_stores[obj_type][storage_id] = SeafObjStoreS3(compressed, s3_conf, crypto, cache) 464 | elif bend[obj_type]['backend'] == 'ceph': 465 | from seafobj.backends.ceph import SeafObjStoreCeph 466 | ceph_conf = get_ceph_conf_from_json(bend[obj_type]) 467 | self.obj_stores[obj_type][storage_id] = SeafObjStoreCeph(compressed, ceph_conf, crypto, cache) 468 | elif bend[obj_type]['backend'] == 'oss': 469 | from seafobj.backends.alioss import SeafObjStoreOSS 470 | oss_conf = get_oss_conf_from_json(bend[obj_type]) 471 | self.obj_stores[obj_type][storage_id] = SeafObjStoreOSS(compressed, oss_conf, crypto, cache) 472 | else: 473 | raise InvalidConfigError('Unknown backend type: %s.' 
% bend[obj_type]['backend']) 474 | 475 | if 'is_default' in bend and bend['is_default']==True: 476 | if '__default__' in self.obj_stores[obj_type]: 477 | raise InvalidConfigError('Only one default backend can be set.') 478 | self.obj_stores[obj_type]['__default__'] = self.obj_stores[obj_type][storage_id] 479 | 480 | return self.obj_stores[obj_type] 481 | 482 | def get_obj_store(self, obj_type): 483 | '''Return an implementation of SeafileObjStore''' 484 | cfg = self.seafile_cfg.get_config_parser() 485 | try: 486 | section = self.obj_section_map[obj_type] 487 | except KeyError: 488 | raise RuntimeError('unknown obj_type ' + obj_type) 489 | 490 | crypto = self.seafile_cfg.get_seaf_crypto() 491 | cache = None 492 | if obj_type != 'blocks': 493 | cache = self.seafile_cfg.get_seaf_cache() 494 | 495 | if cfg.has_option(section, 'name'): 496 | backend_name = cfg.get(section, 'name') 497 | else: 498 | backend_name = 'fs' 499 | 500 | dir_path = None 501 | if cfg.has_option(section, 'dir'): 502 | dir_path = cfg.get(section, 'dir') 503 | 504 | compressed = obj_type == 'fs' 505 | 506 | # Get s3 storage backend config from env. 
507 | env_storage_type = os.environ.get("SEAF_SERVER_STORAGE_TYPE") 508 | if env_storage_type == 's3': 509 | from seafobj.backends.s3 import SeafObjStoreS3 510 | s3_conf = get_s3_conf_from_env(obj_type) 511 | return SeafObjStoreS3(compressed, s3_conf, crypto, cache) 512 | elif env_storage_type == 'disk': 513 | backend_name = 'fs' 514 | 515 | if backend_name == 'fs': 516 | if dir_path is None: 517 | obj_dir = os.path.join(self.seafile_cfg.get_seafile_storage_dir(), obj_type) 518 | else: 519 | obj_dir = os.path.join(dir_path, 'storage', obj_type) 520 | if not os.path.exists(obj_dir): 521 | os.makedirs(obj_dir) 522 | return SeafObjStoreFS(compressed, obj_dir, crypto) 523 | 524 | elif backend_name == 's3': 525 | # We import s3 backend here to avoid dependency on boto3 for users 526 | # not using s3 527 | from seafobj.backends.s3 import SeafObjStoreS3 528 | s3_conf = get_s3_conf(cfg, section) 529 | return SeafObjStoreS3(compressed, s3_conf, crypto, cache) 530 | 531 | elif backend_name == 'ceph': 532 | # We import ceph backend here to avoid depenedency on rados for 533 | # users not using rados 534 | from seafobj.backends.ceph import SeafObjStoreCeph 535 | ceph_conf = get_ceph_conf(cfg, section) 536 | return SeafObjStoreCeph(compressed, ceph_conf, crypto, cache) 537 | 538 | elif backend_name == 'oss': 539 | from seafobj.backends.alioss import SeafObjStoreOSS 540 | oss_conf = get_oss_conf(cfg, section) 541 | return SeafObjStoreOSS(compressed, oss_conf, crypto, cache) 542 | 543 | elif backend_name == 'swift': 544 | from seafobj.backends.swift import SeafObjStoreSwift 545 | swift_conf = get_swift_conf(cfg, section) 546 | return SeafObjStoreSwift(compressed, swift_conf, crypto, cache) 547 | 548 | else: 549 | raise InvalidConfigError('unknown %s backend "%s"' % (obj_type, backend_name)) 550 | 551 | objstore_factory = SeafObjStoreFactory() 552 | repo_storage_id = {} 553 | 554 | def get_repo_storage_id(repo_id): 555 | if repo_id in repo_storage_id: 556 | return 
repo_storage_id[repo_id] 557 | else: 558 | from .db import Base 559 | from sqlalchemy.orm.scoping import scoped_session 560 | RepoStorageId = Base.classes.RepoStorageId 561 | storage_id = None 562 | session = scoped_session(objstore_factory.session) 563 | r = session.scalars(select(RepoStorageId).where(RepoStorageId.repo_id == repo_id).limit(1)).first() 564 | storage_id = r.storage_id if r else None 565 | repo_storage_id[repo_id] = storage_id 566 | session.remove() 567 | return storage_id 568 | --------------------------------------------------------------------------------