├── test ├── python ├── smashbox │ ├── __init__.py │ ├── configgen │ │ ├── __init__.py │ │ ├── processors_hooks.py │ │ └── generator.py │ ├── protocol │ ├── compatibility │ │ └── __init__.py │ ├── owncloudorg │ │ ├── __init__.py │ │ ├── remote_sharing.py │ │ └── locking.py │ ├── no_engine.py │ ├── setup.py │ ├── utilities │ │ ├── reflection.py │ │ ├── structures.py │ │ ├── monitoring.py │ │ └── version.py │ ├── reporter.py │ └── script.py └── argparse_test.py ├── corruption_test ├── kill-smash ├── show-smash ├── README ├── reset_logs-smash ├── stat-smash ├── run_nplusone_loop ├── run_all ├── run_storm_loop └── md5blocks ├── requirements.txt ├── travis ├── check-syntax.py └── check-syntax.sh ├── protocol ├── __init__.py ├── test_protocol_simple_upload.py ├── test_protocol_chunked_upload.py ├── test_protocol_chunked_upload_android_bug_900.py ├── checksum.md ├── test_protocol_doc.py └── test_protocol_file_checksum.py ├── .travis.yml ├── client ├── owncloudcmd.rh6 ├── owncloudcmd.osx └── compile-owncloud-sync-client ├── lib ├── oc-tests │ ├── test_scrapeLogFile.py │ ├── test_reshareDir.py │ └── test_uploadFiles.py ├── examples │ ├── test_hello2.py │ ├── test_hello3.py │ └── test_hello.py ├── README ├── test_dirDelete.py ├── test_nplustwo.py ├── test_fileTinkerDownload.py ├── test_userload.py ├── owncloud │ ├── test_moveFileStatusCode.py │ ├── test_backupRestored.py │ ├── test_moveFilesTwice.py │ ├── test_chunking.py │ ├── test_dirDepth.py │ ├── test_dirBecomesFile.py │ └── test_locking.py ├── test_storm.py ├── test_dirMove.py ├── test_slowwrite.py ├── test_fileMove.py ├── test_localDirRenameRecreate.py ├── test_nplusone.py ├── test_concurrentDirMove.py ├── test_fileDownloadAbort.py ├── test_unicodejam.py ├── test_concurrentOverwriteFile.py ├── test_filenames.py ├── test_pingpong.py ├── test_concurrentDirRemove.py └── test_deltamove.py ├── .gitignore ├── server-tools ├── delete_user.php ├── check_user.php └── create_user.php ├── etc ├── makeconfig ├── smashbox.conf.template └── smashbox.conf.template-owncloud ├── bin ├── run_all_integration.sh └── config_gen └── .gitlab-ci.yml /test: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/smashbox/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/smashbox/configgen/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/smashbox/protocol: -------------------------------------------------------------------------------- 1 | ../../protocol -------------------------------------------------------------------------------- /python/smashbox/compatibility/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /python/smashbox/owncloudorg/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /corruption_test/kill-smash: -------------------------------------------------------------------------------- 1 | kill `ps aux | grep smash | awk '{print $2}'` 2 | -------------------------------------------------------------------------------- 
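A note on the two process helpers in corruption_test: the kill-smash one-liner above kills every process whose command line contains "smash" (the grep in the pipeline matches itself as well), while show-smash below avoids that with the grep smas[h] trick. A rough, hypothetical Python equivalent of kill-smash (not part of the repository), shown only to illustrate the same idea using pgrep:

import os, signal, subprocess

# list PIDs of processes whose full command line mentions "smash" (pgrep excludes itself)
pids = subprocess.check_output(['pgrep', '-f', 'smash']).split()
for pid in pids:
    if int(pid) != os.getpid():           # do not kill the script doing the killing
        os.kill(int(pid), signal.SIGTERM)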
/corruption_test/show-smash: -------------------------------------------------------------------------------- 1 | ps aux | grep smas[h] | grep -v show-smash | wc -l 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -e git+https://github.com/owncloud/pyocclient.git@master#egg=pyocclient 2 | -------------------------------------------------------------------------------- /corruption_test/README: -------------------------------------------------------------------------------- 1 | Copy ../etc/smashbox.conf into this directory and adjust accordingly. 2 | -------------------------------------------------------------------------------- /corruption_test/reset_logs-smash: -------------------------------------------------------------------------------- 1 | mkdir -p archive 2 | install --backup=numbered test_storm.log archive 3 | echo > test_storm.log -------------------------------------------------------------------------------- /travis/check-syntax.py: -------------------------------------------------------------------------------- 1 | import py_compile 2 | import sys 3 | 4 | sys.stderr = sys.stdout 5 | py_compile.compile(sys.argv[1]) 6 | -------------------------------------------------------------------------------- /corruption_test/stat-smash: -------------------------------------------------------------------------------- 1 | echo iter: `grep runid test_storm.log | wc -l` errors: `grep ERROR test_storm.log | grep Missing | wc -l` 2 | -------------------------------------------------------------------------------- /python/smashbox/no_engine.py: -------------------------------------------------------------------------------- 1 | 2 | def add_worker(f,name=None): 3 | return f 4 | 5 | import logging 6 | logger = logging.getLogger() 7 | 8 | config={} 9 | -------------------------------------------------------------------------------- /protocol/__init__.py: -------------------------------------------------------------------------------- 1 | # for convenience and clarity we keep all the protocol-related 2 | # functions together with the documentation in this directory 3 | 4 | 5 | from protocol import * 6 | 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "2.7" 5 | 6 | branches: 7 | only: 8 | - master 9 | - /^stable\d+(\.\d+)?$/ 10 | 11 | matrix: 12 | fast_finish: true 13 | 14 | script: 15 | 16 | # Run smashbox tests 17 | - ./travis/check-syntax.sh 18 | -------------------------------------------------------------------------------- /python/smashbox/setup.py: -------------------------------------------------------------------------------- 1 | def standardSetup(script_path): 2 | """A setup hook called when the internal environment setup is 3 | performed (typically by the boilterplate logic of the 4 | executables). The `script_path` of the calling script may be used 5 | as a hint to further customize the setup. 6 | """ 7 | pass 8 | -------------------------------------------------------------------------------- /client/owncloudcmd.rh6: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Wrapper to start the correct owncloud binary with proper preinitializations. 4 | # This should be made available by owncloud in their RPM! 
5 | 6 | source /opt/qt-4.8/bin/qt-4.8-env.sh 7 | source /opt/neon-0.30.0/oc-neon-env.sh 8 | 9 | exec /opt/qt-4.8/bin/owncloudcmd $* 10 | 11 | #end 12 | -------------------------------------------------------------------------------- /travis/check-syntax.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exitCode=0 4 | for FILE in $(find ../ -name "*.py" -type f -not -path "*/.git/*") 5 | do 6 | errors=$(python travis/check-syntax.py $FILE) 7 | if [ "$errors" != "" ] 8 | then 9 | echo -n "${errors}" 10 | exitCode=1 11 | fi 12 | done 13 | 14 | echo "" 15 | 16 | exit $exitCode 17 | -------------------------------------------------------------------------------- /python/smashbox/configgen/processors_hooks.py: -------------------------------------------------------------------------------- 1 | class LoggingHook(object): 2 | ''' 3 | Simple logging hook for the processors to log what's happening 4 | ''' 5 | def __init__(self, logger, level): 6 | self.logger = logger 7 | self.level = level 8 | 9 | def notify_me(self, processor_name, event_type, message): 10 | self.logger.log(self.level, '%s - %s : %s' % (processor_name, event_type, message)) 11 | -------------------------------------------------------------------------------- /lib/oc-tests/test_scrapeLogFile.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | Test copying over the server log file and searching it for some better known nasty errors 5 | 6 | """ 7 | 8 | from smashbox.utilities import * 9 | import glob 10 | 11 | @add_worker 12 | def scrapeServerLog(step): 13 | 14 | step (1, 'create directory for server log') 15 | d = make_workdir() 16 | 17 | step (2, 'scrape log file for errors') 18 | scrape_log_file(d) 19 | 20 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | /etc/smashbox.conf 3 | /src/ 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Packages 9 | *.egg 10 | *.egg-info 11 | dist 12 | build 13 | eggs 14 | parts 15 | var 16 | sdist 17 | develop-eggs 18 | .installed.cfg 19 | lib64 20 | __pycache__ 21 | 22 | # Installer logs 23 | pip-log.txt 24 | 25 | # Unit test / coverage reports 26 | .coverage 27 | .tox 28 | nosetests.xml 29 | 30 | # Translations 31 | *.mo 32 | 33 | # Mr Developer 34 | .mr.developer.cfg 35 | .project 36 | .pydevproject 37 | -------------------------------------------------------------------------------- /lib/examples/test_hello2.py: -------------------------------------------------------------------------------- 1 | 2 | # this example shows the logic of handling fatal errors 3 | 4 | # import utilities which are the building block of each testcase 5 | from smashbox.utilities import * 6 | from smashbox.utilities import reflection 7 | 8 | @add_worker 9 | def helloA(step): 10 | step(1) 11 | 12 | fatal_check(False,'this is a FATAL error') 13 | 14 | 15 | @add_worker 16 | def helloB(step): 17 | step(1) 18 | 19 | fatal_check(False,'this is a FATAL error') 20 | 21 | 22 | @add_worker 23 | def helloC(step): 24 | step(2) 25 | 26 | logger.error('Executing post fatal handler') 27 | -------------------------------------------------------------------------------- /python/argparse_test.py: -------------------------------------------------------------------------------- 1 | #import sys 2 | #sys.path.append('/opt/rh/python27/root/usr/lib64/python2.7/') 3 | import 
smashbox.compatibility.argparse as argparse 4 | 5 | parser = argparse.ArgumentParser(description='Process some integers.') 6 | parser.add_argument('integers', metavar='N', type=int, nargs='+', 7 | help='an integer for the accumulator') 8 | parser.add_argument('--sum', dest='accumulate', action='store_const', 9 | const=sum, default=max, 10 | help='sum the integers (default: find the max)') 11 | 12 | args = parser.parse_args() 13 | print args.accumulate(args.integers) 14 | -------------------------------------------------------------------------------- /server-tools/delete_user.php: -------------------------------------------------------------------------------- 1 | 28 | -------------------------------------------------------------------------------- /lib/examples/test_hello3.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = "This example shows the testcases." 3 | 4 | # import utilities which are the building block of each testcase 5 | from smashbox.utilities import * 6 | 7 | A = int(config.get('hello3_A',0)) 8 | B = int(config.get('hello3_B',0)) 9 | 10 | testsets = [{"hello3_A":1,"hello3_B":2},{"hello3_A":111,"hello3_B":222}] 11 | 12 | logger.info("Loading testcase module...") 13 | 14 | @add_worker 15 | def helloA(step): 16 | step(1) 17 | 18 | logger.info("My A=%d",A) 19 | list_files('.') 20 | 21 | @add_worker 22 | def helloB(step): 23 | step(2) 24 | 25 | logger.info("My B=%d",B) 26 | 27 | 28 | 29 | @add_worker 30 | def helloC(step): 31 | step(3) 32 | 33 | logger.info("My A+B=%d",A+B) 34 | -------------------------------------------------------------------------------- /lib/README: -------------------------------------------------------------------------------- 1 | 2 | Tests to add 3 | ============ 4 | 5 | - Metadata propagation of files (atime, ctime, mtime). 6 | - Long paths (>255 characters nested directory tree; see the sketch after this list) 7 | - Complex file write/update patterns (in place, MS Office-like writes, ...) 8 | - Rename an existing synced file into a directory with the same name... 9 | - disaster recovery: roll back database to previous version (old etag may reappear for a file modified on the server => what will happen to a client which has not yet synced and has the old etag locally?) 10 | - FILEID client mapping: A and B fully synced, go offline, server-side rename A/a.txt to B/b.txt, go online, mv b.txt to a.txt or elsewhere. Is the move propagated (file id cached or refreshed on the client)? 11 | - EOS: what happens to versions under such rename...
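A minimal sketch of how one of the ideas above (long paths) could be turned into a smashbox testcase, written in the same style as lib/examples/ and lib/test_dirDelete.py; the option name longPaths_depth and the exact expected client behaviour are assumptions, not an agreed specification:

from smashbox.utilities import *
import os.path

DEPTH = int(config.get('longPaths_depth', 60))  # hypothetical config option

@add_worker
def workerA(step):
    reset_owncloud_account()
    reset_rundir()

    step(1, 'create a nested tree whose total path length exceeds 255 characters')
    syncdir = make_workdir()
    d = mkdir(os.path.join(syncdir, *['d%03d' % i for i in range(DEPTH)]))
    createfile(os.path.join(d, 'leaf.dat'), '0', count=1, bs=1000)
    run_ocsync(syncdir)

@add_worker
def workerB(step):
    step(2, 'sync down on a second client and check that the leaf file arrived')
    syncdir = make_workdir()
    run_ocsync(syncdir)
    leaf = os.path.join(syncdir, *(['d%03d' % i for i in range(DEPTH)] + ['leaf.dat']))
    error_check(os.path.isfile(leaf), 'path %s should be a file' % leaf)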
12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /server-tools/check_user.php: -------------------------------------------------------------------------------- 1 | 35 | -------------------------------------------------------------------------------- /protocol/test_protocol_simple_upload.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import * 3 | from smashbox.protocol import file_upload, file_download 4 | 5 | @add_worker 6 | def main(step): 7 | 8 | d = make_workdir() 9 | reset_owncloud_account() 10 | 11 | URL = oc_webdav_url() 12 | 13 | filename=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.3)) 14 | 15 | r=file_upload(filename,URL) 16 | file_download(os.path.basename(filename),URL,d) 17 | analyse_hashfiles(d) 18 | 19 | # upload again matching the existing etag 20 | r=file_upload(filename,URL,header_if_match=r.headers['ETag']) 21 | analyse_hashfiles(d) 22 | 23 | # upload again with a non-matching etag 24 | r = file_upload(filename,URL,header_if_match='!@# does not exist 123') 25 | fatal_check(r.rc == 412) # precondition failed 26 | -------------------------------------------------------------------------------- /protocol/test_protocol_chunked_upload.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import * 3 | from smashbox.protocol import chunk_file_upload, file_upload, file_download 4 | 5 | @add_worker 6 | def main(step): 7 | 8 | d = make_workdir() 9 | reset_owncloud_account() 10 | 11 | URL = oc_webdav_url() 12 | 13 | filename=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3)) 14 | 15 | r=chunk_file_upload(filename,URL) 16 | file_download(os.path.basename(filename),URL,d) 17 | analyse_hashfiles(d) 18 | 19 | # upload again matching the existing etag 20 | r=chunk_file_upload(filename,URL,header_if_match=r.headers['ETag']) 21 | analyse_hashfiles(d) 22 | 23 | # upload again with a non-matching etag 24 | r = chunk_file_upload(filename,URL,header_if_match='!@# does not exist 123') 25 | fatal_check(r.rc == 412) # precondition failed 26 | -------------------------------------------------------------------------------- /etc/makeconfig: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | echo "generating smashbox.conf" $PWD 4 | 5 | cp -i smashbox.conf.template smashbox.conf 6 | 7 | sed -i 's/smashdir.*/smashdir = "\/home\/jennifer\/smashbox\/results"/' smashbox.conf 8 | sed -i 's/oc_account_name.*/oc_account_name = "user"/' smashbox.conf 9 | sed -i 's/oc_group_name.*/oc_group_name = "testgroup"/' smashbox.conf 10 | sed -i 's/oc_account_password.*/oc_account_password = "demo"/' smashbox.conf 11 | sed -i 's/oc_server .*/oc_server = "172.18.5.74"/' smashbox.conf 12 | sed -i 's/oc_server_folder.*/oc_server_folder = ""/' smashbox.conf 13 | sed -i 's/oc_ssl_enabled.*/oc_ssl_enabled = False/' smashbox.conf 14 | sed -i 's/oc_server_shell_cmd.*/oc_server_shell_cmd = "ssh root@172.18.5.74"/' smashbox.conf 15 | sed -i 's/oc_server_tools_path.*/oc_server_tools_path = "\/usr\/local\/jenkins\/workspace\/smashbox\/server-tools"/' smashbox.conf 16 | sed -i 's/oc_sync_cmd.*/oc_sync_cmd = "\/usr\/bin\/owncloudcmd --trust"/' smashbox.conf 17 | 18 | -------------------------------------------------------------------------------- /client/owncloudcmd.osx: 
-------------------------------------------------------------------------------- 1 | # 2 | # this is a workaround for broken owncloudcmd packaging on MacOSX 3 | # https://github.com/owncloud/mirall/issues/1896 4 | # 5 | # owncloud is installed in the standard location 6 | 7 | CMD=$1 8 | PKG=$1$OC_VERSION 9 | shift 10 | 11 | if [ x$CMD == x ]; then 12 | echo missing command-name 13 | echo syntax: owncloudcmd.osx CMD 14 | echo examples: 15 | echo owncloudcmd.osx cernbox 16 | echo owncloudcmd.osx owncloud 17 | echo env OC_VERSION=-1.6.4 owncloudcmd.osx owncloud 18 | exit 1 19 | fi 20 | 21 | #echo /Applications/${PKG}.app/Contents/MacOS/${CMD}cmd 22 | #echo /Applications/${PKG}.app/Contents/Frameworks/*/Versions/5 23 | 24 | function join { local IFS="$1"; shift; echo "$*"; } 25 | 26 | export DYLD_FALLBACK_LIBRARY_PATH=`join : /Applications/${PKG}.app/Contents/Frameworks/*/Versions/5 /Applications/${PKG}.app/Contents/MacOS/${CMD}cmd`:/Applications/${PKG}.app/Contents/MacOS 27 | 28 | #echo $DYLD_FALLBACK_LIBRARY_PATH 29 | 30 | /Applications/${PKG}.app/Contents/MacOS/${CMD}cmd $* 31 | -------------------------------------------------------------------------------- /server-tools/create_user.php: -------------------------------------------------------------------------------- 1 | 40 | -------------------------------------------------------------------------------- /bin/run_all_integration.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$1" ] ; then 4 | echo "Running from etc/smashbox.conf locally" 5 | 6 | if [ ! -f requirements.txt ]; then 7 | echo "bin/smash not found in this directory, cd to smashbox root dir then run this script" 8 | exit 1 9 | fi 10 | 11 | CMD="bin/smash -v -a" 12 | else 13 | echo "Running from in docker again server ip $1" 14 | CMD="docker run -e SMASHBOX_URL=$1 -e SMASHBOX_USERNAME=admin -e SMASHBOX_PASSWORD=admin -e SMASHBOX_ACCOUNT_PASSWORD=admin owncloud/smashbox" 15 | fi 16 | 17 | $CMD lib/test_basicSync.py && \ 18 | $CMD lib/test_concurrentDirRemove.py && \ 19 | $CMD lib/test_nplusone.py && \ 20 | $CMD lib/oc-tests/test_reshareDir.py && \ 21 | $CMD lib/oc-tests/test_shareDir.py && \ 22 | $CMD lib/oc-tests/test_shareFile.py && \ 23 | $CMD lib/oc-tests/test_shareGroup.py && \ 24 | $CMD lib/oc-tests/test_shareLink.py && \ 25 | $CMD lib/oc-tests/test_sharePermissions.py && \ 26 | $CMD lib/owncloud/test_shareMountInit.py && \ 27 | $CMD lib/owncloud/test_sharePropagationGroups.py && \ 28 | $CMD lib/owncloud/test_sharePropagationInsideGroups.py 29 | -------------------------------------------------------------------------------- /python/smashbox/utilities/reflection.py: -------------------------------------------------------------------------------- 1 | # ##### REFLECTION ############ 2 | 3 | # some generic helpers to provide reflection on the execution framework itself (the framework must be setting here the _smash_ object at import) 4 | def getProcessName(): 5 | """ This is the name of the function which defines the execution code for the worker. 6 | """ 7 | return _smash_.process_name 8 | 9 | def getWorkerNumber(): 10 | """ This is 0 for supervisor process, 0 for the first worker process, etc. 11 | """ 12 | return _smash_.process_number 13 | 14 | def getCurrentStep(): 15 | """ Get current step. When worker is waiting at step(N) then it's 16 | current step is N-1. So until it passes step(1) the current step 17 | is 0. 
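For example, a worker that has already passed step(3) but has not yet reached step(4) reports 3 as its current step.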
18 | """ 19 | if getWorkerNumber() is None: 20 | return None 21 | return _smash_.steps[getWorkerNumber()] 22 | 23 | def getSharedObject(): 24 | """ Get the object which allows to share state between worker processes. 25 | """ 26 | return _smash_.shared_object 27 | 28 | def getNumberOfWorkers(): 29 | return len(_smash_.workers) 30 | 31 | def getTestcaseFilename(): 32 | """ The absolute path to the file containing current testcase. 33 | """ 34 | return _smash_.args.test_target 35 | -------------------------------------------------------------------------------- /lib/test_dirDelete.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | 3 | __doc__ = """ This test creates a deeply nested directory structure and then removes it 4 | 5 | """ 6 | 7 | 8 | import os.path 9 | 10 | NESTING_LEVELS = config.get('dirDel_nestingLevels', 50) 11 | 12 | nfiles = int(config.get('dirDel_nfiles', 100)) 13 | 14 | TEST_FILES = ['test%02d.dat'%i for i in range(nfiles)] 15 | 16 | @add_worker 17 | def workerA(step): 18 | 19 | #cleanup remote and local test environment - this should be run once by one worker only 20 | reset_owncloud_account() 21 | reset_rundir() 22 | 23 | step(0,'create initial content and sync') 24 | 25 | # this will be our syncdir (this is different for every worker) 26 | syncdir = make_workdir() 27 | 28 | # create a folder and some files in it 29 | path = "0" 30 | for i in xrange(1, NESTING_LEVELS): 31 | path = path + "/" + str(i) 32 | d1 = mkdir(os.path.join(syncdir, path)) 33 | 34 | for f in TEST_FILES: 35 | fn = os.path.join(d1,f) 36 | createfile(fn,'0',count=1,bs=1000) 37 | 38 | run_ocsync(syncdir) 39 | 40 | step(2,'delete the folder and sync') 41 | 42 | topLevelDir = path.split("/", 1)[0] 43 | d2 = os.path.join(syncdir, topLevelDir) 44 | 45 | remove_tree(d2) 46 | 47 | #createfile(os.path.join(syncdir,'touch'),'0',count=1,bs=1) 48 | 49 | expect_webdav_exist(topLevelDir) 50 | run_ocsync(syncdir) 51 | 52 | expect_does_not_exist(d2) 53 | expect_webdav_does_not_exist(topLevelDir) 54 | 55 | -------------------------------------------------------------------------------- /lib/test_nplustwo.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | __doc__ = """ Add nfiles to a directory (two clients) and check consistency after synch. 
6 | """ 7 | 8 | from smashbox.utilities import * 9 | from smashbox.utilities.hash_files import * 10 | 11 | nfiles = int(config.get('nplustwo_nfiles',10)) 12 | 13 | def adder(step): 14 | 15 | # if you don't want to cleanup server files from previous run then set oc_account_reset_procedure=keep option 16 | # 17 | # smash -o oc_account_reset_procedure=keep lib/test_nplustwo.py (or in etc/smashbox.conf) 18 | 19 | reset_owncloud_account() 20 | 21 | # cleanup all local files for the test 22 | reset_rundir() 23 | 24 | step(1,'Preparation') 25 | d = make_workdir() 26 | 27 | run_ocsync(d) 28 | k0 = count_files(d) 29 | 30 | step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles) 31 | 32 | for i in range(nfiles): 33 | create_hashfile(d,size=config.hashfile_size) 34 | 35 | run_ocsync(d) 36 | 37 | step(3,'Get other files from server and check') 38 | 39 | run_ocsync(d) 40 | (ntot,k1,ncorruptions) = analyse_hashfiles(d) 41 | 42 | error_check(k1-k0==2*nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 43 | error_check(ncorruptions==0,'After synch %d corrupted files found'%(ncorruptions)) 44 | 45 | logger.info('SUCCESS: %d files found',k1) 46 | 47 | @add_worker 48 | def worker0(step): 49 | adder(step) 50 | 51 | @add_worker 52 | def worker1(step): 53 | adder(step) 54 | 55 | -------------------------------------------------------------------------------- /corruption_test/run_nplusone_loop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | import os,sys 4 | import subprocess 5 | 6 | thisdir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0]))) 7 | smashdir=os.path.join(thisdir,'..') 8 | 9 | conf_file = os.environ.get('SMASHBOX_CONF',os.path.join(thisdir,"smashbox.conf")) 10 | 11 | dirs = {'thisdir':thisdir,'smashdir':smashdir, 'conf_file':conf_file} 12 | 13 | os.environ['OWNCLOUD_MAX_PARALLEL'] = '3' 14 | 15 | # this disables the checksumming! 16 | #os.environ['OWNCLOUD_USE_LEGACY_JOBS'] = '1' 17 | 18 | import datetime 19 | now = datetime.datetime.now().strftime('%Y-%m-%d-%H%M%S') 20 | 21 | os.environ['SMASHBOX_NOW'] = now 22 | 23 | try: 24 | N = int(sys.argv[1]) 25 | except Exception,x: 26 | print """ERROR: missing or invalid argument (%s) 27 | 28 | syntax: %s N 29 | 30 | N is number of test repetitions, you may specify -1 for indefinite loop 31 | """% (repr(x),sys.argv[0]) 32 | 33 | sys.exit(-1) 34 | 35 | i = 1 36 | 37 | dirs['options']="-o nplusone_nfiles=20 -o nplusone_filesize='(5.0,1.37)'" # --keep-state" 38 | #dirs['options']="-o nplusone_nfiles=20 -o nplusone_filesize=30000000" 39 | 40 | # infinite loop and ignore any casual errors (stop on fatal errors only) 41 | dirs['options'] += " --loop=0 --keep-going " 42 | 43 | cmd = '%(smashdir)s/bin/smash -c %(conf_file)s %(options)s %(smashdir)s/lib/test_nplusone.py' % dirs 44 | 45 | #print cmd 46 | 47 | while N: 48 | 49 | #print "test number",i 50 | 51 | rc = subprocess.call(cmd,shell=True) 52 | 53 | if rc != 0: 54 | break 55 | 56 | i+=1 57 | N-=1 58 | 59 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # Read more about this feature here: https://docs.gitlab.com/ee/user/application_security/secret_detection 2 | # 3 | # Configure the scanning tool through the environment variables. 
4 | # List of the variables: https://docs.gitlab.com/ee/user/application_security/secret_detection/#available-variables 5 | # How to set: https://docs.gitlab.com/ee/ci/yaml/#variables 6 | 7 | variables: 8 | SECURE_ANALYZERS_PREFIX: "$CI_TEMPLATE_REGISTRY_HOST/security-products" 9 | SECRET_DETECTION_IMAGE_SUFFIX: "" 10 | 11 | SECRETS_ANALYZER_VERSION: "6" 12 | SECRET_DETECTION_EXCLUDED_PATHS: "" 13 | 14 | .secret-analyzer: 15 | stage: test 16 | image: "$SECURE_ANALYZERS_PREFIX/secrets:$SECRETS_ANALYZER_VERSION$SECRET_DETECTION_IMAGE_SUFFIX" 17 | services: [] 18 | allow_failure: true 19 | variables: 20 | GIT_DEPTH: "50" 21 | # `rules` must be overridden explicitly by each child job 22 | # see https://gitlab.com/gitlab-org/gitlab/-/issues/218444 23 | artifacts: 24 | access: 'developer' 25 | reports: 26 | secret_detection: gl-secret-detection-report.json 27 | 28 | secret_detection: 29 | extends: .secret-analyzer 30 | rules: 31 | - if: $SECRET_DETECTION_DISABLED == 'true' || $SECRET_DETECTION_DISABLED == '1' 32 | when: never 33 | - if: $CI_PIPELINE_SOURCE == "merge_request_event" # Add the job to merge request pipelines if there's an open merge request. 34 | - if: $CI_OPEN_MERGE_REQUESTS # Don't add it to a *branch* pipeline if it's already in a merge request pipeline. 35 | when: never 36 | - if: $CI_COMMIT_BRANCH # If there's no open merge request, add it to a *branch* pipeline instead. 37 | script: 38 | - /analyzer run 39 | -------------------------------------------------------------------------------- /lib/test_fileTinkerDownload.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ Create/modify a file locally while a file with the same name gets downloaded from the server. 
7 | """ 8 | 9 | from smashbox.utilities import * 10 | from smashbox.utilities.hash_files import * 11 | 12 | tinker_wait = int(config.get('fileTinkerDownload_tinker_wait',2)) 13 | filesize = config.get('fileTinkerDownload_filesize',300000000) 14 | 15 | if type(filesize) is type(''): 16 | filesize = eval(filesize) 17 | 18 | testsets = [ 19 | { 'fileTinkerDownload_filesize': 300000000, 20 | 'fileTinkerDownload_tinker_wait': i 21 | } for i in range(1,5) ] 22 | 23 | 24 | @add_worker 25 | def worker0(step): 26 | 27 | # do not cleanup server files from previous run 28 | reset_owncloud_account() 29 | 30 | # cleanup all local files for the test 31 | reset_rundir() 32 | 33 | step(1,'Preparation') 34 | d = make_workdir() 35 | run_ocsync(d) 36 | k0 = count_files(d) 37 | 38 | step(2,'Add a file: filesize=%s'%filesize) 39 | 40 | create_hashfile(d,filemask='TINKER.DAT',size=filesize) 41 | 42 | run_ocsync(d) 43 | 44 | @add_worker 45 | def worker1(step): 46 | step(1,'Preparation') 47 | d = make_workdir('worker1') 48 | run_ocsync(d) 49 | k0 = count_files(d) 50 | 51 | step(3,'Sync the file down') 52 | run_ocsync(d) 53 | 54 | step(4) 55 | 56 | @add_worker 57 | def tinkerer(step): 58 | d = make_workdir('worker1') # use the same workdir as worker1 59 | 60 | step(3,'Tinker with the file while the worker1 downloads') 61 | 62 | sleep(tinker_wait) 63 | 64 | fn,md5 = create_hashfile2(d,filemask='TINKER.DAT',size=filesize) 65 | 66 | step(4) # worker1 ended syncing 67 | 68 | error_check(md5sum(fn) == md5) 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /python/smashbox/reporter.py: -------------------------------------------------------------------------------- 1 | class Reporter: 2 | """ Report execution state of smashbox. 3 | """ 4 | 5 | def __init__(self): 6 | self.DEMO=False 7 | 8 | def smashbox_start(self,args,config): 9 | """ 10 | Smashbox is starting. 11 | Arguments: 12 | - args: Namespace object with all the options passed when invoking smash executable 13 | - config: global configuration object 14 | """ 15 | 16 | if self.DEMO: 17 | print "SMASHBOX_START",args,config 18 | self.config=config 19 | 20 | def smashbox_stop(self): 21 | """ 22 | Smashbox is about to stop. 23 | """ 24 | 25 | if self.DEMO: 26 | print "SMASHBOX_STOP" 27 | 28 | 29 | def testcase_start(self,name,loop_i,testset_i,namespace): 30 | """ 31 | Testcase is about to start. 
32 | Arguments: 33 | - name: short name of the testcase 34 | - loop_i: loop index or None if not running in the loop 35 | - testset_i: testset index or None if running with default configuration 36 | - namespace: access to the testcase module namespace 37 | 38 | Example: 39 | try: 40 | print "Current testset configuration", namespace.testsets[testset_i] 41 | except AttributeError: 42 | print "Testsets not defined" 43 | 44 | """ 45 | 46 | if self.DEMO: 47 | print "TESTCASE_START",name,loop_i,testset_i,namespace.__doc__ 48 | 49 | barename=name.replace("test_","") 50 | 51 | for c in self.config.__dict__: 52 | if c.startswith(barename+"_"): 53 | print c,self.config[c] 54 | 55 | 56 | def testcase_stop(self,returncode): 57 | if self.DEMO: 58 | print "TESTCASE_STOP",returncode 59 | 60 | -------------------------------------------------------------------------------- /corruption_test/run_all: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | import os,sys 4 | 5 | thisdir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0]))) 6 | smashdir=os.path.join(thisdir,'..') 7 | 8 | 9 | try: 10 | LOOP=os.environ['LOOP'] 11 | except KeyError,x: 12 | LOOP=1 13 | 14 | possible_actions=['run_nplusone_loop','run_storm_loop','kill','show','stat','reset_logs'] 15 | try: 16 | N = int(sys.argv[1]) 17 | action = sys.argv[2] 18 | assert(action in possible_actions) 19 | except Exception,x: 20 | print """ ERROR: missing or invalid arguments (%s) 21 | 22 | syntax: %s N action 23 | 24 | N - number of hosts (head of host.list) 25 | action - %s 26 | 27 | You may use the environment variable LOOP to define the number of times the test should loop on each client. By default the command will loop: %d time(s) /-1 means forever/ 28 | 29 | """ % (repr(x),sys.argv[0],', '.join(possible_actions),LOOP) 30 | sys.exit(-1) 31 | 32 | 33 | def action_cmd(): 34 | if action[0:3] == 'run': 35 | return 'time %(smashdir)s/corruption_test/%(action)s %(k)s &' % {'smashdir':smashdir,'k':LOOP,'action':action} 36 | if action == 'kill': 37 | return '%(smashdir)s/corruption_test/kill-smash' % {'smashdir':smashdir} 38 | if action == 'show': 39 | return '%(smashdir)s/corruption_test/show-smash' % {'smashdir':smashdir} 40 | if action == 'stat': 41 | return '%(smashdir)s/corruption_test/stat-smash' % {'smashdir':smashdir} 42 | if action == 'reset_logs': 43 | return '%(smashdir)s/corruption_test/reset_logs-smash' % {'smashdir':smashdir} 44 | 45 | 46 | host_list="%s/corruption_test/host.list"%smashdir 47 | 48 | for i,host in enumerate(file(host_list)): 49 | if N and i>=N: 50 | break 51 | 52 | host = host.strip() 53 | print i+1 54 | cmd = "ssh root@%s -i /b/private/octest.pem %s "%(host,action_cmd()) 55 | #print repr(cmd) 56 | os.system(cmd) 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /lib/test_userload.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import time 4 | import tempfile 5 | 6 | __doc__ = """ One uploader, n downloaders. Uploader creates nfiles and syncs them at the same time to the same account. The checker verifies integrity of files and completeness of sync.
7 | """ 8 | 9 | 10 | # Files created by the uploader 11 | nfiles = int(config.get('userload_nfiles',5)) 12 | # Number of downloaders 13 | nworkers = int(config.get('userload_nworkers',10)) 14 | # Verbose flag 15 | verbose = bool(config.get('userload_verbose',False)) 16 | 17 | hash_filemask = 'hash_{md5}' 18 | 19 | from smashbox.utilities import * 20 | from smashbox.utilities.hash_files import * 21 | 22 | @add_worker 23 | def uploader(step): 24 | 25 | reset_owncloud_account() 26 | reset_rundir() 27 | 28 | step(1,'Preparation') 29 | d = make_workdir() 30 | run_ocsync(d) 31 | k0 = count_files(d,filemask=hash_filemask) 32 | logger.info('Repository has %d files', k0) 33 | 34 | step(2,'Add files') 35 | logger.info('Adding %d files',nfiles) 36 | for i in range(nfiles): 37 | if verbose: logger.info('Prepare file %d',i) 38 | create_hashfile(d,filemask=hash_filemask) 39 | run_ocsync(d) 40 | logger.info('Step 2 ends here...') 41 | 42 | step(3,None) 43 | return 44 | 45 | 46 | def downloader(step): 47 | 48 | step(1,'Active clients are syncing...') 49 | d = make_workdir() 50 | run_ocsync(d) 51 | 52 | k0 = count_files(d,filemask=hash_filemask) 53 | logger.info('Repository has %d files',k0) 54 | 55 | step(2,'Download in parallel to upload...') 56 | 57 | run_ocsync(d) 58 | 59 | step(3,'Final download and check') 60 | 61 | run_ocsync(d) 62 | 63 | (ntot,nana,nbad) = analyse_hashfiles(d,filemask=hash_filemask) 64 | 65 | etot = k0 + nfiles 66 | error_check(etot == ntot,'Missing files (files at start %d, expected %d, found %d)'%(k0,etot,ntot)) 67 | 68 | 69 | for i in range(nworkers): 70 | add_worker(downloader,name="downloader%02d"%(i+1)) 71 | 72 | -------------------------------------------------------------------------------- /lib/owncloud/test_moveFileStatusCode.py: -------------------------------------------------------------------------------- 1 | from owncloud import HTTPResponseError 2 | 3 | __doc__ = """ 4 | 5 | Test moving a file via webdav 6 | 7 | """ 8 | 9 | from smashbox.utilities import * 10 | 11 | @add_worker 12 | def move_non_existing_file(step): 13 | 14 | step(1, 'Create a folder and a file') 15 | d = make_workdir() 16 | dir_name = os.path.join(d, 'folder') 17 | local_dir = make_workdir(dir_name) 18 | 19 | createfile(os.path.join(d, 'file1.txt'), '0', count=1000, bs=50) 20 | createfile(os.path.join(local_dir, 'file3.txt'), '1', count=1000, bs=50) 21 | run_ocsync(d, user_num=1) 22 | 23 | expect_webdav_exist('file1.txt', user_num=1) 24 | expect_webdav_does_not_exist(os.path.join('folder', 'file2.txt'), user_num=1) 25 | expect_webdav_exist(os.path.join('folder', 'file3.txt'), user_num=1) 26 | 27 | step(2, 'Move the file into the folder') 28 | 29 | oc = get_oc_api() 30 | oc.login("%s%i" % (config.oc_account_name, 1), config.oc_account_password) 31 | 32 | try: 33 | oc.move('file1.txt', os.path.join('folder', 'file2.txt')) 34 | except HTTPResponseError as err: 35 | error_check( 36 | False, 37 | 'Server replied with status code: %i' % err.status_code 38 | ) 39 | 40 | expect_webdav_does_not_exist('file1.txt', user_num=1) 41 | expect_webdav_exist(os.path.join('folder', 'file2.txt'), user_num=1) 42 | expect_webdav_exist(os.path.join('folder', 'file3.txt'), user_num=1) 43 | 44 | step(3, 'Move non existing file into the folder') 45 | 46 | try: 47 | oc.move('file1.txt', os.path.join('folder', 'file2.txt')) 48 | except HTTPResponseError as err: 49 | error_check( 50 | err.status_code == 404, 51 | 'Server replied with status code: %i' % err.status_code 52 | ) 53 | 54 | expect_webdav_does_not_exist('file1.txt', 
user_num=1) 55 | expect_webdav_exist(os.path.join('folder', 'file2.txt'), user_num=1) 56 | expect_webdav_exist(os.path.join('folder', 'file3.txt'), user_num=1) 57 | -------------------------------------------------------------------------------- /protocol/test_protocol_chunked_upload_android_bug_900.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import * 3 | from smashbox.protocol import chunk_file_upload,file_upload, file_download 4 | 5 | @add_worker 6 | def main(step): 7 | 8 | d = make_workdir() 9 | reset_owncloud_account() 10 | 11 | # we make client look like an Android client 12 | config.pycurl_USERAGENT = "Android-ownCloud" 13 | 14 | # this test should be run against the mobile endpoint of cernbox which may be convinently set in the config 15 | # for vanilla owncloud server mobile endpoint is the same as generic webdav endpoint 16 | webdav_endpoint = config.get('oc_mobile_webdav_endpoint',None) 17 | 18 | URL = oc_webdav_url(webdav_endpoint=webdav_endpoint) 19 | 20 | if webdav_endpoint is None: 21 | logger.warning('oc_mobile_webdav_endpoint was not defined, using standard endpoint, URL: %s',URL) 22 | 23 | # chunk size defined in the android source code 24 | # https://github.com/owncloud/android-library/blob/d7097983594347167b5bde3fa5b2b4ad1d843392/src/com/owncloud/android/lib/resources/files/ChunkedUploadRemoteFileOperation.java#L45 25 | # Note: specifying a different chunk size will result in corrupted file! 26 | # This is a hack until the android-client is properly fixed! 27 | 28 | ANDROID_CHUNKSIZE=1024*1000 29 | 30 | filename=create_hashfile(d,size=int(5.5*ANDROID_CHUNKSIZE)) 31 | 32 | # careful with the chunk size... 33 | r=chunk_file_upload(filename,URL,chunk_size=ANDROID_CHUNKSIZE,android_client_bug_900=True) 34 | file_download(os.path.basename(filename),URL,d) 35 | analyse_hashfiles(d) 36 | 37 | # upload again matching the existing etag 38 | r=chunk_file_upload(filename,URL,chunk_size=ANDROID_CHUNKSIZE,android_client_bug_900=True,header_if_match=r.headers['ETag']) 39 | analyse_hashfiles(d) 40 | 41 | # upload again with a non-matching etag 42 | r = chunk_file_upload(filename,URL,header_if_match='!@# does not exist 123') 43 | fatal_check(r.rc == 412) # precondition failed 44 | 45 | # TODO: 46 | # - make sure that without user agent header the upload fails 47 | # - 48 | -------------------------------------------------------------------------------- /corruption_test/run_storm_loop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | import os,sys 4 | import subprocess 5 | 6 | thisdir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0]))) 7 | smashdir=os.path.join(thisdir,'..') 8 | 9 | dirs = {'thisdir':thisdir,'smashdir':smashdir} 10 | 11 | os.environ['OWNCLOUD_MAX_PARALLEL'] = '5' 12 | 13 | try: 14 | N = int(sys.argv[1]) 15 | except Exception,x: 16 | print """ERROR: missing or invalid argument (%s) 17 | 18 | syntax: %s N 19 | 20 | N is number of test repetitions, you may specify -1 for indefinite loop 21 | """% (repr(x),sys.argv[0]) 22 | 23 | sys.exit(-1) 24 | i = 1 25 | 26 | dirs['options']="-o storm_nfiles=10 -o storm_filesize=1000 -o storm_nfiles=10 -o storm_nuploaders=10 -o storm_ndownloaders=10" 27 | #dirs['options']="-o storm_nfiles=5 -o storm_filesize=1000 -o storm_nfiles=10 -o storm_nuploaders=20 -o storm_ndownloaders=20" 28 | 29 | #dirs['options']="-o storm_filesize=1000 -o 
storm_nfiles=10 -o storm_nuploaders=5 -o storm_ndownloaders=5" # NO REDIRECT ERRORS IN THIS CONFIGURATION (9 client boxes) 30 | 31 | cmd = '%(smashdir)s/bin/smash -c %(thisdir)s/smashbox.conf %(options)s %(smashdir)s/lib/test_storm.py >> ~/test_storm.log 2>&1' % dirs 32 | 33 | #cmd = '%(smashdir)s/bin/smash -c %(thisdir)s/smashbox-eosdev.conf %(options)s %(smashdir)s/lib/test_storm.py >> ~/test_storm.log 2>&1' % dirs # THIS IS AGAINST EOSDEVSERVER 34 | 35 | #dirs['options']="-o storm_nfiles=10 -o storm_filesize=1000 -o storm_nfiles=10 -o storm_nuploaders=10 -o storm_ndownloaders=10" 36 | #cmd = '%(smashdir)s/bin/smash -c %(thisdir)s/smashbox-xrdcp-test.conf %(options)s %(smashdir)s/lib/test_storm_xrdcp.py >> ~/test_storm.log 2>&1' % dirs 37 | 38 | #dirs['options']="-o storm_nfiles=5 -o storm_filesize=1000 -o storm_nfiles=10 -o storm_nuploaders=20 -o storm_ndownloaders=20" 39 | #cmd = '%(smashdir)s/bin/smash -v -c %(thisdir)s/smashbox-curl-test.conf %(options)s %(smashdir)s/lib/test_storm_curl.py >> ~/test_storm.log 2>&1' % dirs 40 | 41 | os.system('echo "new loop sequence started %s" >> ~/test_storm.log'%dirs['options']) 42 | 43 | #print cmd 44 | 45 | while N: 46 | 47 | #print "test number",i 48 | 49 | rc = subprocess.call(cmd,shell=True) 50 | 51 | if rc != 0: 52 | break 53 | 54 | i+=1 55 | N-=1 56 | 57 | -------------------------------------------------------------------------------- /lib/test_storm.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import time 4 | import tempfile 5 | 6 | __doc__ = """ Each of nuploaders creates nfiles and syncs them at the same time to the same account. Each of ndownloaders downloads the files at the same time, verifies integrity of files and completness of sync. 7 | """ 8 | 9 | from smashbox.utilities import * 10 | from smashbox.utilities.hash_files import * 11 | 12 | # Files created by each uploader 13 | nfiles = int(config.get('storm_nfiles',10)) 14 | 15 | # Number of workers (uploading files) 16 | nuploaders = int(config.get('storm_nuploaders',10)) 17 | 18 | # Number of workers (downloading files) 19 | ndownloaders = int(config.get('storm_ndownloaders',10)) 20 | 21 | # Verbose flag 22 | verbose = bool(config.get('storm_verbose',False)) 23 | 24 | # File size. None = default size/distribution. 
25 | filesize = config.get('storm_filesize',None) 26 | 27 | def uploader(step): 28 | 29 | step(1,'Preparation') 30 | d = make_workdir() 31 | run_ocsync(d) 32 | k0 = count_files(d) 33 | logger.info('Repository has %d files', k0) 34 | 35 | step(2,'Add files') 36 | logger.info('Adding %d files',nfiles) 37 | for i in range(nfiles): 38 | if verbose: logger.info('Prepare file %d with filesize %d',i,filesize) 39 | create_hashfile(d,size=filesize) 40 | 41 | run_ocsync(d) 42 | logger.info('Step 2 ends here...') 43 | 44 | step(3,None) 45 | return 46 | 47 | for i in range(nuploaders): 48 | add_worker(uploader,name="uploader%02d"%(i+1)) 49 | 50 | @add_worker 51 | def initializer(step): 52 | 53 | reset_owncloud_account() 54 | reset_rundir() 55 | 56 | 57 | def downloader(step): 58 | step(1,'Active clients are syncing...') 59 | d = make_workdir() 60 | run_ocsync(d) 61 | 62 | k0 = count_files(d) 63 | logger.info('Repository has %d files',k0) 64 | 65 | step(2,'Active clients are uploading files...') 66 | 67 | step(3,'Download and check') 68 | 69 | sleep(1) # avoid race condition reading the file which has yet not been properly closed after writing 70 | 71 | run_ocsync(d) 72 | 73 | (ntot,nana,nbad) = analyse_hashfiles(d) 74 | 75 | etot = k0 + nfiles * nuploaders 76 | error_check(etot == ntot,'Missing files (files at start %d, expected %d, found %d)'%(k0,etot,ntot)) 77 | fatal_check(nbad == 0, 'Corrupted files found (%d)'%nbad) 78 | 79 | for i in range(ndownloaders): 80 | add_worker(downloader,name="downloader%02d"%(i+1)) 81 | -------------------------------------------------------------------------------- /lib/test_dirMove.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | 3 | __doc__ = """ This test moves a DIRA folder to DIRB folder. 
4 | 5 | By default, the top-level DIRA folder is moved to DIRB/DIRA 6 | """ 7 | 8 | testsets = [ {'dirMove_DIRA':'DIRA', 9 | 'dirMove_DIRB':'DIRB' }, 10 | 11 | {'dirMove_DIRA':'A/A/A/DIRA', 12 | 'dirMove_DIRB':'DIRB'}, 13 | 14 | {'dirMove_DIRA':'DIRA', 15 | 'dirMove_DIRB':'B/B/B/DIRB'}, 16 | 17 | {'dirMove_DIRA':'A/A/A/A/DIRA', 18 | 'dirMove_DIRB':'B/B/B/B/B/B/DIRB'} 19 | ] 20 | 21 | import os.path 22 | 23 | DIRA = os.path.normpath(config.get('dirMove_DIRA','DIRA')) 24 | DIRB = os.path.normpath(config.get('dirMove_DIRB','DIRB')) 25 | 26 | nfiles = int(config.get('dirMove_nfiles',10)) 27 | 28 | TEST_FILES = ['test%02d.dat'%i for i in range(nfiles)] 29 | 30 | @add_worker 31 | def workerA(step): 32 | 33 | #cleanup remote and local test environment - this should be run once by one worker only 34 | reset_owncloud_account() 35 | reset_rundir() 36 | 37 | step(0,'create initial content and sync') 38 | 39 | # this will be our syncdir (this is different for every worker) 40 | syncdir = make_workdir() 41 | 42 | # create a folder and some files in it 43 | d1 = mkdir(os.path.join(syncdir,DIRA)) 44 | 45 | for f in TEST_FILES: 46 | fn = os.path.join(d1,f) 47 | createfile(fn,'0',count=1000,bs=1000) 48 | 49 | run_ocsync(syncdir) 50 | 51 | step(2,'move the folder and sync') 52 | 53 | d2 = os.path.join(syncdir,DIRB) 54 | 55 | mkdir(d2) 56 | #mv(d1+'/*',d2) 57 | 58 | mv(d1,d2) 59 | 60 | #createfile(os.path.join(syncdir,'touch'),'0',count=1,bs=1) 61 | 62 | run_ocsync(syncdir) 63 | 64 | @add_worker 65 | def workerB(step): 66 | 67 | step(1,'sync the initial content') 68 | 69 | syncdir = make_workdir() 70 | run_ocsync(syncdir) 71 | 72 | step(3,'sync again to check if the change is there') 73 | 74 | run_ocsync(syncdir) 75 | 76 | # we expect to find DIRB and all test files in it 77 | # we expect DIRA is deleted 78 | 79 | d1 = os.path.join(syncdir,DIRA) 80 | d2 = os.path.join(syncdir,DIRB) 81 | 82 | logger.info('checking %s',d1) 83 | error_check(not os.path.exists(d1), "path %s should not exist"%d1) 84 | 85 | logger.info('checking %s',d2) 86 | error_check(os.path.isdir(d2), "path %s should be a directory"%d2) 87 | 88 | for fn in TEST_FILES: 89 | f = os.path.join(d2,os.path.basename(d1),fn) 90 | logger.info("checking %s",f) 91 | error_check(os.path.isfile(f), "path %s should be a file"%f) 92 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /lib/test_slowwrite.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ 7 | 8 | Synchronize local folder while writing into the file. 
9 | 10 | This is a testcase for: 11 | 12 | https://github.com/owncloud/mirall/issues/2210 (corrupted file upload if file modified during transfer) 13 | 14 | owncloudcmd will delay syncing of the file if the file is modified every 2 seconds or less (slowWrite < 2) 15 | 16 | """ 17 | 18 | from smashbox.utilities import * 19 | from smashbox.utilities.hash_files import * 20 | 21 | MB = 1024*1000 22 | 23 | filesizeKB = int(config.get('slowwrite_filesizeKB',10000)) 24 | blockSize = int(config.get('slowwrite_blockSize',MB)) 25 | slowWrite = int(config.get('slowwrite_slowWrite',1)) 26 | 27 | nfiles=1 28 | 29 | testsets = [ 30 | { 'slowwrite_filesizeKB': 2, 31 | 'slowwrite_blockSize': 200, 32 | 'slowwrite_slowWrite':1.5 33 | }, 34 | 35 | { 'slowwrite_filesizeKB': 5000, 36 | 'slowwrite_blockSize': MB, 37 | 'slowwrite_slowWrite':1 38 | }, 39 | 40 | { 'slowwrite_filesizeKB': 11000, 41 | 'slowwrite_blockSize': MB, 42 | 'slowwrite_slowWrite':1 43 | }, 44 | 45 | { 'slowwrite_filesizeKB': 25000, 46 | 'slowwrite_blockSize': MB, 47 | 'slowwrite_slowWrite':1 48 | }, 49 | 50 | 51 | { 'slowwrite_filesizeKB': 50000, 52 | 'slowwrite_blockSize': MB, 53 | 'slowwrite_slowWrite':1 54 | } 55 | ] 56 | 57 | @add_worker 58 | def writer(step): 59 | 60 | # do not cleanup server files from previous run 61 | reset_owncloud_account() 62 | 63 | # cleanup all local files for the test 64 | reset_rundir() 65 | 66 | step(1,'Preparation') 67 | d = make_workdir('writer') # bother writer and synchronizer share the same workdir 68 | run_ocsync(d) 69 | k0 = count_files(d) 70 | 71 | step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles) 72 | 73 | create_hashfile(d,size=filesizeKB*1000,bs=blockSize,slow_write=slowWrite) #config.hashfile_size) 74 | 75 | @add_worker 76 | def synchronizer(step): 77 | 78 | step(2,'Sync the file as it is being written by writer') 79 | 80 | sleep(slowWrite*2) 81 | d = make_workdir('writer') # bother writer and synchronizer share the same workdir 82 | run_ocsync(d) 83 | 84 | 85 | @add_worker 86 | def checker(step): 87 | 88 | step(1,'Preparation') 89 | d = make_workdir() 90 | run_ocsync(d) 91 | k0 = count_files(d) 92 | 93 | step(3,'Resync and check files added by synchronizer') 94 | 95 | run_ocsync(d) 96 | 97 | analyse_hashfiles(d) 98 | k1 = count_files(d) 99 | 100 | error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 101 | 102 | 103 | 104 | 105 | -------------------------------------------------------------------------------- /lib/owncloud/test_backupRestored.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | This test is testing that if the data-fingerprint changes because of a backup restoration 5 | we do not loose the newer file that were on the server 6 | [] 7 | 8 | """ 9 | 10 | from smashbox.utilities import * 11 | import subprocess 12 | import glob 13 | 14 | 15 | @add_worker 16 | def workerA(step): 17 | if compare_client_version('2.3.0', '<'): 18 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 19 | return 20 | 21 | #cleanup remote and local test environment - this should be run once by one worker only 22 | reset_owncloud_account() 23 | reset_rundir() 24 | 25 | 26 | 27 | step(0,'create initial content and sync') 28 | 29 | syncdir = make_workdir() 30 | folder1 = make_workdir(os.path.join(syncdir, 'folder1')) 31 | createfile(os.path.join(folder1, 'file.txt'), '0', count=1000, bs=50) 32 | createfile(os.path.join(syncdir, 'file1.txt'), 
'0', count=1000, bs=50) 33 | createfile(os.path.join(syncdir, 'file2.txt'), '0', count=1000, bs=50) 34 | createfile(os.path.join(syncdir, 'file3.txt'), '0', count=1000, bs=50) 35 | 36 | run_ocsync(syncdir) 37 | 38 | 39 | step(1,'simulate a backup restored by faking an old state') 40 | # it is as if file1.txt was newer and thus not present in the backup 41 | remove_file(os.path.join(syncdir, 'file1.txt')) 42 | 43 | # folder1 was not present on the backup 44 | remove_tree(os.path.join(syncdir, 'folder1')) 45 | 46 | # file2.txt is replaced by an "older" file 47 | createfile(os.path.join(syncdir, 'file2.txt'), '1', count=1000, bs=40) 48 | 49 | step(2, 'upload an the fake old state state') 50 | run_ocsync(syncdir) 51 | 52 | 53 | @add_worker 54 | def workerB(step): 55 | 56 | if compare_client_version('2.3.0', '<'): 57 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 58 | return 59 | 60 | step(1,'sync the initial content') 61 | 62 | syncdir = make_workdir() 63 | run_ocsync(syncdir) 64 | 65 | step(3,'simulate a backup by altering the data-fingerprint') 66 | 67 | #Since i can't change the data finferprint on the server, i change it on the client's database 68 | subprocess.check_output(["sqlite3" , os.path.join(syncdir, ".csync_journal.db"), 69 | "DELETE FROM datafingerprint; INSERT INTO datafingerprint (fingerprint) VALUES('1234');"]) 70 | 71 | run_ocsync(syncdir) 72 | 73 | error_check(os.path.isdir(os.path.join(syncdir, 'folder1')), 74 | "folder1 should have been restored ") 75 | 76 | error_check(os.path.exists(os.path.join(syncdir, 'folder1/file.txt')), 77 | "folder1/file.txt should have been restored ") 78 | 79 | conflict_files = get_conflict_files(syncdir) 80 | error_check(len(conflict_files) == 1, 81 | "file2 should have been backed up as a conflict ") 82 | 83 | -------------------------------------------------------------------------------- /lib/test_fileMove.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | 3 | __doc__ = """ This test moves files from one folder to another. 
4 | 5 | """ 6 | 7 | testsets = [ {'dirMove_DIRA':'DIRA', 8 | 'dirMove_DIRB':'DIRB' } 9 | ] 10 | 11 | import os.path 12 | 13 | DIRA = os.path.normpath(config.get('dirMove_DIRA','DIRA')) 14 | DIRB = os.path.normpath(config.get('dirMove_DIRB','DIRB')) 15 | 16 | nfiles = int(config.get('dirMove_nfiles',10)) 17 | 18 | TEST_FILES = ['test%02d.dat'%i for i in range(nfiles)] 19 | 20 | def check_files_exist(files,d): 21 | for fn in files: 22 | f = os.path.join(d,fn) 23 | logger.info("checking %s",f) 24 | error_check(os.path.isfile(f), "path %s should be a file"%f) 25 | 26 | @add_worker 27 | def workerA(step): 28 | 29 | #cleanup remote and local test environment - this should be run once by one worker only 30 | reset_owncloud_account() 31 | reset_rundir() 32 | 33 | step(0,'create initial content and sync') 34 | 35 | # this will be our syncdir (this is different for every worker) 36 | syncdir = make_workdir() 37 | 38 | # create a folder and some files in it 39 | d1 = mkdir(os.path.join(syncdir,DIRA)) 40 | 41 | for f in TEST_FILES: 42 | fn = os.path.join(d1,f) 43 | createfile(fn,'0',count=1000,bs=1000) 44 | 45 | run_ocsync(syncdir) 46 | 47 | step(2,'move the files in the folder and sync') 48 | 49 | d2 = mkdir(os.path.join(syncdir,DIRB)) 50 | 51 | mkdir(d2) 52 | 53 | for f in TEST_FILES: 54 | fn = os.path.join(d1,f) 55 | mv(fn,d2) 56 | 57 | #createfile(os.path.join(syncdir,'touch'),'0',count=1,bs=1) 58 | 59 | run_ocsync(syncdir) 60 | 61 | step(4,'move the files back to the original folder and sync ') 62 | 63 | for f in TEST_FILES: 64 | fn = os.path.join(d2,f) 65 | mv(fn,d1) 66 | 67 | run_ocsync(syncdir) 68 | 69 | step(5,'check if the files are OK after being moved back') 70 | 71 | check_files_exist(TEST_FILES,d1) 72 | 73 | 74 | @add_worker 75 | def workerB(step): 76 | 77 | step(1,'sync the initial content') 78 | 79 | syncdir = make_workdir() 80 | run_ocsync(syncdir) 81 | 82 | step(3,'sync again to check if the change is there') 83 | 84 | run_ocsync(syncdir) 85 | 86 | # we expect to find DIRB and all test files in it 87 | # we expect DIRA is deleted 88 | 89 | d1 = os.path.join(syncdir,DIRA) 90 | d2 = os.path.join(syncdir,DIRB) 91 | 92 | logger.info('checking %s',d1) 93 | error_check(os.path.isdir(d1), "path %s should be a directory"%d1) 94 | 95 | logger.info('checking %s',d2) 96 | error_check(os.path.isdir(d2), "path %s should be a directory"%d2) 97 | 98 | check_files_exist(TEST_FILES,d2) 99 | 100 | step(5,'sync again and check if the files were moved back') 101 | 102 | run_ocsync(syncdir) 103 | 104 | check_files_exist(TEST_FILES,d1) 105 | 106 | 107 | -------------------------------------------------------------------------------- /python/smashbox/owncloudorg/remote_sharing.py: -------------------------------------------------------------------------------- 1 | from owncloud import HTTPResponseError 2 | from smashbox.script import config 3 | from smashbox.utilities import * 4 | 5 | 6 | def remote_share_file_with_user(filename, sharer, sharee, **kwargs): 7 | """ Shares a file with a user 8 | 9 | :param filename: name of the file being shared 10 | :param sharer: the user doing the sharing 11 | :param sharee: the user receiving the share 12 | :param kwargs: key words args to be passed into the api, usually for share permissions 13 | :returns: share id of the created share 14 | 15 | """ 16 | from owncloud import ResponseError 17 | 18 | logger.info('%s is sharing file %s with user %s', sharer, filename, sharee) 19 | 20 | oc_api = get_oc_api() 21 | oc_api.login(sharer, config.oc_account_password) 22 | 23 | 
kwargs.setdefault('remote_user', True) 24 | sharee = "%s@%s" % (sharee, oc_api.url) 25 | 26 | try: 27 | share_info = oc_api.share_file_with_user(filename, sharee, **kwargs) 28 | logger.info('share id for file share is %s', str(share_info.share_id)) 29 | return share_info.share_id 30 | except ResponseError as err: 31 | logger.info('Share failed with %s - %s', str(err), str(err.get_resource_body())) 32 | if err.status_code == 403 or err.status_code == 404: 33 | return -1 34 | else: 35 | return -2 36 | 37 | 38 | def list_open_remote_share(sharee): 39 | """ Accepts a remote share 40 | 41 | :param sharee: user who created the original share 42 | """ 43 | logger.info('Listing remote shares for user %s', sharee) 44 | 45 | oc_api = get_oc_api() 46 | oc_api.login(sharee, config.oc_account_password) 47 | try: 48 | open_remote_shares = oc_api.list_open_remote_share() 49 | except HTTPResponseError as err: 50 | logger.error('Share failed with %s - %s', str(err), str(err.get_resource_body())) 51 | if err.status_code == 403 or err.status_code == 404: 52 | return -1 53 | else: 54 | return -2 55 | 56 | return open_remote_shares 57 | 58 | 59 | def accept_remote_share(sharee, share_id): 60 | """ Accepts a remote share 61 | 62 | :param sharee: user who created the original share 63 | :param share_id: id of the share to be accepted 64 | 65 | """ 66 | logger.info('Accepting share %i for user %s', share_id, sharee) 67 | 68 | oc_api = get_oc_api() 69 | oc_api.login(sharee, config.oc_account_password) 70 | error_check(oc_api.accept_remote_share(share_id), 'Accepting remote share failed') 71 | 72 | 73 | def decline_remote_share(sharee, share_id): 74 | """ Delines a remote share 75 | 76 | :param sharer: user who created the original share 77 | :param share_id: id of the share to be declined 78 | 79 | """ 80 | logger.info('Declining share %i from user %s', share_id, sharee) 81 | 82 | oc_api = get_oc_api() 83 | oc_api.login(sharee, config.oc_account_password) 84 | error_check(oc_api.decline_remote_share(share_id), 'Accepting remote share failed') -------------------------------------------------------------------------------- /protocol/checksum.md: -------------------------------------------------------------------------------- 1 | # Owncloud protocol extension: checksumming 2 | 3 | ## Scope and purpose 4 | 5 | Add checksum capability to verify end-to-end integrity of file uploads and downloads operations. 6 | 7 | NOT in scope: using checksums as ETAG 8 | 9 | ## Enabling transfer checksums in the client 10 | 11 | As of version 1.7.2-cernbox and port to 1.8(.2) the type of the checksum is defined in the main config file as: 12 | 13 | [General] 14 | tranmissionChecksum=Adler32 15 | 16 | BITS NOT YET IMPLEMENTED/UNDER DISCUSSION: [see comments in the source of this file] 17 | 32 | 33 | ## Simple PUT (not-chunked) 34 | 35 | Client computes the checksum and sends it in the request header OC-Checksum. The OC-Checksum is defined as: checkum_type:checksum_value 36 | 37 | Examples: 38 | 39 | PUT /file HTTP/1.1 40 | OC-Checksum: Adler32:xxxxxxxxxxxxxxxxxx 41 | 42 | PUT /file HTTP/1.1 43 | OC-Checksum: MD5:xxxxxxxxxxxxxxxxxx 44 | 45 | 46 | If the checksum does not match the content on the server then the server returns 412 (Precondition Failed). 47 | 48 | Response: 412 49 | 50 | BITS NOT YET IMPLEMENTED/UNDER DISCUSSION: [see comments in the source of this file] 51 | 60 | 61 | ## GET 62 | 63 | Server may provide the OC-Checksum response header with the GET request. 
If OC-Checksum is provided then the client may use it to verify the checksum at the final destination. 64 | 65 | In case of a byte-range request the OC-Checksum response header is the checksum of the entire file (as for a GET of the entire file). 66 | 67 | ## Chunked PUT 68 | 69 | The OC-Checksum of the entire file content is sent with the last chunk PUT request (and of course it should not change during the upload). 70 | 71 | ## Remarks 72 | 73 | The checksumming feature is optional. A client may decide NOT to provide the 74 | OC-Checksum header for PUT requests and may ignore the OC-Checksum header 75 | in the GET response. If the type of the checksum is not understood or supported by the client or by the server then 76 | the checksum should be ignored. 77 | 78 | Transfers should fail if the checksum type is understood and supported but the checksum value does not match. 79 | 80 | BITS NOT YET IMPLEMENTED/UNDER DISCUSSION: [see comments in the source of this file] 81 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /lib/owncloud/test_moveFilesTwice.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | This test checks that files which are moved on the server multiple times do not get 5 | re-uploaded, even if the files are moved while the sync is running. 6 | [https://github.com/owncloud/client/issues/4370] 7 | 8 | """ 9 | 10 | from smashbox.utilities import * 11 | import subprocess 12 | 13 | nfiles = 20 14 | TEST_FILES = ['test%02d.dat'%i for i in range(nfiles)] 15 | 16 | def getFileId(syncdir, fileName): 17 | return subprocess.check_output(["sqlite3" , syncdir + "/.csync_journal.db", 18 | "select fileid from metadata where path = \"" + fileName + "\""]) 19 | 20 | @add_worker 21 | def workerA(step): 22 | if compare_client_version('2.1.0', '<='): 23 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 24 | return 25 | 26 | #cleanup remote and local test environment - this should be run once by one worker only 27 | reset_owncloud_account() 28 | reset_rundir() 29 | 30 | syncdir = make_workdir("workdir") 31 | d1 = os.path.join(syncdir,"dir1") 32 | d2 = os.path.join(syncdir,"dir2") 33 | d_final = os.path.join(syncdir,"dirFinal") 34 | 35 | step(0,'create initial content and sync') 36 | 37 | 38 | # create a folder and some files in it 39 | mkdir(d1) 40 | 41 | for f in TEST_FILES: 42 | fn = os.path.join(d1,f) 43 | createfile(fn,'0',count=1000,bs=1000) 44 | 45 | run_ocsync(syncdir) 46 | 47 | fileIds = list(map((lambda f:getFileId(syncdir, 'dir1/' + f)), TEST_FILES)) 48 | 49 | step(1,'move the folder') 50 | 51 | mkdir(d2) 52 | mv(d1+"/*",d2) 53 | 54 | step(2, 'sync') 55 | run_ocsync(syncdir) 56 | 57 | step(3,'final sync') 58 | 59 | run_ocsync(syncdir) 60 | 61 | final_fileIds = list(map((lambda f:getFileId(syncdir, 'dirFinal/' + f)), TEST_FILES)) 62 | 63 | #The file ids need to stay the same for every file, since the files were only moved and not re-uploaded 64 | error_check(fileIds == final_fileIds, "File ids differ (%s != %s)" % (fileIds, final_fileIds)) 65 | 66 | 67 | @add_worker 68 | def workerB(step): 69 | 70 | if compare_client_version('2.1.0', '<='): 71 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 72 | return 73 | 74 | step(2,'move the folder during the sync') 75 | 76 | syncdir = make_workdir("workdir") 77 | d1 = os.path.join(syncdir,"dir1") 78 | d2 = os.path.join(syncdir,"dir2") 79 | d3 = os.path.join(syncdir,"dir3") 80 | d4 = 
os.path.join(syncdir,"dir4") 81 | d5 = os.path.join(syncdir,"dir5") 82 | d6 = os.path.join(syncdir,"dir6") 83 | d_final = os.path.join(syncdir,"dirFinal") 84 | 85 | #Do it several time with one second interval to be sure we do it at lease once 86 | # during the propagation phase 87 | sleep(1) 88 | mkdir(d3) 89 | mv(d2+"/*",d3) 90 | 91 | sleep(1) 92 | mkdir(d4) 93 | mv(d3+"/*",d4) 94 | 95 | sleep(1) 96 | mkdir(d5) 97 | mv(d4+"/*",d5) 98 | 99 | sleep(1) 100 | mkdir(d6) 101 | mv(d5+"/*",d6) 102 | 103 | sleep(1) 104 | mkdir(d_final) 105 | mv(d6+"/*",d_final) 106 | 107 | -------------------------------------------------------------------------------- /client/compile-owncloud-sync-client: -------------------------------------------------------------------------------- 1 | # 2 | # The _open_SmashBox Project. 3 | # 4 | # Author: Jakub T. Moscicki, CERN, 2013 5 | # License: AGPL 6 | # 7 | # this script compiles and downloads the owncloud clients automatically 8 | # versions are controlled by variables below 9 | # - src: download sources 10 | # - build: contains build directories 11 | # - patches: applied patches (if any) 12 | 13 | # if a patch is applied the name of a build directory is changed to reflect that fact 14 | 15 | # offical version of the client 16 | # previous verions: 0.80, 1.3 17 | export OCSYNC=ocsync-0.90.2 18 | export MIRALL=mirall-1.4.1 19 | export DOWNLOAD_PATH= 20 | 21 | # latest development preview (with a different download path on the server) 22 | #export OCSYNC=ocsync-0.82.0 23 | #export MIRALL=mirall-1.4.0beta2 24 | #export DOWNLOAD_PATH=testing 25 | 26 | 27 | # this specifies if to apply patches 28 | # $PATCH is also appended to build directory name to distguish patched from vanilla builds 29 | #export PATCH= 30 | export PATCH=ssl-patch 31 | 32 | ############################################################################################# 33 | 34 | # workspace is the directory where this script resides 35 | 36 | # http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in 37 | PREFIX="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 38 | 39 | PREFIX=`readlink -f $PREFIX` # this is to remove any symlinks that may be local (such as /b) 40 | 41 | cat << EOF 42 | ======================================================" 43 | compiling owncloud client 44 | ------------------------- 45 | PREFIX=$PREFIX 46 | PATCH=$PATCH 47 | OCSYNC=$OCSYNC 48 | MIRALL=$MIRALL 49 | ------------------------- 50 | EOF 51 | 52 | cd $PREFIX 53 | 54 | # download sources if needed 55 | 56 | mkdir -p src 57 | 58 | cd src 59 | 60 | test -f ${OCSYNC}.tar.bz2 || wget http://download.owncloud.com/download/${DOWNLOAD_PATH}/${OCSYNC}.tar.bz2 61 | test -f ${MIRALL}.tar.bz2 || wget http://download.owncloud.com/download/${DOWNLOAD_PATH}/${MIRALL}.tar.bz2 62 | 63 | test -d ${OCSYNC} || tar fvxj ${OCSYNC}.tar.bz2 64 | test -d ${MIRALL} || tar fvxj ${MIRALL}.tar.bz2 65 | 66 | cd .. 67 | 68 | # patch sources if needed 69 | 70 | if [ $PATCH ]; then 71 | echo Patching: "$PATCH" 72 | cd src 73 | 74 | # adjust source directory names 75 | 76 | rm -rf ${OCSYNC}-${PATCH} 77 | rm -rf ${MIRALL}-${PATCH} 78 | mv ${OCSYNC} ${OCSYNC}-${PATCH} 79 | mv ${MIRALL} ${MIRALL}-${PATCH} 80 | 81 | # adjust build directory names 82 | 83 | export OCSYNC=${OCSYNC}-${PATCH} 84 | export MIRALL=${MIRALL}-${PATCH} 85 | cd .. 
86 | 87 | # patch sources 88 | 89 | patch --backup ${PREFIX}/src/${OCSYNC}/modules/csync_owncloud.c patches/ocsync-ssl.patch 90 | fi 91 | 92 | # BUILD 93 | 94 | mkdir -p build 95 | cd build 96 | 97 | rm -rf build-${OCSYNC} 98 | mkdir build-${OCSYNC} 99 | cd build-${OCSYNC} 100 | cmake -DCMAKE_BUILD_TYPE=Debug ${PREFIX}/src/${OCSYNC} 101 | make 102 | 103 | mkdir include 104 | cp csync_version.h include 105 | cp ${PREFIX}/src/${OCSYNC}/src/*.h include 106 | cd .. 107 | 108 | rm -rf build-${MIRALL} 109 | mkdir build-${MIRALL} 110 | cd build-${MIRALL} 111 | cmake -DCMAKE_BUILD_TYPE="Debug" ${PREFIX}/src/${MIRALL} -DCSYNC_BUILD_PATH=${PREFIX}/build/build-${OCSYNC} -DCSYNC_INCLUDE_PATH=${PREFIX}/build/build-${OCSYNC}/include/ 112 | make 113 | 114 | -------------------------------------------------------------------------------- /protocol/test_protocol_doc.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import * 3 | from smashbox.protocol import * 4 | 5 | import smashbox.utilities.reflection 6 | import smashbox.curl 7 | 8 | import re 9 | 10 | """ Test docstrings in protocol.md 11 | """ 12 | 13 | @add_worker 14 | def main(step): 15 | 16 | d = make_workdir() 17 | reset_owncloud_account() 18 | 19 | # grab the protocol.md file sitting in the same directory as this source file 20 | docfile = os.path.join(os.path.dirname(smashbox.utilities.reflection.getTestcaseFilename()),'protocol.md') 21 | 22 | blocks = parse_docfile(docfile) 23 | 24 | block_ok=0 25 | 26 | for (block_check, block_header, block_body, lineno) in blocks: 27 | logger.info('checking doc block: %s %s line %d: %s',block_check.__name__,block_header,lineno,block_body) 28 | try: 29 | block_check(block_header,block_body) 30 | block_ok += 1 31 | except Exception,x: 32 | logger.error('while checking %s %s line %d: %s %s',block_check.__name__,block_header,lineno,block_body,x) 33 | 34 | logger.info('checked %d ok out of %d',block_ok,len(blocks)) 35 | 36 | def check_propfind_request(header,body): 37 | # as a bare minimum check if XML syntax is correct 38 | 39 | from xml.etree import ElementTree 40 | ElementTree.fromstring(body) 41 | 42 | def check_propfind_response(header,body): 43 | smashbox.curl._parse_propfind_response(body) 44 | 45 | def parse_docfile(docfile): 46 | 47 | propfind_request = re.compile("(?P<indent>\s+)> PROPFIND") 48 | propfind_response = re.compile("(?P<indent>\s+)< PROPFIND") 49 | 50 | blocks=[] 51 | 52 | block_re=None 53 | 54 | block_indent=0 55 | block_check=None 56 | block_header=[] 57 | block_body='' 58 | in_header=False 59 | block_lineno=0 60 | 61 | lineno=0 62 | 63 | def DEBUG(*x): 64 | print 'DEBUG:',lineno,x 65 | 66 | for line in file(docfile): 67 | 68 | lineno+=1 69 | 70 | r1 = propfind_request.match(line) 71 | r2 = propfind_response.match(line) 72 | 73 | if r1 or r2: 74 | if r1: 75 | block_check = check_propfind_request 76 | r=r1 77 | else: 78 | block_check = check_propfind_response 79 | r=r2 80 | block_indent=len(r.group('indent')) 81 | block_re=re.compile(r.group('indent')+"(?P<text>.*)") 82 | in_header=True 83 | block_lineno=lineno 84 | DEBUG('start_of_block',block_check,block_indent) 85 | continue 86 | 87 | if block_re: 88 | r = block_re.match(line) 89 | 90 | #print r,line 91 | if r: 92 | text=r.group('text') 93 | 94 | if in_header: 95 | if text == '': 96 | in_header=False 97 | DEBUG('end_of_header') 98 | continue 99 | block_header.append(text) 100 | DEBUG('block_header',line) 101 | else: 102 | block_body += text+'\n' 103 | 
DEBUG('block_body',line) 104 | else: 105 | blocks.append((block_check,block_header,block_body,block_lineno)) 106 | block_check=None 107 | block_header=[] 108 | block_body='' 109 | block_re=None 110 | block_indent=None 111 | DEBUG('end_of_block') 112 | 113 | 114 | return blocks 115 | -------------------------------------------------------------------------------- /lib/test_localDirRenameRecreate.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | 3 | __doc__ = """ This test renames a directory A->B, creates an empty directory A and syncs. The move should be correctly propagated on the server. 4 | 5 | Optionally the files are also moved back. 6 | 7 | This pattern could happen in a situation when a sync client if switched off for some time. 8 | 9 | This follows from discussion in: https://github.com/owncloud/client/issues/3324 10 | 11 | TODO: a similar test for server side move. 12 | 13 | """ 14 | 15 | testsets = [ {'localDirRenameRecreate_DIRA':'DIRA', 16 | 'localDirRenameRecreate_DIRB':'DIRB', 17 | 'localDirRenameRecreate_moveFilesBack':False}, 18 | 19 | {'localDirRenameRecreate_DIRA':'DIRA', 20 | 'localDirRenameRecreate_DIRB':'DIRB', 21 | 'localDirRenameRecreate_moveFilesBack':True} 22 | ] 23 | 24 | import os.path 25 | 26 | DIRA = os.path.normpath(config.get('dirMove_DIRA','DIRA')) 27 | DIRB = os.path.normpath(config.get('dirMove_DIRB','DIRB')) 28 | 29 | nfiles = int(config.get('localDirRenameRecreate_nfiles',10)) 30 | moveFilesBack = bool(config.get('localDirRenameRecreate_moveFilesBack',False)) 31 | 32 | TEST_FILES = ['test%02d.dat'%i for i in range(nfiles)] 33 | 34 | def check_files_exist(files,d): 35 | for fn in files: 36 | f = os.path.join(d,fn) 37 | logger.info("checking %s",f) 38 | error_check(os.path.isfile(f), "path %s should be a file"%f) 39 | 40 | @add_worker 41 | def workerA(step): 42 | 43 | #cleanup remote and local test environment - this should be run once by one worker only 44 | reset_owncloud_account() 45 | reset_rundir() 46 | 47 | step(0,'create initial content and sync') 48 | 49 | # this will be our syncdir (this is different for every worker) 50 | syncdir = make_workdir() 51 | 52 | # create a folder and some files in it 53 | d1 = mkdir(os.path.join(syncdir,DIRA)) 54 | 55 | for f in TEST_FILES: 56 | fn = os.path.join(d1,f) 57 | createfile(fn,'0',count=1000,bs=1000) 58 | 59 | run_ocsync(syncdir) 60 | 61 | step(2,'move the files in the folder and sync') 62 | 63 | d2 = os.path.join(syncdir,DIRB) 64 | 65 | mv(d1,d2) 66 | 67 | # recreate empty directory with the same name 68 | mkdir(d1) 69 | 70 | # optionally moves files back 71 | if moveFilesBack: 72 | for f in TEST_FILES: 73 | fn = os.path.join(d2,f) 74 | mv(fn,d1) 75 | 76 | run_ocsync(syncdir) 77 | 78 | # check after runing our sync (workerA) 79 | check_final_state(syncdir) 80 | 81 | step(4,'check if nothing changed after running other sync (workerB)') 82 | 83 | check_final_state(syncdir) 84 | 85 | 86 | @add_worker 87 | def workerB(step): 88 | 89 | step(1,'sync the initial content') 90 | 91 | syncdir = make_workdir() 92 | run_ocsync(syncdir) 93 | 94 | step(3,'sync again to check if the change is there') 95 | run_ocsync(syncdir) 96 | 97 | check_final_state(syncdir) 98 | 99 | 100 | def check_final_state(syncdir): 101 | 102 | # we expect to find DIRB and all test files in it 103 | # we expect DIRA is deleted 104 | 105 | d1 = os.path.join(syncdir,DIRA) 106 | d2 = os.path.join(syncdir,DIRB) 107 | 108 | logger.info('checking %s',d1) 109 | 
error_check(os.path.isdir(d1), "path %s should be a directory"%d1) 110 | 111 | logger.info('checking %s',d2) 112 | error_check(os.path.isdir(d2), "path %s should be a directory"%d2) 113 | 114 | if moveFilesBack: 115 | check_files_exist(TEST_FILES,d1) 116 | else: 117 | check_files_exist(TEST_FILES,d2) 118 | 119 | 120 | 121 | 122 | 123 | 124 | -------------------------------------------------------------------------------- /lib/test_nplusone.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ Add nfiles to a directory and check consistency. 7 | """ 8 | 9 | from smashbox.utilities import * 10 | from smashbox.utilities.hash_files import * 11 | from smashbox.utilities.monitoring import commit_to_monitoring 12 | 13 | nfiles = int(config.get('nplusone_nfiles',10)) 14 | filesize = config.get('nplusone_filesize',1000) 15 | 16 | if type(filesize) is type(''): 17 | filesize = eval(filesize) 18 | 19 | # True => use new webdav endpoint (dav/files) 20 | # False => use old webdav endpoint (webdav) 21 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 22 | 23 | testsets = [ 24 | { 'nplusone_filesize': OWNCLOUD_CHUNK_SIZE(0.1), 25 | 'nplusone_nfiles':100, 26 | 'use_new_dav_endpoint':True 27 | }, 28 | { 'nplusone_filesize': OWNCLOUD_CHUNK_SIZE(6), 29 | 'nplusone_nfiles':1, 30 | 'use_new_dav_endpoint':True 31 | }, 32 | { 'nplusone_filesize': (3.5,1.37), # standard file distribution: 10^(3.5) Bytes 33 | 'nplusone_nfiles':10, 34 | 'use_new_dav_endpoint':True 35 | }, 36 | { 'nplusone_filesize': (3.5,1.37), # standard file distribution: 10^(3.5) Bytes 37 | 'nplusone_nfiles':10, 38 | 'use_new_dav_endpoint':False 39 | }, 40 | ] 41 | 42 | def finish_if_not_capable(): 43 | # Finish the test if some of the prerequisites for this test are not satisfied 44 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 45 | #Dont test for <= 9.1 with new endpoint, since it is not supported 46 | logger.warn("Skipping test since webdav endpoint is not capable for this server version") 47 | return True 48 | return False 49 | 50 | @add_worker 51 | def worker0(step): 52 | if finish_if_not_capable(): 53 | return 54 | 55 | # do not cleanup server files from previous run 56 | reset_owncloud_account() 57 | 58 | # cleanup all local files for the test 59 | reset_rundir() 60 | 61 | step(1,'Preparation') 62 | d = make_workdir() 63 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 64 | k0 = count_files(d) 65 | 66 | step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles) 67 | 68 | total_size=0 69 | sizes=[] 70 | 71 | # compute the file sizes in the set 72 | for i in range(nfiles): 73 | size=size2nbytes(filesize) 74 | sizes.append(size) 75 | total_size+=size 76 | 77 | logger.log(35,"Timestamp %f Files %d TotalSize %d",time.time(),nfiles,total_size) 78 | 79 | # create the test files 80 | for size in sizes: 81 | create_hashfile(d,size=size) 82 | 83 | time0=time.time() 84 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 85 | time1=time.time() 86 | 87 | ncorrupt = analyse_hashfiles(d)[2] 88 | 89 | k1 = count_files(d) 90 | 91 | error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 92 | fatal_check(ncorrupt==0, 'Corrupted files (%s) found'%ncorrupt) 93 | 94 | logger.info('SUCCESS: %d files found',k1) 95 | 96 | step(4, "Final report") 97 | commit_to_monitoring("upload_duration",time1-time0) 98 | 99 | @add_worker 100 | def 
worker1(step): 101 | if finish_if_not_capable(): 102 | return 103 | 104 | step(1,'Preparation') 105 | d = make_workdir() 106 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 107 | k0 = count_files(d) 108 | 109 | step(3,'Resync and check files added by worker0') 110 | 111 | time0=time.time() 112 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 113 | time1=time.time() 114 | 115 | ncorrupt = analyse_hashfiles(d)[2] 116 | k1 = count_files(d) 117 | 118 | error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 119 | 120 | fatal_check(ncorrupt==0, 'Corrupted files (%d) found'%ncorrupt) #Massimo 12-APR 121 | 122 | step(4,"Final report") 123 | commit_to_monitoring("download_duration",time1-time0) 124 | 125 | 126 | 127 | -------------------------------------------------------------------------------- /lib/test_concurrentDirMove.py: -------------------------------------------------------------------------------- 1 | __doc__ = """ 2 | 3 | This test moves concurrently a directory ('mover' worker) while 4 | files are added to it ('adder' worker) . The expected outcome is that 5 | all added files are kept on the server and are found in the final directory. 6 | 7 | """ 8 | 9 | 10 | 11 | nfiles = int(config.get('concurrentMoveDir_nfiles',100)) 12 | filesize = int(config.get('concurrentMoveDir_filesize',10)) 13 | delaySeconds = int(config.get('concurrentMoveDir_delaySeconds',3)) # if delaySeconds > 0 then remover waits; else the adder waits; 14 | 15 | from smashbox.utilities import * 16 | 17 | testsets = [ 18 | {'concurrentMoveDir_nfiles':100, 19 | 'concurrentMoveDir_filesize':10, 20 | 'concurrentMoveDir_delaySeconds':10 }, # removing the directory while lots of tiny files are uploaded 21 | 22 | {'concurrentMoveDir_nfiles':3, 23 | 'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(1.1), 24 | 'concurrentMoveDir_delaySeconds':5 }, # removing the directory while a large file is chunk-uploaded 25 | 26 | {'concurrentMoveDir_nfiles':20, 27 | 'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(0.9), 28 | 'concurrentMoveDir_delaySeconds':10 }, # removing the directory more but smaller files are uploaded 29 | 30 | {'concurrentMoveDir_nfiles':5, 31 | 'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(0.1), 32 | 'concurrentMoveDir_delaySeconds':-5 }, # removing the directory before files are uploaded 33 | 34 | {'concurrentMoveDir_nfiles':5, 35 | 'concurrentMoveDir_filesize':OWNCLOUD_CHUNK_SIZE(2.1), 36 | 'concurrentMoveDir_delaySeconds':-10 } # removing the directory before laarge files are chunk-uploaded 37 | 38 | ] 39 | 40 | import time 41 | import tempfile 42 | 43 | 44 | from smashbox.utilities.hash_files import * 45 | 46 | @add_worker 47 | def creator(step): 48 | reset_owncloud_account() 49 | reset_rundir() 50 | 51 | step(1,'upload empty subdirectory') 52 | d = make_workdir() 53 | d2 = os.path.join(d,'subdir') 54 | mkdir(d2) 55 | run_ocsync(d) 56 | 57 | step(5,'final check') 58 | run_ocsync(d) 59 | final_check(d) 60 | 61 | 62 | @add_worker 63 | def adder(step): 64 | 65 | step(2,'sync the empty directory created by the creator') 66 | d = make_workdir() 67 | run_ocsync(d) 68 | 69 | step(3,'locally create content in the subdirectory') 70 | d2 = os.path.join(d,'subdir') 71 | 72 | for i in range(nfiles): 73 | create_hashfile(d2, size=filesize) #createfile_zero(os.path.join(d2,"test.%02d"%i),count=filesize, bs=1000) 74 | 75 | step(4,'sync the added files in parallel') 76 | if delaySeconds<0: 77 | sleep(-delaySeconds) 78 | run_ocsync(d,n=2) 79 | 80 | step(5,'final check') 
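    # sync once more so this worker's folder reflects the final server state before checking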
81 | run_ocsync(d) 82 | final_check(d) 83 | 84 | 85 | @add_worker 86 | def mover(step): 87 | step(2,'sync the empty directory created by the creator') 88 | d = make_workdir() 89 | run_ocsync(d) 90 | 91 | step(3,'locally rename subdir to subdir2') 92 | s1 = os.path.join(d,'subdir') 93 | s2 = os.path.join(d,'subdir2') 94 | os.rename(s1,s2) 95 | 96 | step(4,'sync the subdir2 in parallel') 97 | if delaySeconds>0: 98 | sleep(delaySeconds) 99 | run_ocsync(d) 100 | 101 | step(5,'final check') 102 | run_ocsync(d) 103 | final_check(d) 104 | 105 | @add_worker 106 | def checker(step): 107 | 108 | step(5,'sync the final state of the repository into a fresh local folder') 109 | d = make_workdir() 110 | run_ocsync(d) 111 | 112 | final_check(d) 113 | 114 | 115 | def final_check(d): 116 | 117 | list_files(d,recursive=True) 118 | 119 | d2 = os.path.join(d,'subdir2') 120 | 121 | logger.info('final output: %s',d2) 122 | 123 | all_files,analysed_files,bad_files = analyse_hashfiles(d2) 124 | 125 | error_check(bad_files == 0,'%s corrupted files in %s'%(bad_files,d2)) 126 | error_check(analysed_files == nfiles,"not all files are present (%d/%d)"%(nfiles,analysed_files)) # FIXME: well, there may be other files - we don't check that yet 127 | 128 | 129 | #runcmd('find %s'%d) 130 | 131 | #log('content of /subdir as reported by webdav') 132 | #list_webdav_propfind('subdir') 133 | 134 | 135 | -------------------------------------------------------------------------------- /lib/owncloud/test_chunking.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ 7 | Upload a small file "small.dat" (10 kB) 8 | Upload a big file "big.dat" (50 MB) 9 | Overwrite big with small file, keeping the target name 10 | Overwrite small with big file, keeping the target name 11 | """ 12 | 13 | from smashbox.utilities import * 14 | from smashbox.utilities.hash_files import * 15 | 16 | small_file_size = 10 # KB 17 | big_file_size = 50000 # KB 18 | zero_file_size = 0 # KB 19 | 20 | # True => use new webdav endpoint (dav/files) 21 | # False => use old webdav endpoint (webdav) 22 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 23 | 24 | testsets = [ 25 | { 26 | 'use_new_dav_endpoint':True 27 | }, 28 | { 29 | 'use_new_dav_endpoint':False 30 | }, 31 | ] 32 | 33 | def expect_content(fn,md5): 34 | actual_md5 = md5sum(fn) 35 | error_check(actual_md5 == md5, "inconsistent md5 of %s: expected %s, got %s"%(fn,md5,actual_md5)) 36 | 37 | def finish_if_not_capable(): 38 | # Finish the test if some of the prerequisites for this test are not satisfied 39 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 40 | #Dont test for <= 9.1 with new endpoint, since it is not supported 41 | logger.warn("Skipping test since webdav endpoint is not capable for this server version") 42 | return True 43 | return False 44 | 45 | @add_worker 46 | def worker0(step): 47 | if finish_if_not_capable(): 48 | return 49 | 50 | # do not cleanup server files from previous run 51 | reset_owncloud_account() 52 | 53 | # cleanup all local files for the test 54 | reset_rundir() 55 | 56 | step(1,'Preparation') 57 | shared = reflection.getSharedObject() 58 | d = make_workdir() 59 | run_ocsync(d) 60 | 61 | step(2,'Create and sync test files') 62 | 63 | createfile(os.path.join(d,'TEST_SMALL_TO_BIG.dat'),'0',count=1000,bs=small_file_size) 64 | createfile(os.path.join(d,'TEST_BIG_TO_SMALL.dat'),'0',count=1000,bs=big_file_size) 65 | 
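    # each file is count*bs bytes, i.e. roughly the 10 kB and 50 MB sizes described in the module docstring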
#createfile(os.path.join(d,'TEST_ZERO_TO_BIG.dat'),'0',count=1000,bs=filesizeKB) 66 | #createfile(os.path.join(d,'TEST_FILE_MODIFIED_BOTH.dat'),'0',count=1000,bs=filesizeKB) 67 | 68 | shared['TEST_SMALL_TO_BIG'] = md5sum(os.path.join(d,'TEST_SMALL_TO_BIG.dat')) 69 | shared['TEST_BIG_TO_SMALL'] = md5sum(os.path.join(d,'TEST_BIG_TO_SMALL.dat')) 70 | logger.info('TEST_SMALL_TO_BIG: %s',shared['TEST_SMALL_TO_BIG']) 71 | logger.info('TEST_BIG_TO_SMALL: %s',shared['TEST_BIG_TO_SMALL']) 72 | 73 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 74 | 75 | step(5,'Sync down and check if correct') 76 | 77 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 78 | expect_content(os.path.join(d,'TEST_SMALL_TO_BIG.dat'), shared['TEST_SMALL_TO_BIG']) 79 | expect_content(os.path.join(d,'TEST_BIG_TO_SMALL.dat'), shared['TEST_BIG_TO_SMALL']) 80 | 81 | 82 | @add_worker 83 | def worker1(step): 84 | if finish_if_not_capable(): 85 | return 86 | 87 | step(3,'Preparation') 88 | shared = reflection.getSharedObject() 89 | d = make_workdir() 90 | run_ocsync(d) 91 | 92 | expect_content(os.path.join(d,'TEST_SMALL_TO_BIG.dat'), shared['TEST_SMALL_TO_BIG']) 93 | expect_content(os.path.join(d,'TEST_BIG_TO_SMALL.dat'), shared['TEST_BIG_TO_SMALL']) 94 | 95 | step(4,'Ovverwrite files') 96 | 97 | createfile(os.path.join(d,'TEST_SMALL_TO_BIG.dat'),'1',count=1000,bs=big_file_size) 98 | createfile(os.path.join(d,'TEST_BIG_TO_SMALL.dat'),'1',count=1000,bs=small_file_size) 99 | shared['TEST_SMALL_TO_BIG'] = md5sum(os.path.join(d,'TEST_SMALL_TO_BIG.dat')) 100 | shared['TEST_BIG_TO_SMALL'] = md5sum(os.path.join(d,'TEST_BIG_TO_SMALL.dat')) 101 | logger.info('TEST_SMALL_TO_BIG: %s',shared['TEST_SMALL_TO_BIG']) 102 | logger.info('TEST_BIG_TO_SMALL: %s',shared['TEST_BIG_TO_SMALL']) 103 | 104 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 105 | 106 | step(5,'Check if correct') 107 | expect_content(os.path.join(d,'TEST_SMALL_TO_BIG.dat'), shared['TEST_SMALL_TO_BIG']) 108 | expect_content(os.path.join(d,'TEST_BIG_TO_SMALL.dat'), shared['TEST_BIG_TO_SMALL']) 109 | 110 | -------------------------------------------------------------------------------- /bin/config_gen: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys, os.path 4 | # insert the path to cernafs based on the relative position of this scrip inside the service directory tree 5 | exeDir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0]))) 6 | pythonDir = os.path.join(os.path.dirname(exeDir), 'python' ) 7 | sys.path.insert(0, pythonDir) 8 | etcDir = os.path.join(os.path.dirname(exeDir), 'etc') 9 | defaultTemplateFile = os.path.join(etcDir, 'smashbox.conf.template-owncloud') 10 | defaultOutputFile = os.path.join(etcDir, 'smashbox.conf') 11 | 12 | import smashbox.configgen.generator as generator 13 | import smashbox.configgen.processors as processors 14 | from smashbox.configgen.processors_hooks import LoggingHook 15 | import logging 16 | import argparse 17 | import json 18 | 19 | parser = argparse.ArgumentParser(description='Config generator for smashbox') 20 | parser.add_argument('-i', default=defaultTemplateFile, help='template file to be used', dest='input_file') 21 | parser.add_argument('-o', default=defaultOutputFile, help='output file', dest='output_file') 22 | group = parser.add_mutually_exclusive_group() 23 | group.add_argument('--no-ask', default=None, action='store_false', help='don\'t ask for required keys', dest='ask_keys') 24 | group.add_argument('--ask', 
default=None, action='store_true', help='ask for required keys', dest='ask_keys') 25 | parser.add_argument('-k', default=[], action='append', required=False, help='key=value pairs', dest='keys') 26 | parser.add_argument('-kt', default=[], action='append', required=False, help='key=type pairs', dest='key_types') 27 | parser.add_argument('--key-value-file', help='json file containing key-value pairs. The file format should something like {keyname: {value: value, type: type}, oc_server: {value: server.com, type: string}, oc_ssl_enable: {value: True, type: bool}}') 28 | parser.add_argument('--logfile', help='write logs in this file') 29 | args = parser.parse_args() 30 | 31 | global_vars = {} 32 | local_vars = {} 33 | with open(args.input_file) as ifile: 34 | code = compile(ifile.read(), args.input_file, 'exec') 35 | exec(code, global_vars, local_vars) 36 | 37 | overwrite_dict = {} 38 | 39 | if args.key_value_file: 40 | with open(args.key_value_file, 'r') as f: 41 | data = json.load(f) 42 | if type(data) is dict: 43 | for data_element in data: 44 | key = data_element 45 | value = str(data[data_element]['value']) 46 | if 'type' in data[data_element]: 47 | value = processors.convert_string_to_type(value, data[data_element]['type']) 48 | overwrite_dict[key] = value 49 | 50 | # convert the keys argument to a dictionary 51 | key_list = [item.split('=', 1) for item in args.keys] 52 | key_dict = dict(key_list) 53 | 54 | # convert the key_types to [[key, type],[key, type]] and change the type of the values 55 | key_type_list = [item.split('=', 1) for item in args.key_types] 56 | for keytype in key_type_list: 57 | if keytype[0] in key_dict: 58 | key_dict[keytype[0]] = processors.convert_string_to_type(key_dict[keytype[0]], keytype[1]) 59 | overwrite_dict.update(key_dict) 60 | 61 | config_generator = generator.Generator() 62 | config_generator.set_processors_from_data(local_vars['_configgen']) 63 | 64 | if args.ask_keys is not None: 65 | processor = config_generator.get_processor_by_name('RequiredKeysProcessor') 66 | if processor is not None: 67 | processor.set_ask(args.ask_keys) 68 | 69 | if overwrite_dict: 70 | # we need to overwrite keys 71 | processor2 = config_generator.get_processor_by_name('OverwritterProcessor') 72 | if processor2 is not None: 73 | processor2.set_dict_to_merge(overwrite_dict) 74 | 75 | # setup logging for each processor 76 | if args.logfile: 77 | logging.basicConfig(level=logging.NOTSET, format='%(asctime)-15s %(levelname)s %(name)s : %(message)s', filename=args.logfile) 78 | for p in config_generator.get_processor_list(): 79 | processor_name = p.get_name() 80 | logger = logging.getLogger('%s.%s' % (__name__, processor_name)) 81 | p.register_observer('logger', LoggingHook(logger, logging.INFO)) 82 | 83 | logging.getLogger(__name__).info('ready to start the generation') 84 | 85 | # generate the config file 86 | config_generator.process_data_to_file(local_vars, args.output_file) 87 | 88 | -------------------------------------------------------------------------------- /python/smashbox/utilities/structures.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # wget https://raw.githubusercontent.com/kennethreitz/requests/v1.2.3/requests/structures.py 4 | 5 | """ 6 | requests.structures 7 | ~~~~~~~~~~~~~~~~~~~ 8 | 9 | Data structures that power Requests. 
10 | 11 | """ 12 | 13 | import os 14 | import collections 15 | from itertools import islice 16 | 17 | 18 | class IteratorProxy(object): 19 | """docstring for IteratorProxy""" 20 | def __init__(self, i): 21 | self.i = i 22 | # self.i = chain.from_iterable(i) 23 | 24 | def __iter__(self): 25 | return self.i 26 | 27 | def __len__(self): 28 | if hasattr(self.i, '__len__'): 29 | return len(self.i) 30 | if hasattr(self.i, 'len'): 31 | return self.i.len 32 | if hasattr(self.i, 'fileno'): 33 | return os.fstat(self.i.fileno()).st_size 34 | 35 | def read(self, n): 36 | return "".join(islice(self.i, None, n)) 37 | 38 | 39 | class CaseInsensitiveDict(collections.MutableMapping): 40 | """ 41 | A case-insensitive ``dict``-like object. 42 | 43 | Implements all methods and operations of 44 | ``collections.MutableMapping`` as well as dict's ``copy``. Also 45 | provides ``lower_items``. 46 | 47 | All keys are expected to be strings. The structure remembers the 48 | case of the last key to be set, and ``iter(instance)``, 49 | ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` 50 | will contain case-sensitive keys. However, querying and contains 51 | testing is case insensitive: 52 | 53 | cid = CaseInsensitiveDict() 54 | cid['Accept'] = 'application/json' 55 | cid['aCCEPT'] == 'application/json' # True 56 | list(cid) == ['Accept'] # True 57 | 58 | For example, ``headers['content-encoding']`` will return the 59 | value of a ``'Content-Encoding'`` response header, regardless 60 | of how the header name was originally stored. 61 | 62 | If the constructor, ``.update``, or equality comparison 63 | operations are given keys that have equal ``.lower()``s, the 64 | behavior is undefined. 65 | 66 | """ 67 | def __init__(self, data=None, **kwargs): 68 | self._store = dict() 69 | if data is None: 70 | data = {} 71 | self.update(data, **kwargs) 72 | 73 | def __setitem__(self, key, value): 74 | # Use the lowercased key for lookups, but store the actual 75 | # key alongside the value. 
76 | self._store[key.lower()] = (key, value) 77 | 78 | def __getitem__(self, key): 79 | return self._store[key.lower()][1] 80 | 81 | def __delitem__(self, key): 82 | del self._store[key.lower()] 83 | 84 | def __iter__(self): 85 | return (casedkey for casedkey, mappedvalue in self._store.values()) 86 | 87 | def __len__(self): 88 | return len(self._store) 89 | 90 | def lower_items(self): 91 | """Like iteritems(), but with all lowercase keys.""" 92 | return ( 93 | (lowerkey, keyval[1]) 94 | for (lowerkey, keyval) 95 | in self._store.items() 96 | ) 97 | 98 | def __eq__(self, other): 99 | if isinstance(other, collections.Mapping): 100 | other = CaseInsensitiveDict(other) 101 | else: 102 | return NotImplemented 103 | # Compare insensitively 104 | return dict(self.lower_items()) == dict(other.lower_items()) 105 | 106 | # Copy is required 107 | def copy(self): 108 | return CaseInsensitiveDict(self._store.values()) 109 | 110 | def __repr__(self): 111 | return '%s(%r)' % (self.__class__.__name__, dict(self.items())) 112 | 113 | 114 | class LookupDict(dict): 115 | """Dictionary lookup object.""" 116 | 117 | def __init__(self, name=None): 118 | self.name = name 119 | super(LookupDict, self).__init__() 120 | 121 | def __repr__(self): 122 | return '' % (self.name) 123 | 124 | def __getitem__(self, key): 125 | # We allow fall-through here, so values default to None 126 | 127 | return self.__dict__.get(key, None) 128 | 129 | def get(self, key, default=None): 130 | return self.__dict__.get(key, default) 131 | -------------------------------------------------------------------------------- /corruption_test/md5blocks: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import hashlib 4 | 5 | def md5sum(filename,BLOCK_SIZE,BLOCK_LIMIT,BLOCK_OFFSET): 6 | blocks=[] 7 | cnt=0 8 | with open(filename, mode='rb') as f: 9 | d = hashlib.md5() 10 | while True: 11 | buf = f.read(BLOCK_SIZE) # 128 is smaller than the typical filesystem block 12 | if not buf: 13 | break 14 | #print 15 | #print cnt 16 | #print buf 17 | 18 | if BLOCK_OFFSET>0: 19 | BLOCK_OFFSET-=1 20 | continue 21 | 22 | dd = hashlib.md5() 23 | dd.update(buf) 24 | blocks.append(dd.hexdigest()) 25 | d.update(buf) 26 | cnt+=1 27 | if BLOCK_LIMIT and cnt>=BLOCK_LIMIT: 28 | break 29 | return (d.hexdigest(),blocks) 30 | 31 | import sys 32 | import argparse 33 | 34 | def main(): 35 | parser = argparse.ArgumentParser() 36 | 37 | parser.add_argument('--block-size', '-b', action='store', dest="BLOCK_SIZE", type=int, default=16384, help='block size') 38 | parser.add_argument('--block-limit', '-n', action='store', dest="BLOCK_LIMIT", type=int, default=0, help='block limit') 39 | parser.add_argument('--block-offset', '-s', action='store', dest="BLOCK_OFFSET", type=int, default=0, help='block limit') 40 | parser.add_argument('--check', action='store_true', dest="check", default=False, help='check if block 0 repeats somewhere else') 41 | parser.add_argument('--scan-offsets', action='store_true', dest="scan_offsets", default=False, help='run the --check for all offsets which correspond to chunk uploads: i x 10*1024*1024') 42 | parser.add_argument('file1', help='file1') 43 | parser.add_argument('file2', nargs='?', default="", help='file2') 44 | 45 | args = parser.parse_args() 46 | 47 | if args.check: 48 | import os 49 | 50 | CHUNK_SIZE = 10*1024*1024 # bytes, this is the minimal chunk size used in the analyzed population of files (the other was 20*MB) 51 | SKIP_BLOCKS = CHUNK_SIZE/args.BLOCK_SIZE # the number 
of BLOCK_SIZE blocks contained in one CHUNK_SIZE, these two number should divide without a remainder 52 | 53 | if args.scan_offsets: 54 | N_OFFSETS = os.path.getsize(args.file1)/CHUNK_SIZE + 1 55 | else: 56 | N_OFFSETS = 1 57 | 58 | for i in range(N_OFFSETS): 59 | offset = i*SKIP_BLOCKS + args.BLOCK_OFFSET 60 | 61 | m,blocks = md5sum(args.file1,args.BLOCK_SIZE,args.BLOCK_LIMIT,offset) 62 | 63 | b0s = [i for i,b in enumerate(blocks) if b == blocks[0]] 64 | 65 | if len(b0s) > 1: 66 | print args.file1,': ERROR block at %d repeated %d times (%s)'%(offset*args.BLOCK_SIZE, len(b0s)-1, b0s) 67 | sys.exit(2) 68 | 69 | print args.file1 70 | 71 | sys.exit(0) 72 | 73 | m1,blocks1 = md5sum(args.file1,args.BLOCK_SIZE,args.BLOCK_LIMIT,args.BLOCK_OFFSET) 74 | if args.file2: 75 | m2,blocks2 = md5sum(args.file2,args.BLOCK_SIZE,args.BLOCK_LIMIT,args.BLOCK_OFFSET) 76 | 77 | print "File block analyzer" 78 | for a in sorted(vars(args).keys()): 79 | print a,vars(args)[a] 80 | 81 | print 82 | 83 | diff1 = [] 84 | diff2 = [] 85 | 86 | def print_header(): 87 | 88 | print " offset i file1", 89 | if args.file2: 90 | print " file2 mod j" 91 | else: 92 | print 93 | 94 | print_header() 95 | for i in range(len(blocks1)): 96 | b1 = blocks1[i] 97 | if args.file2: 98 | b2 = blocks2[i] 99 | else: 100 | b2 = '' 101 | 102 | print "%10d %3d %s %s"%(args.BLOCK_SIZE*(i+args.BLOCK_OFFSET),args.BLOCK_OFFSET+i,b1,b2), 103 | if b1 != b2: 104 | print '***', 105 | diff1 = b1 106 | diff2 = b2 107 | repeats=[] 108 | rcnt=0 109 | for j,bb in enumerate(blocks1): 110 | if bb == b2: 111 | if rcnt<5: # limit the number of repeats to 5 112 | repeats.append(j+args.BLOCK_OFFSET) 113 | if rcnt==5: 114 | repeats.append('...') 115 | rcnt+=1 116 | print " ".join([str(v) for v in repeats]) 117 | else: 118 | print 119 | 120 | mod="" 121 | 122 | if args.file2: 123 | if m1!=m2: 124 | mod="***" 125 | assert(diff1 or diff2) 126 | else: 127 | m2='' 128 | 129 | print_header() 130 | 131 | print 132 | print " total --- %s %s %s"%(m1,m2,mod) 133 | 134 | 135 | main() 136 | -------------------------------------------------------------------------------- /python/smashbox/utilities/monitoring.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import reflection, config, os 2 | import smashbox.utilities 3 | 4 | def push_to_local_monitor(metric, value): 5 | print metric, value 6 | 7 | def commit_to_monitoring(metric,value,timestamp=None): 8 | shared = reflection.getSharedObject() 9 | if not 'monitoring_points' in shared.keys(): 10 | shared['monitoring_points'] = [] 11 | 12 | # Create monitoring metric point 13 | monitoring_point = dict() 14 | monitoring_point['metric'] = metric 15 | monitoring_point['value'] = value 16 | monitoring_point['timestamp'] = timestamp 17 | 18 | # Append metric to shared object 19 | monitoring_points = shared['monitoring_points'] 20 | monitoring_points.append(monitoring_point) 21 | shared['monitoring_points'] = monitoring_points 22 | 23 | def handle_local_push(returncode, total_duration, monitoring_points): 24 | for monitoring_point in monitoring_points: 25 | push_to_local_monitor(monitoring_point['metric'], monitoring_point['value']) 26 | push_to_local_monitor("returncode", returncode) 27 | push_to_local_monitor("elapsed", total_duration) 28 | 29 | def handle_prometheus_push(returncode, total_duration, monitoring_points): 30 | monitoring_endpoint = config.get('endpoint', None) 31 | release = config.get('owncloud', None) 32 | client = config.get('client', None) 33 | suite = 
config.get('suite', None) 34 | build = config.get('build', None) 35 | duration_label = config.get('duration_label', None) 36 | queries_label = config.get('queries_label', None) 37 | 38 | points_to_push = [] 39 | 40 | # total duration is default for jenkins if given 41 | if duration_label is not None: 42 | points_to_push.append('# TYPE %s gauge' % (duration_label)) 43 | points_to_push.append('%s{owncloud=\\"%s\\",client=\\"%s\\",suite=\\"%s\\",build=\\"%s\\",exit=\\"%s\\"} %s' % ( 44 | duration_label, 45 | release, 46 | client, 47 | suite, 48 | build, 49 | returncode, 50 | total_duration)) 51 | 52 | # No. queries is default for jenkins if given 53 | if queries_label is not None: 54 | no_queries = 0 55 | res_diagnostic_logs = smashbox.utilities.get_diagnostic_log() 56 | for diagnostic_log in res_diagnostic_logs: 57 | if 'diagnostics' in diagnostic_log and 'totalSQLQueries' in diagnostic_log['diagnostics']: 58 | no_queries += int(diagnostic_log['diagnostics']['totalSQLQueries']) 59 | 60 | points_to_push.append('# TYPE %s gauge' % (queries_label)) 61 | points_to_push.append('%s{owncloud=\\"%s\\",client=\\"%s\\",suite=\\"%s\\",build=\\"%s\\",exit=\\"%s\\"} %s' % ( 62 | queries_label, 63 | release, 64 | client, 65 | suite, 66 | build, 67 | returncode, 68 | no_queries)) 69 | 70 | # Export all commited monitoring points 71 | for monitoring_point in monitoring_points: 72 | points_to_push.append('# TYPE %s gauge' % (monitoring_point['metric'])) 73 | points_to_push.append('%s{owncloud=\\"%s\\",client=\\"%s\\",suite=\\"%s\\",build=\\"%s\\",exit=\\"%s\\"} %s' % ( 74 | monitoring_point['metric'], 75 | release, 76 | client, 77 | suite, 78 | build, 79 | returncode, 80 | monitoring_point['value'])) 81 | 82 | # Push to monitoring all points to be pushed 83 | cmd = '' 84 | for point_to_push in points_to_push: 85 | cmd += point_to_push + '\n' 86 | 87 | monitoring_cmd = 'echo "%s" | curl --data-binary @- %s\n' % (cmd, monitoring_endpoint) 88 | os.system(monitoring_cmd) 89 | smashbox.utilities.log_info('Pushing to monitoring: %s' % monitoring_cmd) 90 | 91 | def push_to_monitoring(returncode, total_duration): 92 | monitoring_points = [] 93 | shared = reflection.getSharedObject() 94 | if 'monitoring_points' in shared.keys(): 95 | monitoring_points = shared['monitoring_points'] 96 | 97 | monitoring_type = config.get('monitoring_type', None) 98 | if monitoring_type == 'prometheus': 99 | handle_prometheus_push(returncode, total_duration, monitoring_points) 100 | elif monitoring_type == 'local': 101 | handle_local_push(returncode, total_duration, monitoring_points) -------------------------------------------------------------------------------- /protocol/test_protocol_file_checksum.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import * 3 | from smashbox.protocol import chunk_file_upload, file_upload, file_download 4 | import smashbox.protocol 5 | 6 | import os 7 | import os.path 8 | 9 | @add_worker 10 | def main(step): 11 | 12 | d = make_workdir() 13 | reset_owncloud_account() 14 | 15 | URL = oc_webdav_url() 16 | 17 | logger.info('*** 0. 
upload without the checksum (files should be accepted by the server)') 18 | 19 | filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1)) 20 | filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3)) 21 | 22 | # upload the file without a checksum and then download it to get the checksum type used by the server 23 | file_upload(filename1,URL) 24 | chunk_file_upload(filename2,URL) 25 | 26 | file_download(os.path.basename(filename1),URL,d) 27 | r=file_download(os.path.basename(filename2),URL,d) 28 | 29 | analyse_hashfiles(d) # make sure that files uploaded without a checksum are not corrupted 30 | 31 | logger.info('Got checksum from the server: %s', r.headers['OC-Checksum']) 32 | 33 | try: 34 | active_server_checksum_type = r.headers['OC-Checksum'].strip().split(':')[0] 35 | except KeyError,x: 36 | logger.warning('Checksum not enabled for %s',oc_webdav_url(hide_password=True)) 37 | return 38 | 39 | # now check the checksum type supported on the server 40 | logger.info('Server supports %s checksum',repr(active_server_checksum_type)) 41 | smashbox.protocol.enable_checksum(active_server_checksum_type) 42 | 43 | logger.info('*** 1. upload with correct checksum (files should be accepted by the server)') 44 | 45 | filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1)) 46 | filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3)) 47 | 48 | file_upload(filename1,URL) 49 | chunk_file_upload(filename2,URL) 50 | 51 | file_download(os.path.basename(filename1),URL,d) 52 | file_download(os.path.basename(filename2),URL,d) 53 | 54 | analyse_hashfiles(d) 55 | 56 | # pass around incorrect checksum (of the type supported by the server) 57 | # the puts should be failing 58 | 59 | def corrupted_checksum(fn): 60 | c = smashbox.protocol.compute_checksum(fn) 61 | c = c[:-1]+chr(ord(c[-1])+1) 62 | return c 63 | 64 | logger.info('*** 2. upload with corrupted checksum value (files should be refused by the server)') 65 | 66 | filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1)) 67 | filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3)) 68 | 69 | r = file_upload(filename1,URL,checksum=corrupted_checksum(filename1)) 70 | fatal_check(r.rc == 412) 71 | 72 | r = chunk_file_upload(filename2,URL,checksum=corrupted_checksum(filename2)) 73 | fatal_check(r.rc == 412) 74 | 75 | # paranoia check: make sure that the server did not store the corrupted files inspite of returning 412 76 | 77 | d_corrupted = mkdir(os.path.join(d,'corrupted_files')) 78 | file_download(os.path.basename(filename1),URL,d_corrupted) 79 | file_download(os.path.basename(filename2),URL,d_corrupted) 80 | 81 | fatal_check(os.listdir(d_corrupted)==[]) 82 | 83 | # pass around a checksum of the type unsupported by the server, including some garbage types (which are not even well-formatted) 84 | # in this case the checksums should be ignored and the files transmitted as if checksum was not provided at all 85 | checksum_types = list(set(smashbox.protocol.known_checksum_types)-set([active_server_checksum_type])) 86 | 87 | checksum_types += ['blabla'] 88 | 89 | logger.info('*** 3. 
upload with corrupted checksum types and strange values (files should be accepted by the server)') 90 | 91 | for value in ['',':bah',':']: 92 | for cstype in checksum_types: 93 | 94 | smashbox.protocol.enable_checksum(cstype) 95 | 96 | filename1=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(0.1)) 97 | filename2=create_hashfile(d,size=OWNCLOUD_CHUNK_SIZE(3.3)) 98 | 99 | logger.info(' *** testing checkum value: '+cstype+value) 100 | 101 | file_upload(filename1,URL,checksum=cstype+value) 102 | chunk_file_upload(filename2,URL,checksum=cstype+value) 103 | 104 | file_download(os.path.basename(filename1),URL,d) 105 | file_download(os.path.basename(filename2),URL,d) 106 | 107 | analyse_hashfiles(d) 108 | -------------------------------------------------------------------------------- /lib/test_fileDownloadAbort.py: -------------------------------------------------------------------------------- 1 | import os 2 | import requests 3 | import random 4 | 5 | __doc__ = """ Download a file and abort before the end of the transfer. 6 | """ 7 | 8 | from smashbox.utilities import * 9 | from smashbox.utilities.hash_files import * 10 | 11 | filesize = config.get('fileDownloadAbort_filesize', 900000000) 12 | iterations = config.get('fileDownloadAbort_iterations', 25) 13 | 14 | if type(filesize) is type(''): 15 | filesize = eval(filesize) 16 | 17 | testsets = [ 18 | { 'fileDownloadAbort_filesize': 900000000, 19 | 'fileDownloadAbort_iterations': 25 20 | } 21 | ] 22 | 23 | @add_worker 24 | def main(step): 25 | 26 | step(1, 'Preparation') 27 | 28 | # cleanup server files from previous run 29 | reset_owncloud_account(num_test_users=1) 30 | check_users(1) 31 | 32 | # cleanup all local files for the test 33 | reset_rundir() 34 | 35 | d = make_workdir() 36 | run_ocsync(d,user_num=1) 37 | 38 | step(2, 'Add a file: filesize=%s'%filesize) 39 | 40 | create_hashfile(d,filemask='BLOB.DAT',size=filesize) 41 | list_files(d) 42 | run_ocsync(d,user_num=1) 43 | list_files(d) 44 | 45 | reset_server_log_file(True) 46 | 47 | step(3, 'Create link share') 48 | user1 = "%s%i"%(config.oc_account_name, 1) 49 | 50 | oc_api = get_oc_api() 51 | oc_api.login(user1, config.oc_account_password) 52 | 53 | share = oc_api.share_file_with_link('BLOB.DAT', perms=31) 54 | share_url = share.get_link() + '/download' 55 | 56 | # Start testing 57 | test_urls = [ 58 | { 59 | 'url': oc_public_webdav_url(), 60 | 'auth': (share.get_token(), ''), 61 | 'description': 'Public webdav URL' 62 | }, 63 | { 64 | 'url': share.get_link() + '/download', 65 | 'auth': None, 66 | 'description': 'Link share URL' 67 | }, 68 | { 69 | 'url': os.path.join(oc_webdav_url(), 'BLOB.DAT'), 70 | 'auth': (user1, config.oc_account_password), 71 | 'description': 'Webdav URL' 72 | }, 73 | ] 74 | 75 | stepCount = 4 76 | 77 | for test_url in test_urls: 78 | cases = [ 79 | {'use_range': False, 'abort': True, 'description': 'download abort'}, 80 | {'use_range': True, 'abort': True, 'description': 'range download abort'}, 81 | {'use_range': False, 'abort': False, 'description': 'full download'}, 82 | {'use_range': True, 'abort': False, 'description': 'range download'}, 83 | ] 84 | 85 | for case in cases: 86 | step(stepCount, test_url['description'] + ' ' + case['description']); 87 | for i in range(1, iterations): 88 | test_download(i, test_url['url'], test_url['auth'], case['use_range'], case['abort']) 89 | check_and_reset_logs() 90 | stepCount += 1 91 | 92 | def check_and_reset_logs(): 93 | d = make_workdir() 94 | scrape_log_file(d, True) 95 | reset_server_log_file(True) 96 | 97 | if 
len(reported_errors) > 0: 98 | raise AssertionError('Errors found in log, aborting') 99 | 100 | def test_download(i, url, auth = None, use_range = False, abort = False): 101 | 102 | if use_range: 103 | range_start = random.randint(8192, filesize) 104 | range_end = random.randint(range_start, filesize - 8192) 105 | else: 106 | range_start = 0 107 | range_end = filesize 108 | 109 | if abort: 110 | break_bytes = random.randint(range_start + 8192, range_end - 8192) 111 | 112 | text = 'Download iteration %i' % i 113 | 114 | headers = {} 115 | if use_range: 116 | headers['Range'] = 'bytes=%i-%i' % (range_start, range_end) 117 | text += ' with range %s' % headers['Range'] 118 | 119 | if abort: 120 | text += ' aborting after %i bytes' % break_bytes 121 | 122 | text += ' of total size %i ' % filesize 123 | 124 | text += ' url %s' % url 125 | 126 | logger.info(text) 127 | 128 | res = requests.get(url, auth=auth, stream=True, headers=headers) 129 | 130 | if use_range: 131 | expected_status_code = 206 132 | else: 133 | expected_status_code = 200 134 | 135 | error_check(res.status_code == expected_status_code, 'Could not download, status code %i' % res.status_code) 136 | 137 | read_bytes = 0; 138 | for chunk in res.iter_content(8192): 139 | read_bytes += len(chunk) 140 | if abort and read_bytes >= break_bytes: 141 | break 142 | 143 | res.close() 144 | 145 | -------------------------------------------------------------------------------- /python/smashbox/configgen/generator.py: -------------------------------------------------------------------------------- 1 | import smashbox.configgen.processors as processors 2 | 3 | class Generator(object): 4 | ''' 5 | Class to generate configuration files. 6 | 7 | You need to set a processor chain in order to process the dict-like object 8 | and write it into a file. If no processor is set, it will write the same object 9 | 10 | Result may vary depending the processor chain being used 11 | ''' 12 | def __init__(self, processor_list = None): 13 | ''' 14 | Initialize the object with the processor chain set, or with an empty chain 15 | if None 16 | ''' 17 | self.processor_list = [] if processor_list == None else processor_list 18 | 19 | def insert_processor(self, i, processor): 20 | ''' 21 | Insert a new processor in the "i" position 22 | Check list.insert for details 23 | ''' 24 | self.processor_list.insert(i, processor) 25 | 26 | def append_processor(self, processor): 27 | ''' 28 | Append the processor to the end of the chain 29 | ''' 30 | self.processor_list.append(processor) 31 | 32 | def get_processor_list(self): 33 | ''' 34 | Get the processor list / chain 35 | ''' 36 | return self.processor_list 37 | 38 | def get_processor_by_name(self, name): 39 | ''' 40 | Get the processor by name or None if it's not found 41 | ''' 42 | for p in self.processor_list: 43 | if p.get_name() == name: 44 | return p 45 | 46 | def process_dict(self, local_dict): 47 | ''' 48 | Process the dictionary. It will go through all the process chain and it will be 49 | returned after that. 50 | ''' 51 | for p in self.processor_list: 52 | local_dict = p.do_process(local_dict) 53 | return local_dict 54 | 55 | def write_dict(self, output_file, local_dict): 56 | ''' 57 | Write the dictionary into a file. 
It will be readable by using the execfile 58 | function, which should be the same or similar format that the smashbox.conf.template 59 | file has, and MUST be a valid smashbox.conf file 60 | ''' 61 | with open(output_file, 'w') as f: 62 | for key in local_dict: 63 | f.write('%s = %s\n' % (key, repr(local_dict[key]))) 64 | 65 | def generate_new_config(self, input_file, output_file): 66 | ''' 67 | Generate a new configuration file from the input_file. The input file should be 68 | similar to the smashbox.conf.template. The processor chain must be set before 69 | calling this function 70 | ''' 71 | input_globals = {} 72 | input_locals = {} 73 | execfile(input_file, input_globals, input_locals) 74 | 75 | input_locals = self.process_dict(input_locals) 76 | self.write_dict(output_file, input_locals) 77 | 78 | def set_processors_from_data(self, processor_data): 79 | ''' 80 | Set the processor chain based on the data passed as parameter. Check the 81 | _configgen variable in the smashbox.conf.template for working data 82 | 83 | The processor_data should be a dictionary-like. Due to the order of the processor 84 | matters, an OrderedDict is recommended. 85 | The keys of the dictionary are 86 | the class name of the processor that will be used (from the 87 | smashbox.configgen.processors module). Currently there are only 4 processors available. 88 | The value for the key should also be a dictionary to initialize the processor. Only 89 | one parameter will be passed, that's why a dictionary is recommended, although 90 | what you must pass depends on the specific processor. 91 | ''' 92 | for key in processor_data: 93 | if hasattr(processors, key): 94 | processor_class = getattr(processors, key) 95 | if not issubclass(processor_class, processors.BasicProcessor): 96 | continue 97 | values = processor_data[key] 98 | processor = processor_class(values) 99 | self.append_processor(processor) 100 | else: 101 | pass 102 | 103 | def process_data_to_file(self, data, output_file): 104 | ''' 105 | Process the data passed as parameter through the chain and write the result 106 | to the file 107 | ''' 108 | data_to_output = self.process_dict(data) 109 | self.write_dict(output_file, data_to_output) 110 | 111 | -------------------------------------------------------------------------------- /lib/test_unicodejam.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ Add 1 (n) files to a directory (1 client) and check consistency across synch (2 clients). 
7 | """ 8 | 9 | from smashbox.utilities import * 10 | from smashbox.utilities.hash_files import * 11 | 12 | nfiles = 10 13 | 14 | def removeunicodejam(localdir): 15 | import glob 16 | fl = glob.glob(localdir+os.sep+'*') 17 | for f in fl: 18 | os.remove(f) 19 | return 20 | 21 | def checkunicodejam(localdir): 22 | import glob 23 | fl = glob.glob(unicode(localdir+os.sep+'*')) 24 | 25 | ngood = 0 26 | nbad = 0 27 | 28 | for f in fl: 29 | logger.debug('Checking: %s',f) 30 | fh = file(f) 31 | a = localdir+os.sep+fh.read() 32 | a = a.decode("UTF-8") 33 | 34 | if a!=f: 35 | logger.error('FILELEN: %d',len(f)) 36 | logger.error('FILE : %s',f) 37 | logger.error('CONTENT: %s',a) 38 | logger.error('CONTLEN: %d',len(a)) 39 | 40 | logger.error("FILE BYTES: %s %d",repr(f),len(repr(f))) 41 | logger.error("CONTENT BYTES: %s %d",repr(a),len(repr(a))) 42 | fh.close() 43 | nbad += 1 44 | else: 45 | ngood += 1 46 | 47 | return(ngood,nbad) 48 | 49 | import sys 50 | 51 | ## 52 | # various unicode random generators which probe subsets of unicode space 53 | # 54 | # 55 | import random 56 | 57 | def g_massimo(): 58 | return int( 1 + random.triangular(0,10,100)*(1+random.uniform(0,1))) # this gives only ASCII ? 59 | 60 | def g_all_unicode(): 61 | return int(random.uniform(97,sys.maxunicode)) # with this I manage to "break" owncloud server 5.0.14a --> I needed to manually delete entries from oc_filecache 62 | 63 | def g_plane0_unicode(): 64 | return int(random.uniform(0x80,0xffff)) # unicode plane0 only, non-ascii 65 | 66 | def g_plane0_unicode_degressive(): 67 | return int(random.triangular(0x80,0xffff,0x80)) # unicode plane0 only, non-ascii 68 | 69 | def g_plane0_reduced(): 70 | return int(random.uniform(0x80,0x1000)) # unicode plane0 only, non-ascii, first few pages only... 71 | 72 | ## 73 | 74 | 75 | def createunicodejam(localdir): 76 | 77 | forbidden = "/" 78 | nchar = int(random.uniform(1,50)) 79 | raw = u"" 80 | for i in range(nchar): 81 | 82 | j = g_plane0_reduced() 83 | 84 | cc = unichr(j) 85 | if cc in forbidden: continue 86 | raw+=cc 87 | 88 | filename = raw 89 | 90 | assert( len(filename) > 0) 91 | 92 | ff = localdir+os.sep+filename 93 | #print 'Preparing:',ff 94 | try: 95 | fh = file(ff,'w') 96 | fh.write(filename.encode("UTF-8")) 97 | fh.close 98 | except Exception,x: 99 | logger.warning('cannot create file: %s',x) 100 | 101 | 102 | @add_worker 103 | def worker0(step): 104 | 105 | # do not cleanup server files from previous run 106 | reset_owncloud_account() 107 | 108 | # cleanup all local files for the test 109 | reset_rundir() 110 | 111 | step(1,'Preparation') 112 | d = make_workdir() 113 | run_ocsync(d) 114 | k0 = count_files(d) 115 | if k0>0: 116 | os.system('rm '+d+os.sep+'*') 117 | run_ocsync(d) 118 | k0 = count_files(d) 119 | assert k0==0,'Cannot cleanup the directory %s' % d 120 | 121 | step(2,'Add %s files and check if we still have k1+nfiles after resync'%nfiles) 122 | 123 | for i in range(nfiles): 124 | logger.info('*** Creating file: %d',i) 125 | createunicodejam(d) 126 | 127 | run_ocsync(d) 128 | 129 | (ngood,nbad) = checkunicodejam(d) 130 | 131 | error_check(ngood==nfiles,'Not all files are OK! 
good=%d, bad=%d, expected=%d'%(ngood,nbad,nfiles)) 132 | error_check(nbad==0,'After synch corrupted files found good=%d, bad%d, expected=%d'%(ngood,nbad,nfiles)) 133 | 134 | if ngood==nfiles and nbad==0: logger.info('SUCCESS: %d files found',ngood) 135 | 136 | step(3,'Do nothing') 137 | 138 | 139 | @add_worker 140 | def worker1(step): 141 | 142 | step(1,'Preparation') 143 | d = make_workdir() 144 | 145 | step(2,'Do nothing') 146 | 147 | step(3,'Resync and check files added by worker0') 148 | 149 | run_ocsync(d) 150 | 151 | (ngood,nbad) = checkunicodejam(d) 152 | 153 | error_check(ngood==nfiles,'Not all files are OK! good=%d, bad=%d, expected=%d'%(ngood,nbad,nfiles)) 154 | error_check(nbad==0,'After synch corrupted files found: good=%d, bad=%d, expected=%d'%(ngood,nbad,nfiles)) 155 | 156 | if ngood==nfiles and nbad==0: logger.info('SUCCESS: %d files found',ngood) 157 | 158 | 159 | 160 | 161 | 162 | 163 | -------------------------------------------------------------------------------- /python/smashbox/owncloudorg/locking.py: -------------------------------------------------------------------------------- 1 | import owncloud 2 | 3 | __author__ = 'nickv' 4 | 5 | 6 | class LockProvider: 7 | LOCK_SHARED = 1 8 | LOCK_EXCLUSIVE = 2 9 | 10 | def __init__(self, oc_api): 11 | """ 12 | :param oc_api owncloud.Client 13 | """ 14 | self.oc_api = oc_api 15 | 16 | def enable_testing_app(self): 17 | try: 18 | self.oc_api.make_ocs_request( 19 | 'POST', 20 | 'cloud', 21 | 'apps/testing' 22 | ) 23 | except owncloud.ResponseError as err: 24 | raise err 25 | 26 | def disable_testing_app(self): 27 | try: 28 | self.oc_api.make_ocs_request( 29 | 'DELETE', 30 | 'cloud', 31 | 'apps/testing' 32 | ) 33 | except owncloud.ResponseError as err: 34 | raise err 35 | 36 | def isUsingDBLocking(self): 37 | try: 38 | kwargs = {'accepted_codes': [100, 501, 999]} 39 | res = self.oc_api.make_ocs_request( 40 | 'GET', 41 | 'apps/testing/api/v1', 42 | 'lockprovisioning', 43 | **kwargs 44 | ) 45 | 46 | import xml.etree.ElementTree as ET 47 | tree = ET.fromstring(res.content) 48 | code_el = tree.find('meta/statuscode') 49 | 50 | return int(code_el.text) == 100 51 | 52 | except owncloud.ResponseError as err: 53 | raise err 54 | 55 | 56 | def lock(self, lock_level, user, path): 57 | """ 58 | Lock the path for the given user 59 | 60 | :param lock_level: 1 (shared) or 2 (exclusive) 61 | :param user: User to lock the path 62 | :param path: Path to lock 63 | :raises: ResponseError if the path could not be locked 64 | """ 65 | try: 66 | self.oc_api.make_ocs_request( 67 | 'POST', 68 | 'apps/testing/api/v1', 69 | 'lockprovisioning/%i/%s?path=%s' % (lock_level, user, path) 70 | ) 71 | except owncloud.ResponseError as err: 72 | raise err 73 | 74 | def change_lock(self, lock_level, user, path): 75 | """ 76 | Change an existing lock 77 | 78 | :param lock_level: 1 (shared) or 2 (exclusive) 79 | :param user: User to lock the path 80 | :param path: Path to lock 81 | :raises: ResponseError if the lock could not be changed 82 | """ 83 | try: 84 | self.oc_api.make_ocs_request( 85 | 'PUT', 86 | 'apps/testing/api/v1', 87 | 'lockprovisioning/%i/%s?path=%s' % (lock_level, user, path) 88 | ) 89 | except owncloud.ResponseError as err: 90 | raise err 91 | 92 | def is_locked(self, lock_level, user, path): 93 | """ 94 | Check whether the path is locked 95 | 96 | :param lock_level: 1 (shared) or 2 (exclusive) 97 | :param user: User to lock the path 98 | :param path: Path to lock 99 | :returns bool 100 | """ 101 | try: 102 | kwargs = {'accepted_codes': [100, 423]} 
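# accept status codes 100 and 423 without raising, so that the OCS statuscode in the response body can be inspected below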
103 | res = self.oc_api.make_ocs_request( 104 | 'GET', 105 | 'apps/testing/api/v1', 106 | 'lockprovisioning/%i/%s?path=%s' % (lock_level, user, path), 107 | **kwargs 108 | ) 109 | 110 | import xml.etree.ElementTree as ET 111 | tree = ET.fromstring(res.content) 112 | code_el = tree.find('meta/statuscode') 113 | 114 | return int(code_el.text) == 100 115 | 116 | except owncloud.ResponseError as err: 117 | raise err 118 | 119 | def unlock(self, lock_level=None, user=None, path=None): 120 | """ 121 | Remove all set locks 122 | 123 | :param lock_level: 1 (shared) or 2 (exclusive) 124 | :param user: User to unlock the path 125 | :param path: Path to unlock 126 | :raises: ResponseError if the lock could not be removed 127 | """ 128 | ocs_path = 'lockprovisioning' 129 | 130 | if lock_level is not None: 131 | ocs_path = '%s/%i' % (ocs_path, lock_level) 132 | 133 | if user is not None: 134 | ocs_path = '%s/%s?path=%s' % (ocs_path, user, path) 135 | 136 | try: 137 | self.oc_api.make_ocs_request( 138 | 'DELETE', 139 | 'apps/testing/api/v1', 140 | ocs_path 141 | ) 142 | except owncloud.ResponseError as err: 143 | raise err 144 | -------------------------------------------------------------------------------- /lib/examples/test_hello.py: -------------------------------------------------------------------------------- 1 | 2 | # this is all-in-one example which shows various aspects of the smashbox framework 3 | 4 | # import utilities which are the building block of each testcase 5 | from smashbox.utilities import * 6 | from smashbox.utilities import reflection 7 | 8 | # all normal output should go via logger.info() 9 | # all additional output should go via logger.debug() 10 | 11 | logger.info("THIS IS A HELLO WORLD EXAMPLE") 12 | 13 | logger.debug("globals() %s",globals().keys()) 14 | 15 | 16 | # Workers run as independent processes and wait for each other at each 17 | # defined step: a worker will not enter step(N) until all others have 18 | # completed step(N-1). A worker waiting at step(N) has already 19 | # implicitly completed all steps 0..N-1 20 | 21 | @add_worker 22 | def helloA(step): 23 | logger.debug("globals() %s",globals().keys()) 24 | 25 | 26 | # Sharing of variables between workers - see below. 27 | shared = reflection.getSharedObject() 28 | 29 | step(0,'defining n') 30 | 31 | shared['n'] = 111 32 | 33 | # Variable 'n' is now shared and visible to all the workers. 34 | # This happens when a value of the variable is assigned. 35 | # 36 | # Limitations: Workers A and B should not modify the same shared 37 | # variable in parallel (that it in the same step). Also that 38 | # worker that sets the variable should do it in a step preceding 39 | # the steps in which other workers are making use of it. Only this 40 | # will guarantee that the value is set before someone else is 41 | # trying to make use of it. 42 | # 43 | # If you need more than one worker to modify the same 44 | # shared variable make sure this happens in separate steps. 45 | 46 | 47 | step(1,'defining xyz') 48 | 49 | # Contrary to the plain types (string,int,float) here we share a list - see limitations below. 50 | shared['xyz'] = [1,2,3] 51 | 52 | # If you modify the value in place of a shared.attribute 53 | # (e.g. list.append or list.sort) then this is NOT visible to other 54 | # processes until you really make the assignment. 
55 | # 56 | # Some ideas how to handle lists by assigning a new value: 57 | # * use shared['list']+=[a] instead of shared['list'].append(a) 58 | # * use shared['list']=sorted(shared['list']) instead of shared['list'].sort() 59 | # 60 | step(2,'waiting...') 61 | 62 | step(3,'checking integrity') 63 | 64 | # this is a non-fatal assert - the error will be reported and the test marked as failed but execution will continue 65 | error_check(shared['n']==222, 'problem handling shared n=%d'%shared['n']) 66 | 67 | # this is a fatal assert - execution will stop immediately 68 | fatal_check(list(shared['xyz'])==[1,2,3,4], 'problem handling shared xyz=%s'%repr(shared['xyz'])) 69 | 70 | @add_worker 71 | def helloB(step): 72 | logger.debug("dir() %s",dir()) 73 | 74 | shared = reflection.getSharedObject() 75 | 76 | step(2,'modifying and reassigning n, xyz') 77 | shared['n'] += 111 78 | shared['xyz'] += [4] 79 | 80 | step(3, 'checking integrity') 81 | error_check(shared['n']==222, 'problem handling shared n=%d'%shared['n']) 82 | error_check(list(shared['xyz'])==[1,2,3,4], 'problem handling shared xyz=%s'%repr(shared['xyz'])) 83 | 84 | 85 | @add_worker 86 | def reporter(step): 87 | shared = reflection.getSharedObject() 88 | 89 | # report on shared objects at every step 90 | for i in range(5): # until the last step used in this example 91 | step(i) 92 | logger.info("shared: %s",str(shared)) 93 | 94 | # this shows how workers with the same function body may be added in a loop any number of times 95 | 96 | # shared.k is an example of how NOT to use the shared object -- see comments at the top of this file 97 | # this worker code will run N times in parallel -- see below 98 | def any_worker(step): 99 | shared=reflection.getSharedObject() 100 | 101 | shared['k'] = 0 102 | 103 | step(1,None) 104 | 105 | shared['k'] += 1 106 | 107 | step(2,None) 108 | shared['k'] += 1 109 | 110 | step(3,None) 111 | shared['k'] += 1 112 | 113 | step(4,'finish') 114 | 115 | logger.info("k=%d, expected %d",shared['k'],N*3) 116 | # one would assume here that shared.k == N*3, however as the 117 | # assignments to shared.k are not atomic and may happen in parallel this is not reliable. 118 | # just don't do this kind of thing! 119 | 120 | # this shows how one may add configuration parameters to the testcase 121 | N = int(config.get('n_hello_workers',5)) 122 | 123 | logger.info("will create %d additional workers",N) 124 | 125 | # here we add the workers (and append the number to each name) 126 | for i in range(N): 127 | add_worker(any_worker,'any_worker%d'%i) 128 | 129 | 130 | -------------------------------------------------------------------------------- /python/smashbox/script.py: -------------------------------------------------------------------------------- 1 | 2 | import smashbox.compatibility.argparse as argparse 3 | 4 | def keyval_tuple(x): 5 | a,b = x.split('=',1) 6 | return (a.strip(),b) 7 | 8 | def arg_parser(**kwds): 9 | """ Create an ArgumentParser with common options for smash scripts and tools.
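The --option/-o flag may be repeated; each key=val pair is collected into args.options as a (key, value) tuple. Likewise --config/-c may be repeated to stack additional config files on top of the default one.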
10 | """ 11 | 12 | parser = argparse.ArgumentParser(**kwds) 13 | 14 | parser.add_argument('--option', '-o', metavar="key=val", dest="options", type=keyval_tuple, action='append', help='set config option') 15 | parser.add_argument('--dry-run', '-n', action='store_true', help='show config options and print what tests would be run') 16 | parser.add_argument('--quiet', '-q', action="store_true", help='do not produce output (other than errors)') 17 | parser.add_argument('--verbose', '-v', action="store_true", help='produce more output') 18 | parser.add_argument('--debug', action="store_true", help='produce very verbose output') 19 | parser.add_argument('--config','-c',dest="configs",default=[],action="append",help='config files (one or more), added on to of default config file') 20 | return parser 21 | 22 | 23 | import os.path 24 | main_config_file = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),'etc','smashbox.conf') 25 | 26 | class Configuration: 27 | # you may use config object for string interpolation "..."%config 28 | def __getitem__(self,x): 29 | return getattr(self,x) 30 | 31 | def _dict(self,**args): 32 | return dict(self.__dict__.items() + args.items()) 33 | 34 | def get(self,x,default): 35 | logger = getLogger() 36 | logger.debug('config.get(%s,default=%s)',repr(x),repr(default)) 37 | try: 38 | return getattr(self,x) 39 | except AttributeError: 40 | return default 41 | 42 | 43 | config = Configuration() 44 | 45 | def configure_from_blob(config_blob): 46 | import pickle 47 | global config 48 | config = pickle.loads(config_blob) 49 | config_log(level=logging.DEBUG) 50 | return config 51 | 52 | def dump_config_to_blob(): 53 | import pickle 54 | return pickle.dumps(config) 55 | 56 | def configure(cmdline_opts,config_files=None): 57 | """ Initialize config object and return it. 58 | 59 | First exec the sequence of config_files (including the 60 | main_config_file). All symbols defined by these files will be set 61 | as attributes of the config object. 62 | 63 | Then process cmdline_opts (which is a list of tuples generated by 64 | arg_parser). If attribute matching the option already exists (was 65 | defined in a configuration file) then eval to the same type (if not 66 | None). Otherwise leave string values. The string "None" is special 67 | and it is always converted to None and may always be assigned. 68 | 69 | """ 70 | 71 | if config_files is None: 72 | config_files = [] 73 | 74 | logger = getLogger() 75 | 76 | config_files = [main_config_file] + config_files 77 | 78 | for cf in config_files: 79 | execfile(cf,{},config.__dict__) 80 | 81 | if cmdline_opts: 82 | for key,val in cmdline_opts: 83 | try: 84 | if val == "None": 85 | val = None 86 | else: 87 | attr = getattr(config,key) 88 | # coerce val type to attr's type unless attr is set to None (then leave as-is ) 89 | try: 90 | if attr is not None: 91 | val = type(attr)(val) 92 | except ValueError,x: 93 | # allow setting to None 94 | logger.warning("cannot set option (type mismatch) %s=%s --> %s",key,repr(val),x) 95 | continue 96 | except AttributeError: 97 | # if attr unknown then leave the val as-is (string) 98 | pass 99 | 100 | setattr(config,key,val) 101 | 102 | config_log(level=logging.DEBUG) 103 | 104 | return config 105 | 106 | def config_log(level,hide_password=False): 107 | """ Dump the entire configuration to the logging system at the given level. 108 | If hide_password=True then do not show the real value of the options which contain "password" in their names. 
109 | """ 110 | logger = getLogger() 111 | for d in dir(config): 112 | if not d.startswith("_") and d != "get": 113 | value = repr(getattr(config,d)) 114 | if hide_password and 'password' in d: 115 | value = "***" 116 | 117 | logger.log(level,"CONFIG: %s = %s",d,value) 118 | 119 | 120 | import logging 121 | 122 | logger = None 123 | def getLogger(name="",level=None): 124 | global logger 125 | if not logger: 126 | if level is None: 127 | level = logging.INFO # change here to DEBUG if you want to debug config stuff 128 | logging.basicConfig(level=level) 129 | 130 | return logging.getLogger('.'.join(['smash',name])) 131 | -------------------------------------------------------------------------------- /lib/test_concurrentOverwriteFile.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ Concurrently upload the same large file by two sync clients. It may ne necessary to run mulitple times. In owncloud 5.0.10 this testcase triggers a race condition which is reported in the following way: 3 | 4 | 2013-11-13 15:54:23,039 - INFO - checker - shared w0d1 af27141daa272ef2285695fe8e709d9f 5 | 2013-11-13 15:54:23,039 - INFO - checker - shared w0v1 19987ddec02a36d6403a274565032045 6 | 2013-11-13 15:54:23,040 - INFO - checker - shared w0v2 af27141daa272ef2285695fe8e709d9f 7 | 2013-11-13 15:54:23,040 - INFO - checker - shared w1d1 19987ddec02a36d6403a274565032045 8 | 2013-11-13 15:54:23,040 - INFO - checker - shared w1d2 ffffc84eaed851baa0e61b554aa90daa 9 | 2013-11-13 15:54:23,041 - INFO - checker - shared w1v1 ffffc84eaed851baa0e61b554aa90daa 10 | 2013-11-13 15:54:23,041 - INFO - checker - shared w2d1 ffffc84eaed851baa0e61b554aa90daa 11 | 12 | 2013-11-13 15:54:24,337 - ERROR - checker - a version af27141daa272ef2285695fe8e709d9f (filename test.BIG.v1384354395) does not correspond to any previously generated file 13 | 14 | 15 | """ 16 | import time 17 | import tempfile 18 | import glob 19 | 20 | from smashbox.utilities import * 21 | from smashbox.utilities import reflection 22 | 23 | @add_worker 24 | def worker0(step): 25 | shared = reflection.getSharedObject() 26 | 27 | reset_owncloud_account() 28 | reset_rundir() 29 | 30 | #versions = get_md5_versions_on_server('test.BIG') 31 | 32 | step(1,'create initial content and sync') 33 | 34 | d = make_workdir() 35 | fn = '%s/test.BIG'%d 36 | createfile(fn,'0',count=100000,bs=1000) 37 | shared['w0v1'] = md5sum(fn) 38 | logger.info(shared['w0v1']) 39 | hexdump(fn) 40 | 41 | run_ocsync(d) 42 | 43 | step(3,'modify local content') 44 | 45 | createfile(fn,'1',count=200,bs=1000000) # create large file -> it will take longer to sync 46 | shared['w0v2'] = md5sum(fn) 47 | logger.info(shared['w0v2']) 48 | hexdump(fn) 49 | 50 | step(4,'sync local content') 51 | 52 | run_ocsync(d) 53 | 54 | shared['w0d1'] = md5sum(fn) 55 | logger.info(shared['w0d1']) 56 | hexdump(fn) 57 | 58 | if shared['w0d1'] == shared['w0v2']: 59 | logger.info("Content NOT changed locally") 60 | else: 61 | logger.info("CONTENT CHANGED LOCALLY") 62 | 63 | #step(4) 64 | #run_ocsync(d) 65 | #step(5) 66 | logger.info('output %s',d) 67 | 68 | 69 | @add_worker 70 | def worker1(step): 71 | shared = reflection.getSharedObject() 72 | 73 | step(2,'sync initial state created by worker 0') 74 | 75 | d = make_workdir() 76 | run_ocsync(d) 77 | 78 | fn = '%s/test.BIG'%d 79 | 80 | shared['w1d1'] = md5sum(fn) 81 | logger.info(shared['w1d1']) 82 | error_check(shared['w1d1'] == shared['w0v1'],'downloaded files does not match the initially created file') 83 | 84 | 
step(3,'modify local content') 85 | 86 | createfile(fn,'2',count=200000,bs=1000) # create large file -> it will take longer to sync 87 | 88 | shared['w1v1'] = md5sum(fn) 89 | logger.info(shared['w1v1']) 90 | hexdump(fn) 91 | 92 | step(4,'sync modified file') 93 | 94 | # add a bit of delay to make sure worker1 starts later than worker0 95 | sleep(2) 96 | 97 | run_ocsync(d) 98 | 99 | shared['w1d2'] = md5sum(fn) 100 | logger.info(shared['w1d2']) 101 | hexdump(fn) 102 | 103 | step(5) 104 | 105 | logger.info('output %s',d) 106 | 107 | 108 | @add_worker 109 | def checker(step): 110 | shared = reflection.getSharedObject() 111 | 112 | step(6,'sync the final state of the repository into a fresh local folder') 113 | #sleep(10) 114 | 115 | d = make_workdir() 116 | run_ocsync(d) 117 | 118 | fn = '%s/test.BIG'%d 119 | 120 | shared['w2d1'] = md5sum(fn) 121 | logger.info(shared['w2d1']) 122 | 123 | # print the status 124 | logger.info('final output %s',d) 125 | logger.info('content as reported by webdav') 126 | #runcmd('curl -s -k -XPROPFIND %s | xmllint --format -'%oc_webdav_url()) #FIXME: no request body, unsupported by EOS 127 | 128 | #DISABLED FOR NOW 129 | #list_versions_on_server('test.BIG') 130 | 131 | for x in sorted(shared.keys()): 132 | logger.info('shared %s %s',x,shared[x]) 133 | 134 | # verify the status 135 | 136 | error_check(shared['w2d1'] in [shared['w0v1'],shared['w0v2'],shared['w1v1']], "file downloaded by the checker does not correspond to any file created locally by the workers") 137 | 138 | if False: 139 | # DISABLED FOR NOW 140 | # make sure that all versions stored on a server correpond to a version generated locally 141 | versions = get_md5_versions_on_server('test.BIG') 142 | 143 | for v5,name in versions: 144 | error_check(not v5 in [shared['w0v1'],shared['w0v2'], shared['w1v1']], 145 | 'a version %s (filename %s) does not correspond to any previously generated file'%(v5,name)) 146 | 147 | 148 | ### ASSERT 149 | # make sure it is empty 150 | #assert(glob.glob(d+'/*') == []) 151 | 152 | -------------------------------------------------------------------------------- /lib/test_filenames.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import get_files 3 | from smashbox.utilities.hash_files import count_files 4 | 5 | import os 6 | 7 | __doc__ = """ Test various characters in the file names. 8 | 9 | bug #104648: add UTF-8 escaping in PROPFIND response body (https://savannah.cern.ch/bugs/?104648) 10 | 11 | Notes: 12 | - unescaped % characters in propfind response crashes csync 13 | 14 | """ 15 | 16 | filesizeKB = int(config.get('filenames_filesizeKB',1)) 17 | 18 | # see: mirall/csync/src/csync_exclude.c 19 | charsets_excluded_from_sync = { 20 | 'backslash' : '\\', 21 | 'colon' : ':', 22 | 'questionmark' : '?', 23 | 'asterisk' : '*', 24 | 'doublequote' : '"', 25 | 'greater' : '>', 26 | 'smaller' : '<', 27 | 'pipe' : '|' 28 | } 29 | 30 | def is_excluded(name): 31 | """ 32 | Returns true if the given file name matches an pattern 33 | excluded from sync by the sync client. 
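Covered patterns are '.', names ending in '~', names starting with '._', and names whose extension begins with '~'.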
34 | 35 | :param name: file name to check 36 | :returns: True if the file name must be excluded, False otherwise 37 | """ 38 | if name == '.': # skip this 39 | return True 40 | 41 | # excluded pattern "*~" 42 | if name[-1] == '~': 43 | return True 44 | 45 | # excluded pattern "._*" 46 | if len(name) >= 2 and name[0:2] == '._': 47 | return True 48 | 49 | file_name, ext = os.path.splitext(name) 50 | 51 | # excluded pattern "*.~*" 52 | if len(ext) > 1 and ext[1] == '~': 53 | return True 54 | 55 | return False 56 | 57 | @add_worker 58 | def creator(step): 59 | 60 | reset_owncloud_account() 61 | reset_rundir() 62 | 63 | step(1,'create initial content and sync') 64 | 65 | d = make_workdir() 66 | run_ocsync(d) 67 | 68 | namepatterns = [ 69 | "space1 testfile.dat", 70 | "space2testfile .dat", 71 | " space3testfile .dat", 72 | "space4testfile.dat ", 73 | "space5testfile. dat", 74 | " space6 testfile . dat ", 75 | " " 76 | ] 77 | 78 | charsets = { 'space' : ' ', 79 | 'plus' : '+', 80 | 'underscore' : '_', 81 | 'moscicki' : '\xc5\x9b', # some UTF-8 unicode character... 82 | 'singlequote' : "'" 83 | } 84 | 85 | charsets.update(charsets_excluded_from_sync) 86 | 87 | filenames = [] 88 | 89 | for c in charsets: 90 | for n in namepatterns: 91 | nn = n.replace('space', "_"+c+"_").replace(' ',charsets[c]) 92 | #print nn 93 | filenames.append(nn) 94 | createfile(os.path.join(d,nn),'1',count=filesizeKB,bs=1000) 95 | 96 | # generic charsets -- let's take a hammer and test (almost) all ANSI characters 97 | # we don't test for the foward slash 98 | char_range = range(32,47)+range(58,65)+range(91,97)+range(123,127) 99 | 100 | #char_range.remove(37) #to see the tests to complition temporarily remove this character as it crashes csync 101 | #char_range=[] 102 | for i in char_range: 103 | for n in namepatterns: 104 | nn = n.replace('space','_chr'+str(i)+'_').replace(' ',chr(i)) 105 | if nn == '.': # skip this 106 | continue 107 | filenames.append(nn) 108 | createfile(os.path.join(d,nn),'1',count=filesizeKB,bs=1000) 109 | 110 | files_1 = get_files(d) 111 | N = count_files(d) 112 | 113 | shared = reflection.getSharedObject() 114 | 115 | shared['files_1'] = files_1 116 | shared['N'] = N 117 | 118 | for i in range(3): # 2 is enough but 3 is better ;-) 119 | list_files(d) 120 | run_ocsync(d) 121 | error_check(count_files(d) == N, "some files lost!") 122 | 123 | files_2 = get_files(d) 124 | 125 | for fn in set(files_1)-set(files_2): 126 | error_check(False, "the file has disappeared: %s"%repr(fn)) 127 | 128 | for fn in set(files_2)-set(files_1): 129 | error_check(False, "New file appeared: %s" % repr(fn)) 130 | 131 | 132 | 133 | @add_worker 134 | def propagator(step): 135 | 136 | step(2,'check propagation of files') 137 | 138 | d = make_workdir() 139 | 140 | shared = reflection.getSharedObject() 141 | 142 | files_1 = shared['files_1'] 143 | 144 | # take the original file list produced by creator and remove all file names containing characters excluded from sync 145 | expected_files = [fn for fn in files_1 if not any((c in charsets_excluded_from_sync.values()) for c in fn) ] 146 | # also exclude file name patterns 147 | expected_files = [fn for fn in expected_files if not is_excluded(fn)] 148 | 149 | logger.info("expected %d files to be propagated (excluding the ones with unsyncable characters %s)",len(expected_files),repr(charsets_excluded_from_sync.values())) 150 | 151 | run_ocsync(d) 152 | 153 | N2 = count_files(d) 154 | files_3 = os.listdir(d) 155 | 156 | for fn in set(expected_files)-set(files_3): 157 | 
error_check(False, "the file has not been propagated: %s"%repr(fn)) 158 | -------------------------------------------------------------------------------- /etc/smashbox.conf.template: -------------------------------------------------------------------------------- 1 | # 2 | # The _open_SmashBox Project. 3 | # 4 | # Author: Jakub T. Moscicki, CERN, 2013 5 | # License: AGPL 6 | # 7 | # this is the main config file template: copy to smashbox.conf and adjust the settings 8 | # 9 | # this template should work without changes if you are running your tests directly on the owncloud application server 10 | # 11 | 12 | # this is the top directory where all local working files are kept (test working directories, test logs, test data, temporary filesets, ..) 13 | smashdir = "~/smashdir" 14 | 15 | # name of the account used for testing 16 | # if None then account name is chosen automatically (based on the test name) 17 | oc_account_name=None 18 | 19 | # default number of users for tests involving multiple users (user number is appended to the oc_account_name) 20 | # this only applies to the tests involving multiple users 21 | oc_number_test_users=3 22 | 23 | # name of the group used for testing 24 | oc_group_name=None 25 | 26 | # default number of groups for tests involving multiple groups (group number is appended to the oc_group_name) 27 | # this only applies to the tests involving multiple groups 28 | oc_number_test_groups=1 29 | 30 | # password for test accounts: all test accounts will have the same password 31 | # if not set then it's an error 32 | oc_account_password="" 33 | 34 | # owncloud test server 35 | # if left blank or "localhost" then the real hostname of the localhost will be set 36 | oc_server = '' 37 | 38 | 39 | # root of the owncloud installation as visible in the URL 40 | oc_root = 'owncloud' 41 | 42 | # webdav endpoint URI within the oc_server 43 | import os.path 44 | oc_webdav_endpoint = os.path.join(oc_root,'remote.php/webdav') # standard owncloud server 45 | 46 | # target folder on the server (this may not be compatible with all tests) 47 | oc_server_folder = '' 48 | 49 | # should we use protocols with SSL (https, ownclouds) 50 | oc_ssl_enabled = True 51 | 52 | # how to invoke shell commands on the server 53 | # for localhost there is no problem - leave it blank 54 | # for remote host it may be set like this: "ssh -t -l root $oc_server" 55 | # note: configure ssh for passwordless login 56 | # note: -t option is to make it possible to run sudo 57 | oc_server_shell_cmd = "" 58 | 59 | # Data directory on the owncloud server. 60 | # 61 | oc_server_datadirectory = os.path.join('/var/www/html',oc_root, 'data') 62 | 63 | # a path to server side tools (create_user.php, ...)
64 | # 65 | # it may be specified as a relative path "dir", which then resolves to 66 | # "dir" under the top-level of the tree 67 | # containing THIS configuration file 68 | # 69 | 70 | oc_server_tools_path = "server-tools" 71 | 72 | # a path to ocsync command with options 73 | # this path should work for all client hosts 74 | # 75 | # it may be specified as a relative path "dir", which then resolves to 76 | # "dir" under the top-level of the tree 77 | # containing THIS configuration file 78 | # 79 | oc_sync_cmd = "client/build/mirall/bin/owncloudcmd --trust" 80 | 81 | # number of times to repeat ocsync run every time 82 | oc_sync_repeat = 1 83 | 84 | #################################### 85 | 86 | # unique identifier of your test run 87 | # if None then the runid is chosen automatically (and stored in this variable) 88 | runid = None 89 | 90 | # if True then the local working directory path will have the runid added to it automatically 91 | workdir_runid_enabled=False 92 | 93 | # if True then the runid will be part of the oc_account_name automatically 94 | oc_account_runid_enabled=False 95 | 96 | #################################### 97 | 98 | # this defines the default account cleanup procedure 99 | # - "delete": delete account if exists and then create a new account with the same name 100 | # - "keep": don't delete existing account but create one if needed 101 | # 102 | # these are not implemented yet: 103 | # - "sync_delete": delete all files via a sync run 104 | # - "webdav_delete": delete all files via webdav DELETE request 105 | # - "filesystem_delete": delete all files directly on the server's filesystem 106 | oc_account_reset_procedure = "delete" 107 | 108 | # this defines the default local run directory reset procedure 109 | # - "delete": delete everything in the local run directory prior to running the test 110 | # - "keep": keep all files (from the previous run) 111 | rundir_reset_procedure = "delete" 112 | 113 | web_user = "www-data" 114 | 115 | oc_admin_user = "at_admin" 116 | oc_admin_password = "admin" 117 | 118 | # cleanup imported namespaces 119 | del os 120 | 121 | # Verbosity of curl client. 122 | # If None then verbosity is on when smashbox is run in --debug mode. 123 | # set it to True or False to override 124 | # 125 | pycurl_verbose = None 126 | 127 | # scp port to be used in scp commands, used primarily when copying over the server log file 128 | scp_port = 22 129 | 130 | # user that can r+w the owncloud.log file (needs to be configured for passwordless login) 131 | oc_server_log_user = "www-data" 132 | 133 | # 134 | # Reset the server log file and verify that no exceptions and other known errors have been logged 135 | # 136 | oc_check_server_log = False 137 | 138 | # 139 | # Reset the diagnostic log file and use diagnostics for assertions 140 | # 141 | oc_check_diagnostic_log = False -------------------------------------------------------------------------------- /lib/test_pingpong.py: -------------------------------------------------------------------------------- 1 | from smashbox.utilities import * 2 | from smashbox.utilities.hash_files import count_files 3 | 4 | import glob 5 | 6 | __doc__ = """ Test parallel upload of the same file by two clients. 7 | 8 | The name of the test derives from the behaviour observed with the eos 9 | webdav endpoint: two file versions would ping-pong between the 10 | clients.
11 | 12 | This documents an interesting semantics of the system: parallel upload 13 | into the same destination file should both succeed without creating a 14 | conflict -- this is because the ETAG check happens while processing 15 | the headers and not while commiting the file to the strorage. 16 | 17 | So one file version will appear as "lost" - but in fact is recoverable via the versions. 18 | 19 | EOS webdav endpoint behaviour is like this. 20 | 21 | Owncloud6 behaviour (with EOS fuse-mounted datadirectory on cbox06) is 22 | broken and fails silently: ETAG generated by first upload of ping and 23 | pong is the same, hence the clients think they are up-to-date and 24 | never download a newer content from the server. 25 | 26 | Owncloud 5 server is also broken and fails one of the initial uploads with Bad Request: 27 | 28 | Logfile: test_pingpong/pong-ocsync.step02.cnt000.log 29 | 30 | "PUT" QUrl( "https://box.server/remote.php/webdav/BALL 31 | 32 | 33 | Sabre_DAV_Exception_BadRequest 34 | expected filesize 5000000 got 35 | 1.7.6 36 | 37 | 38 | 39 | """ 40 | 41 | filesizeKB = int(config.get('pingpong_filesizeKB',5000)) 42 | pongdelay = float(config.get('pingpong_pongdelay',0)) 43 | 44 | @add_worker 45 | def ping(step): 46 | 47 | reset_owncloud_account() 48 | reset_rundir() 49 | 50 | shared = reflection.getSharedObject() 51 | 52 | seen_files = set() 53 | 54 | step(1,'initialize') 55 | 56 | d = make_workdir() 57 | 58 | createfile(os.path.join(d,'BALL'),'0',count=1000,bs=filesizeKB) 59 | 60 | BALL = md5sum(os.path.join(d,'BALL')) 61 | logger.info('BALL: %s',BALL) 62 | 63 | shared['PING_BALL'] = BALL 64 | seen_files.add(BALL) 65 | 66 | # we do exactly 2 sync runs 67 | # in the first one the files are uploaded in parallel -- note: no conflict created! 68 | # in the second one either ping or pong grabs and downloads the most recent version of the file 69 | step(2,'first sync') 70 | run_ocsync(d,n=2) 71 | LAST_BALL = md5sum(os.path.join(d,'BALL')) 72 | logger.info('LAST_BALL: %s',LAST_BALL) 73 | 74 | for i in range(3,10): 75 | seen_files.add(LAST_BALL) 76 | step(i,'next sync') 77 | run_ocsync(d,n=1) 78 | BALL = md5sum(os.path.join(d,'BALL')) 79 | logger.info('BALL: %s',BALL) 80 | error_check( BALL == LAST_BALL, "the file is ping-ponging between the clients") 81 | LAST_BALL = BALL 82 | 83 | shared['PING_SEEN_FILES'] = len(seen_files) 84 | 85 | step(90, "Verification if files moved at all") 86 | 87 | conflict_files = get_conflict_files(d) 88 | error_check( len(conflict_files) == 0, "Conflicts found!") 89 | 90 | @add_worker 91 | def pong(step): 92 | 93 | seen_files = set() 94 | 95 | step(1,'initialize') 96 | 97 | d = make_workdir() 98 | shared = reflection.getSharedObject() 99 | 100 | createfile(os.path.join(d,'BALL'),'1',count=1000,bs=filesizeKB) 101 | 102 | BALL = md5sum(os.path.join(d,'BALL')) 103 | logger.info('BALL: %s',BALL) 104 | 105 | shared['PONG_BALL'] = BALL 106 | seen_files.add(BALL) 107 | 108 | step(2,'first sync') 109 | if pongdelay: 110 | logger.info('pong delay %0.3fs',pongdelay) 111 | time.sleep(pongdelay) 112 | 113 | run_ocsync(d,n=2) 114 | LAST_BALL = md5sum(os.path.join(d,'BALL')) 115 | logger.info('LAST_BALL: %s',LAST_BALL) 116 | 117 | for i in range(3,10): 118 | seen_files.add(LAST_BALL) 119 | step(i,'next sync') 120 | run_ocsync(d,n=1) 121 | BALL = md5sum(os.path.join(d,'BALL')) 122 | logger.info('BALL: %s',BALL) 123 | error_check( BALL == LAST_BALL, "the file is ping-ponging between the clients") 124 | LAST_BALL = BALL 125 | 126 | step(90, "Verification if files moved at 
all") 127 | 128 | PING_SEEN_FILES = shared['PING_SEEN_FILES'] 129 | PONG_SEEN_FILES = len(seen_files) 130 | 131 | logger.info('PING_SEEN_FILES: %d',PING_SEEN_FILES) 132 | logger.info('PONG_SEEN_FILES: %d',PONG_SEEN_FILES) 133 | 134 | 135 | # one client should see exactly one file version and the other one exactly two versions 136 | if not ( (PING_SEEN_FILES==1 and PONG_SEEN_FILES==2) or (PING_SEEN_FILES==2 and PONG_SEEN_FILES==1) ): 137 | 138 | if PING_SEEN_FILES==2 and PONG_SEEN_FILES==2: 139 | error_check(False, "File was pingponging") 140 | else: 141 | if PING_SEEN_FILES==1 or PONG_SEEN_FILES==1: 142 | error_check(False, "File was not transmitted by one or both clients") 143 | if PING_SEEN_FILES>2 or PONG_SEEN_FILES>2: 144 | error_check(False, "Too many file versions -- possible data corruption") 145 | 146 | # FIXME: check if versions have been correctly created on the server 147 | 148 | conflict_files = get_conflict_files(d) 149 | error_check( len(conflict_files) == 0, "Conflicts found!") 150 | -------------------------------------------------------------------------------- /lib/owncloud/test_dirDepth.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | Test uploading a large number of files to a directory and then syncing 5 | 6 | +--------+----------------------------------------------+-------------------------------------+ 7 | | Step | Uploader | Downloader | 8 | | Number | | | 9 | +========+==============================================+=====================================+ 10 | | 2 | Create work dir | Create work dir | 11 | +--------+----------------------------------------------+-------------------------------------+ 12 | | 3 | Create directories and files and upload them | | 13 | +--------+----------------------------------------------+-------------------------------------+ 14 | | 4 | Validate files have been uploaded | | 15 | +--------+----------------------------------------------+-------------------------------------+ 16 | | 5 | | Sync | 17 | +--------+----------------------------------------------+-------------------------------------+ 18 | | 6 | | Validate files have been downloaded | 19 | +--------+----------------------------------------------+-------------------------------------+ 20 | 21 | Data Providers: 22 | test_numFilesToCreate: Number of files to create 23 | test_filesizeKB: Size of file to create in KB 24 | dir_depth: How deep the directory structure should go 25 | dir_depth_style: Defines if the directory layout is flat or hierarchial 26 | 27 | """ 28 | 29 | from smashbox.utilities import * 30 | import re 31 | 32 | filesizeKB = int(config.get('test_filesizeKB', 10)) 33 | numFilesToCreate = config.get('test_numFilesToCreate', 10) 34 | dir_depth = config.get('dir_depth', 5) 35 | style = config.get('dir_depth_style', 'nested') 36 | 37 | testsets = [ 38 | { 39 | 'dir_depth': 5, 40 | 'test_numFilesToCreate': 50, 41 | 'test_filesizeKB': 20, 42 | 'dir_depth_style': 'nested', 43 | }, 44 | { 45 | 'dir_depth': 5, 46 | 'test_numFilesToCreate': 50, 47 | 'test_filesizeKB': 200, 48 | 'dir_depth_style': 'nested', 49 | }, 50 | { 51 | 'dir_depth': 10, 52 | 'test_numFilesToCreate': 5, 53 | 'test_filesizeKB': 2000, 54 | 'dir_depth_style': 'flat' 55 | }, 56 | { 57 | 'dir_depth': 10, 58 | 'test_numFilesToCreate': 5, 59 | 'test_filesizeKB': 2000, 60 | 'dir_depth_style': 'nested' 61 | }, 62 | ] 63 | 64 | 65 | def uploader(step): 66 | 67 | step(2, 'Create workdir') 68 | d = make_workdir() 69 | user_num = 
get_user_number_from_work_directory(d) 70 | 71 | step(3, 'Create directories and files then sync') 72 | files = [] 73 | 74 | if style == 'flat': 75 | for i in range(dir_depth): 76 | dir_name = os.path.join(d, "%s_%d" % ('upload_dir', i)) 77 | upload_dir = make_workdir(dir_name) 78 | for j in range(0, numFilesToCreate): 79 | upload_name = "%s_%d.dat" % ('TEST_FILE_NEW_USER_SHARE', j) 80 | createfile(os.path.join(upload_dir, upload_name), '0', count=1000, bs=filesizeKB) 81 | files.append(os.path.join(upload_dir, upload_name)[len(d) + 1:]) 82 | else: 83 | dir_name = d 84 | for i in range(dir_depth): 85 | dir_name = os.path.join(dir_name, "%s_%d" % ('upload_dir', i)) 86 | upload_dir = make_workdir(dir_name) 87 | for j in range(0, numFilesToCreate): 88 | upload_name = "%s_%d.dat" % ('TEST_FILE_NEW_USER_SHARE', j) 89 | createfile(os.path.join(upload_dir, upload_name), '0', count=1000, bs=filesizeKB) 90 | files.append(os.path.join(upload_dir, upload_name)[len(d) + 1:]) 91 | 92 | run_ocsync(d, user_num=user_num) 93 | shared = reflection.getSharedObject() 94 | shared['FILES_%i' % user_num] = files 95 | 96 | step(4, 'Uploader verify files are uploaded') 97 | 98 | for f in files: 99 | expect_exists(os.path.join(d, f)) 100 | expect_webdav_exist(f, user_num=user_num) 101 | 102 | step(5, 'Uploader final step') 103 | 104 | 105 | def downloader(step): 106 | 107 | step(2, 'Create workdir') 108 | d = make_workdir() 109 | user_num = get_user_number_from_work_directory(d) 110 | 111 | step(5, 'Sync and validate') 112 | run_ocsync(d, user_num=user_num) 113 | 114 | step(6, 'Downloader validate that all files exist') 115 | shared = reflection.getSharedObject() 116 | files = shared['FILES_%i' % user_num] 117 | 118 | error_check(len(files) == dir_depth * numFilesToCreate, 'Number of files does not match') 119 | 120 | for f in files: 121 | expect_exists(os.path.join(d, f)) 122 | 123 | for u in range(config.oc_number_test_users): 124 | add_worker(uploader, name="uploader%02d" % (u+1)) 125 | add_worker(downloader, name="downloader%02d" % (u+1)) 126 | 127 | 128 | def get_user_number_from_work_directory(work_dir): 129 | """ 130 | :param work_dir: string Path of the directory 131 | /home/user/smashdir/test_uploadFiles-150522-111229/shareeTwo01 132 | :return: integer User number from the last directory name 133 | """ 134 | 135 | work_dir = work_dir[len(config.rundir) + 1:] 136 | user_num = int(re.search(r'\d+', work_dir).group()) 137 | return user_num 138 | -------------------------------------------------------------------------------- /python/smashbox/utilities/version.py: -------------------------------------------------------------------------------- 1 | import operator 2 | 3 | 4 | def version_compare(v1, operator, v2): 5 | """ 6 | The function first replaces _, - and + with a dot . in the version strings and also inserts dots . before and 7 | after any non number so that for example '4.3.2RC1' becomes '4.3.2.RC.1'. Then it compares the parts starting 8 | from left to right. If a part contains special version strings these are handled in the following order: 9 | any string not found in this list < dev < alpha = a < beta = b < RC = rc < # 10 | This way not only versions with different levels like '4.1' and '4.1.2' can be compared but also any version 11 | containing development state. 
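For example, '4.3.2RC1' compares lower than '4.3.2' (rc sorts below any number) and '2.1' compares equal to '2.1.0' (shorter versions are padded with zeros).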
12 | 13 | :param v1: Version to compare 14 | :param operator: Can be one of <, <=, =, !=, >=, > 15 | :param v2: Version to compare against 16 | :return: Boolean 17 | """ 18 | return __version_compare_tuple(__normalize_version(v1), operator, __normalize_version(v2)) 19 | 20 | 21 | def __version_compare_tuple(t1, compare, t2): 22 | 23 | # Make both versions the same length 24 | if len(t1) < len(t2): 25 | for i in range(len(t1), len(t2)): 26 | t1.append(0) 27 | elif len(t2) < len(t1): 28 | for i in range(len(t2), len(t1)): 29 | t2.append(0) 30 | 31 | if compare == '<': 32 | return operator.lt(t1, t2) 33 | elif compare == '<=': 34 | return operator.le(t1, t2) 35 | elif compare == '=' or compare == '==': 36 | return operator.eq(t1, t2) 37 | elif compare == '!=': 38 | return operator.ne(t1, t2) 39 | elif compare == '>=': 40 | return operator.ge(t1, t2) 41 | elif compare == '>': 42 | return operator.gt(t1, t2) 43 | else: 44 | raise ValueError('Invalid operator') 45 | 46 | 47 | def __normalize_version(version): 48 | v = version.lower() 49 | v = v.replace('-', '.') 50 | v = v.replace('_', '.') 51 | v = v.replace('+', '.') 52 | 53 | fixed_version = '' 54 | last_was_digit = True 55 | for char in v: 56 | if (char.isdigit() or char == '.') and not last_was_digit: 57 | fixed_version += '.' 58 | last_was_digit = True 59 | elif not (char.isdigit() or char == '.') and last_was_digit: 60 | fixed_version += '.' 61 | last_was_digit = False 62 | fixed_version += char 63 | 64 | v = fixed_version 65 | 66 | while '..' in v: 67 | v = v.replace('..', '.') 68 | 69 | if v[:1] == '.': 70 | v = v[1:] 71 | if v[-1:] == '.': 72 | v = v[:-1] 73 | 74 | return map(__prepare_tuple, v.split(".")) 75 | 76 | 77 | def __prepare_tuple(item): 78 | """ 79 | Replaces strings with integers, so we can compare them correctly: 80 | any string not found in this list < dev < alpha = a < beta = b < RC = rc < # 81 | 82 | :param item: 83 | :return: 84 | """ 85 | if item == 'dev': 86 | return -4 87 | if item == 'a' or item == 'alpha': 88 | return -3 89 | if item == 'b' or item == 'beta': 90 | return -2 91 | if item == 'rc': 92 | return -1 93 | if item.isdigit(): 94 | return int(item) 95 | # if item == 'pl' or item == 'p': 96 | # return sys.maxsize 97 | 98 | # Any other string that is no number string 99 | return -5 100 | 101 | 102 | if __name__ == "__main__": 103 | 104 | def assert_version_compare(v1, operator, v2, result): 105 | if version_compare(v1, operator, v2) == result: 106 | print('[PASS] Comparing %s %s %s' % (v1, operator, v2)) 107 | else: 108 | print('[ERROR] Comparing %s %s %s' % (v1, operator, v2)) 109 | 110 | assert_version_compare('2.1.0alpha1', '<', '2.1.0beta1', True) 111 | assert_version_compare('2.1.0alpha1', '<=', '2.1.0beta1', True) 112 | assert_version_compare('2.1.0alpha1', '=', '2.1.0beta1', False) 113 | assert_version_compare('2.1.0alpha1', '>=', '2.1.0beta1', False) 114 | assert_version_compare('2.1.0alpha1', '>', '2.1.0beta1', False) 115 | 116 | assert_version_compare('2.1.0beta1', '<', '2.1.0rc1', True) 117 | assert_version_compare('2.1.0beta1', '<=', '2.1.0rc1', True) 118 | assert_version_compare('2.1.0beta1', '=', '2.1.0rc1', False) 119 | assert_version_compare('2.1.0beta1', '>=', '2.1.0rc1', False) 120 | assert_version_compare('2.1.0beta1', '>', '2.1.0rc1', False) 121 | 122 | assert_version_compare('2.1.0rc1', '<', '2.1.0', True) 123 | assert_version_compare('2.1.0rc1', '<=', '2.1.0', True) 124 | assert_version_compare('2.1.0rc1', '=', '2.1.0', False) 125 | assert_version_compare('2.1.0rc1', '>=', '2.1.0', 
False) 126 | assert_version_compare('2.1.0rc1', '>', '2.1.0', False) 127 | 128 | assert_version_compare('2.1.0', '<', '2.1.0', False) 129 | assert_version_compare('2.1.0', '<=', '2.1.0', True) 130 | assert_version_compare('2.1.0', '=', '2.1.0', True) 131 | assert_version_compare('2.1.0', '>=', '2.1.0', True) 132 | assert_version_compare('2.1.0', '>', '2.1.0', False) 133 | 134 | assert_version_compare('2.1.0', '<', '2.1', False) 135 | assert_version_compare('2.1.0', '<=', '2.1', True) 136 | assert_version_compare('2.1.0', '=', '2.1', True) 137 | assert_version_compare('2.1.0', '>=', '2.1', True) 138 | assert_version_compare('2.1.0', '>', '2.1', False) 139 | 140 | assert_version_compare('2.1.1', '<', '2.1.0', False) 141 | assert_version_compare('2.1.1', '<=', '2.1.0', False) 142 | assert_version_compare('2.1.1', '=', '2.1.0', False) 143 | assert_version_compare('2.1.1', '>=', '2.1.0', True) 144 | assert_version_compare('2.1.1', '>', '2.1.0', True) 145 | 146 | assert_version_compare('2.1.1', '<', '2.1', False) 147 | assert_version_compare('2.1.1', '<=', '2.1', False) 148 | assert_version_compare('2.1.1', '=', '2.1', False) 149 | assert_version_compare('2.1.1', '>=', '2.1', True) 150 | assert_version_compare('2.1.1', '>', '2.1', True) 151 | 152 | -------------------------------------------------------------------------------- /lib/test_concurrentDirRemove.py: -------------------------------------------------------------------------------- 1 | __doc__ = """ 2 | 3 | This test removes concurrently a directory ('remover' worker) while 4 | files are added to it ('adder' worker) . 5 | 6 | According to Webdav specs, PUT into inexisting path does not create missing directories but returns 409 (Conflict). 7 | 8 | Cernbox/EOS: Hence the expected outcome is that part of the files that was already uploaded gets deleted. 9 | 10 | OwnCloud7? : For PUT which creates the missing directories the expected outcome is that all added files are kept on the server. 
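A positive concurrentRemoveDir_delaySeconds makes the remover wait, so the directory is deleted while the files are still being uploaded; a negative value makes the adder wait, so the directory is deleted before the uploads start.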
11 | 12 | """ 13 | 14 | nfiles = int(config.get('concurrentRemoveDir_nfiles',10)) 15 | filesizeKB = int(config.get('concurrentRemoveDir_filesizeKB',9000)) 16 | delaySeconds = int(config.get('concurrentRemoveDir_delaySeconds',3)) # if delaySeconds > 0 then remover waits; else the adder waits; 17 | 18 | # True => use new webdav endpoint (dav/files) 19 | # False => use old webdav endpoint (webdav) 20 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 21 | 22 | testsets = [ 23 | {'concurrentRemoveDir_nfiles':3, 24 | 'concurrentRemoveDir_filesizeKB':10000, 25 | 'concurrentRemoveDir_delaySeconds':5, 26 | 'use_new_dav_endpoint': True }, # removing the directory while a large file is chunk-uploaded 27 | {'concurrentRemoveDir_nfiles':3, 28 | 'concurrentRemoveDir_filesizeKB':10000, 29 | 'concurrentRemoveDir_delaySeconds':5, 30 | 'use_new_dav_endpoint': False }, # removing the directory while a large file is chunk-uploaded 31 | 32 | {'concurrentRemoveDir_nfiles':40, 33 | 'concurrentRemoveDir_filesizeKB':9000, 34 | 'concurrentRemoveDir_delaySeconds':5, 35 | 'use_new_dav_endpoint': True }, # removing the directory while lots of smaller files are uploaded 36 | {'concurrentRemoveDir_nfiles': 40, 37 | 'concurrentRemoveDir_filesizeKB': 9000, 38 | 'concurrentRemoveDir_delaySeconds': 5, 39 | 'use_new_dav_endpoint': False}, # removing the directory while lots of smaller files are uploaded 40 | 41 | {'concurrentRemoveDir_nfiles':5, 42 | 'concurrentRemoveDir_filesizeKB':15000, 43 | 'concurrentRemoveDir_delaySeconds':-5, 44 | 'use_new_dav_endpoint': True }, # removing the directory before files are uploaded 45 | {'concurrentRemoveDir_nfiles':5, 46 | 'concurrentRemoveDir_filesizeKB':15000, 47 | 'concurrentRemoveDir_delaySeconds':-5, 48 | 'use_new_dav_endpoint': False }, # removing the directory before files are uploaded 49 | ] 50 | 51 | import time 52 | import tempfile 53 | 54 | from smashbox.utilities import * 55 | from smashbox.utilities.hash_files import * 56 | 57 | def finish_if_not_capable(): 58 | # Finish the test if some of the prerequisites for this test are not satisfied 59 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 60 | #Dont test for <= 9.1 with new endpoint, since it is not supported 61 | logger.warn("Skipping test since webdav endpoint is not capable for this server version") 62 | return True 63 | return False 64 | 65 | @add_worker 66 | def creator(step): 67 | if finish_if_not_capable(): 68 | return 69 | 70 | reset_owncloud_account() 71 | reset_rundir() 72 | 73 | step(1,'upload empty subdirectory') 74 | d = make_workdir() 75 | d2 = os.path.join(d,'subdir') 76 | mkdir(d2) 77 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 78 | 79 | step(5,'final check') 80 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 81 | final_check(d) 82 | 83 | 84 | @add_worker 85 | def adder(step): 86 | if finish_if_not_capable(): 87 | return 88 | 89 | step(2,'sync the empty directory created by the creator') 90 | d = make_workdir() 91 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 92 | 93 | step(3,'locally create content in the subdirectory') 94 | d2 = os.path.join(d,'subdir') 95 | 96 | for i in range(nfiles): 97 | create_hashfile(d2, size=filesizeKB*1000) #createfile_zero(os.path.join(d2,"test.%02d"%i),count=filesizeKB, bs=1000) 98 | 99 | step(4,'sync the added files in parallel') 100 | if delaySeconds<0: 101 | sleep(-delaySeconds) 102 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 103 | 104 | step(5,'final check') 105 | run_ocsync(d, 
use_new_dav_endpoint=use_new_dav_endpoint) 106 | 107 | 108 | @add_worker 109 | def remover(step): 110 | if finish_if_not_capable(): 111 | return 112 | 113 | step(2,'sync the empty directory created by the creator') 114 | d = make_workdir() 115 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 116 | 117 | step(3,'locally remove subdir') 118 | d2 = os.path.join(d,'subdir') 119 | remove_tree(d2) 120 | 121 | step(4,'sync the removed subdir in parallel') 122 | if delaySeconds>0: 123 | sleep(delaySeconds) 124 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 125 | 126 | step(5,'final check') 127 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 128 | final_check(d) 129 | 130 | @add_worker 131 | def checker(step): 132 | if finish_if_not_capable(): 133 | return 134 | 135 | step(5,'sync the final state of the repository into a fresh local folder') 136 | d = make_workdir() 137 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 138 | 139 | final_check(d) 140 | 141 | 142 | def final_check(d): 143 | 144 | list_files(d,recursive=True) 145 | 146 | d2 = os.path.join(d,'subdir') 147 | 148 | logger.info('final output: %s',d2) 149 | 150 | all_files,analysed_files,bad_files = analyse_hashfiles(d2) 151 | 152 | error_check(bad_files == 0,'%s corrupted files in %s'%(bad_files,d2)) 153 | 154 | #it is hard to determine how many files should be present with 409 Conflict behaviour 155 | #error_check(analysed_files == nfiles,"not all files are present (%d/%d)"%(analysed_files,nfiles)) # FIXME: well, there may be other files - we don't check that yet 156 | 157 | 158 | #runcmd('find %s'%d) 159 | 160 | #log('content of /subdir as reported by webdav') 161 | #list_webdav_propfind('subdir') 162 | 163 | 164 | -------------------------------------------------------------------------------- /lib/owncloud/test_dirBecomesFile.py: -------------------------------------------------------------------------------- 1 | from owncloud import HTTPResponseError 2 | 3 | __doc__ = """ 4 | 5 | Test syncing when a directory turns into a file or back. 
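Two variants are covered: 'dirtofile' is deleted and recreated as a plain file (and later turned back into a folder), while 'dirtofile2' is replaced by moving an existing file or folder over it.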
6 | 7 | """ 8 | 9 | from smashbox.utilities import * 10 | from shutil import rmtree 11 | 12 | def make_subdir(d, sub): 13 | return make_workdir(os.path.join(d, sub)) 14 | 15 | def expect_webdav_isfile(path, user_num=None): 16 | exitcode,stdout,stderr = runcmd('curl -s -k %s -XPROPFIND %s | xmllint --format -'%(config.get('curl_opts',''),oc_webdav_url(remote_folder=path, user_num=user_num))) 17 | error_check("NotFound" not in stdout, "Remote path %s does not exist" % path) 18 | error_check("d:collection" not in stdout, "Remote path %s is not a file" % path) 19 | 20 | @add_worker 21 | def dir_to_file(step): 22 | 23 | if compare_client_version('2.1.0', '<='): 24 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 25 | return 26 | 27 | step(1, 'Create a folder and sync it') 28 | 29 | d = make_workdir() 30 | folder1 = make_subdir(d, 'folder1') 31 | folder2 = make_subdir(d, 'folder2') 32 | 33 | def make_folder(name): 34 | folder = make_subdir(folder1, name) 35 | sub_del = make_subdir(folder, 'sub_del') 36 | sub_move = make_subdir(folder, 'sub_move') 37 | createfile(os.path.join(folder, 'file-delete.txt'), '0', count=1000, bs=50) 38 | createfile(os.path.join(folder, 'file-move.txt'), '1', count=1000, bs=50) 39 | createfile(os.path.join(sub_del, 'file-sub-del.txt'), '2', count=1000, bs=50) 40 | createfile(os.path.join(sub_move, 'file-sub-move.txt'), '3', count=1000, bs=50) 41 | 42 | make_folder('dirtofile') 43 | make_folder('dirtofile2') 44 | 45 | # this will later replace dirtofile2 46 | createfile(os.path.join(folder1, 'dirtofile2-move'), '4', count=1000, bs=50) 47 | # and later this will become dirtofile2 48 | dirtofile2move = make_subdir(folder1, 'dirtofile2-move2') 49 | createfile(os.path.join(folder1, 'dirtofile2-move2', 'foo.txt'), '5', count=1000, bs=50) 50 | 51 | run_ocsync(folder1) 52 | # sanity check only 53 | expect_webdav_exist('dirtofile/file-delete.txt') 54 | expect_webdav_exist('dirtofile2/file-delete.txt') 55 | expect_webdav_exist('dirtofile2-move') 56 | 57 | # at this point, both client and server have 'dirtofile' folders 58 | 59 | 60 | step(2, 'Turn the folder into a file locally and propagate to the server') 61 | # This tests folder->file propagating to the server 62 | 63 | # we do this by syncing to a different folder, adjusting, syncing up again 64 | run_ocsync(folder2) 65 | mv(os.path.join(folder2, 'dirtofile', 'file-move.txt'), os.path.join(folder2, 'file-move.txt')) 66 | mv(os.path.join(folder2, 'dirtofile', 'sub_move'), os.path.join(folder2, 'sub_move')) 67 | rmtree(os.path.join(folder2, 'dirtofile')) 68 | createfile(os.path.join(folder2, 'dirtofile'), 'N', count=1000, bs=50) 69 | 70 | mv(os.path.join(folder2, 'dirtofile2', 'file-move.txt'), os.path.join(folder2, 'file-move2.txt')) 71 | mv(os.path.join(folder2, 'dirtofile2', 'sub_move'), os.path.join(folder2, 'sub_move2')) 72 | rmtree(os.path.join(folder2, 'dirtofile2')) 73 | mv(os.path.join(folder2, 'dirtofile2-move'), os.path.join(folder2, 'dirtofile2')) 74 | 75 | run_ocsync(folder2) 76 | 77 | error_check(os.path.isfile(os.path.join(folder2, 'dirtofile')), "expected 'dirtofile' to be a file") 78 | expect_webdav_isfile('dirtofile') 79 | expect_webdav_exist('file-move.txt') 80 | expect_webdav_exist('sub_move') 81 | error_check(os.path.isfile(os.path.join(folder2, 'dirtofile2')), "expected 'dirtofile2' to be a file") 82 | expect_webdav_isfile('dirtofile2') 83 | expect_webdav_exist('file-move2.txt') 84 | expect_webdav_exist('sub_move2') 85 | 
expect_webdav_does_not_exist('dirtofile2-move') 86 | 87 | 88 | step(3, 'Sync the folder that became a file into the old working tree') 89 | # This tests folder->file propagating from the server 90 | 91 | run_ocsync(folder1) 92 | 93 | # server is unchanged 94 | expect_webdav_isfile('dirtofile') 95 | expect_webdav_isfile('dirtofile2') 96 | 97 | # client has the files too 98 | expect_exists(os.path.join(folder1, 'dirtofile')) 99 | expect_exists(os.path.join(folder1, 'file-move.txt')) 100 | expect_exists(os.path.join(folder1, 'sub_move/file-sub-move.txt')) 101 | error_check(os.path.isfile(os.path.join(folder1, 'dirtofile')), "'dirtofile' didn't become a file") 102 | expect_exists(os.path.join(folder1, 'dirtofile2')) 103 | expect_exists(os.path.join(folder1, 'file-move2.txt')) 104 | expect_exists(os.path.join(folder1, 'sub_move2/file-sub-move.txt')) 105 | error_check(os.path.isfile(os.path.join(folder1, 'dirtofile2')), "'dirtofile2' didn't become a file") 106 | expect_does_not_exist(os.path.join(folder1, 'dirtofile2-move')) 107 | 108 | # at this point, both client and server have a 'dirtofile' file 109 | 110 | 111 | step(4, 'Turn the file into a folder locally and propagate to the server') 112 | # This tests file->folder propagating to the server 113 | 114 | # we do this by syncing to a different folder, adjusting, syncing up again 115 | run_ocsync(folder2) 116 | 117 | delete_file(os.path.join(folder2, 'dirtofile')) 118 | mkdir(os.path.join(folder2, 'dirtofile')) 119 | createfile(os.path.join(folder2, 'dirtofile', 'newfile.txt'), 'M', count=1000, bs=50) 120 | 121 | delete_file(os.path.join(folder2, 'dirtofile2')) 122 | mv(os.path.join(folder2, 'dirtofile2-move2'), os.path.join(folder2, 'dirtofile2')) 123 | 124 | run_ocsync(folder2) 125 | 126 | error_check(os.path.isdir(os.path.join(folder2, 'dirtofile')), "expected 'dirtofile' to be a folder") 127 | expect_webdav_exist('dirtofile/newfile.txt') 128 | error_check(os.path.isdir(os.path.join(folder2, 'dirtofile2')), "expected 'dirtofile2' to be a folder") 129 | expect_webdav_exist('dirtofile2/foo.txt') 130 | 131 | 132 | step(5, 'Sync the file that became a folder into the old working tree') 133 | # This tests file->folder propagating from the server 134 | 135 | run_ocsync(folder1) 136 | 137 | # server is unchanged 138 | expect_webdav_exist('dirtofile/newfile.txt') 139 | expect_webdav_exist('dirtofile2/foo.txt') 140 | 141 | # client has the file too, implying that dirtofile is a folder 142 | expect_exists(os.path.join(folder1, 'dirtofile/newfile.txt')) 143 | expect_exists(os.path.join(folder1, 'dirtofile2/foo.txt')) 144 | -------------------------------------------------------------------------------- /lib/oc-tests/test_reshareDir.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | Test basic directory re-sharing between users. 
This test is a sub-test of 5 | test_shareDir.py 6 | 7 | +-----------+-----------------+------------------+------------------+--------------+ 8 | | Step | Sharer | Sharee One | Sharee Two | Admin | 9 | | Number | | (not in group) | (in group) | | 10 | +===========+======================+==================+============================| 11 | | 2 | create work dir | create work dir | create work dir | | 12 | +-----------+-----------------+------------------+------------------+--------------+ 13 | | 3 | Create test | | | | 14 | | | files and dir | | | | 15 | +-----------+-----------------+------------------+------------------+--------------+ 16 | | 4 | Share directory | | | | 17 | +-----------+-----------------+------------------+------------------+--------------+ 18 | | 5 | | Syncs and | | | 19 | | | | validates | | | 20 | | | | directory exists | | | 21 | +-----------+-----------------+------------------+------------------+--------------+ 22 | | 6 | | Shares a file | | | 23 | | | | with Sharee two | | | 24 | +-----------+-----------------+------------------+------------------+--------------+ 25 | | 7 | | | Syncs and | | 26 | | | | | validates file | | 27 | | | | | present | | 28 | +-----------+-----------------+------------------+------------------+--------------+ 29 | | 8 | Final step | Final step | Final Step | | 30 | +-----------+-----------------+------------------+------------------+--------------+ 31 | 32 | 33 | Data Providers: 34 | 35 | """ 36 | 37 | from smashbox.utilities import * 38 | import glob 39 | 40 | OCS_PERMISSION_READ = 1 41 | OCS_PERMISSION_UPDATE = 2 42 | OCS_PERMISSION_CREATE = 4 43 | OCS_PERMISSION_DELETE = 8 44 | OCS_PERMISSION_SHARE = 16 45 | OCS_PERMISSION_ALL = 31 46 | 47 | filesizeKB = int(config.get('share_filesizeKB',10)) 48 | 49 | # True => use new webdav endpoint (dav/files) 50 | # False => use old webdav endpoint (webdav) 51 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 52 | 53 | testsets = [ 54 | { 55 | 'use_new_dav_endpoint':False 56 | }, 57 | { 58 | 'use_new_dav_endpoint':True 59 | } 60 | ] 61 | 62 | def finish_if_not_capable(): 63 | # Finish the test if some of the prerequisites for this test are not satisfied 64 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 65 | #Dont test for <= 9.1 with new endpoint, since it is not supported 66 | logger.warn("Skipping test since webdav endpoint is not capable for this server version") 67 | return True 68 | return False 69 | 70 | @add_worker 71 | def setup(step): 72 | if finish_if_not_capable(): 73 | return 74 | 75 | step (1, 'create test users') 76 | reset_owncloud_account(num_test_users=config.oc_number_test_users) 77 | check_users(config.oc_number_test_users) 78 | 79 | reset_rundir() 80 | 81 | @add_worker 82 | def sharer(step): 83 | if finish_if_not_capable(): 84 | return 85 | 86 | step (2, 'Create workdir') 87 | d = make_workdir() 88 | 89 | step (3, 'Create initial test files and directories') 90 | 91 | procName = reflection.getProcessName() 92 | dirName = "%s/%s"%(procName, 'localShareDir') 93 | localDir = make_workdir(dirName) 94 | 95 | createfile(os.path.join(localDir,'TEST_FILE_USER_SHARE.dat'),'0',count=1000,bs=filesizeKB) 96 | createfile(os.path.join(localDir,'TEST_FILE_USER_RESHARE.dat'),'0',count=1000,bs=filesizeKB) 97 | createfile(os.path.join(localDir,'TEST_FILE_MODIFIED_USER_SHARE.dat'),'0',count=1000,bs=filesizeKB) 98 | 99 | shared = reflection.getSharedObject() 100 | shared['md5_sharer'] = md5sum(os.path.join(localDir,'TEST_FILE_MODIFIED_USER_SHARE.dat')) 
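# Note (added annotation): the checksum is stashed in the shared object returned by reflection.getSharedObject(),
# which the other workers of this test could read to verify the synced file's content; in this sub-test it is only logged below.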
101 | logger.info('md5_sharer: %s',shared['md5_sharer']) 102 | 103 | list_files(d) 104 | run_ocsync(d,user_num=1, use_new_dav_endpoint=use_new_dav_endpoint) 105 | list_files(d) 106 | 107 | step (4, 'Sharer shares directory') 108 | 109 | user1 = "%s%i"%(config.oc_account_name, 1) 110 | user2 = "%s%i"%(config.oc_account_name, 2) 111 | 112 | kwargs = {'perms': OCS_PERMISSION_ALL} 113 | shared['SHARE_LOCAL_DIR'] = share_file_with_user ('localShareDir', user1, user2, **kwargs) 114 | 115 | step (8, 'Sharer Final step') 116 | 117 | @add_worker 118 | def shareeOne(step): 119 | if finish_if_not_capable(): 120 | return 121 | 122 | step (2, 'Sharee One creates workdir') 123 | d = make_workdir() 124 | 125 | step (5, 'Sharee One syncs and validates directory exist') 126 | 127 | run_ocsync(d,user_num=2, use_new_dav_endpoint=use_new_dav_endpoint) 128 | list_files(d) 129 | 130 | sharedDir = os.path.join(d,'localShareDir') 131 | logger.info ('Checking that %s is present in local directory for Sharee One', sharedDir) 132 | error_check(os.path.exists(sharedDir), "Directory %s should exist" %sharedDir) 133 | 134 | step (6, 'Sharee One share files with user 3') 135 | 136 | user2 = "%s%i"%(config.oc_account_name, 2) 137 | user3 = "%s%i"%(config.oc_account_name, 3) 138 | kwargs = {'perms': OCS_PERMISSION_ALL} 139 | share_file_with_user ('localShareDir/TEST_FILE_USER_RESHARE.dat', user2, user3, **kwargs) 140 | 141 | step (8, 'Sharee One final step') 142 | 143 | @add_worker 144 | def shareeTwo(step): 145 | if finish_if_not_capable(): 146 | return 147 | 148 | step (2, 'Sharee Two creates workdir') 149 | d = make_workdir() 150 | 151 | procName = reflection.getProcessName() 152 | dirName = "%s/%s"%(procName, 'localShareDir') 153 | localDir = make_workdir(dirName) 154 | 155 | step (7, 'Sharee two validates share file') 156 | 157 | run_ocsync(d,user_num=3, use_new_dav_endpoint=use_new_dav_endpoint) 158 | list_files(d) 159 | 160 | sharedFile = os.path.join(d,'TEST_FILE_USER_RESHARE.dat') 161 | logger.info ('Checking that %s is present in local directory for Sharee Two', sharedFile) 162 | error_check(os.path.exists(sharedFile), "File %s should exist" %sharedFile) 163 | 164 | step (8, 'Sharee Two final step') 165 | 166 | 167 | -------------------------------------------------------------------------------- /lib/test_deltamove.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import tempfile 4 | 5 | 6 | __doc__ = """ Check that changes to file are also propagated when file is moved 7 | 8 | +-----------+-----------------+------------------+ 9 | | Step | Client1 | Client2 | 10 | +===========+======================+=============+ 11 | | 2 | create ref file | create work dir | 12 | | | and workdir | | 13 | +-----------+-----------------+------------------+ 14 | | 3 | add files and | | 15 | | | sync | | 16 | +-----------+-----------------+------------------+ 17 | | 4 | | sync down | 18 | | | | and check | 19 | +-----------+-----------------+------------------+ 20 | | 5 | mod files and | | 21 | | | sync | | 22 | +-----------+-----------------+------------------+ 23 | | 6 | | sync down | 24 | | | | and check | 25 | +-----------+-----------------+------------------+ 26 | | 7 | | move files | 27 | | | | and modify | 28 | +-----------+-----------------+------------------+ 29 | | 8 | sync files and | | 30 | | | check | | 31 | +-----------+-----------------+------------------+ 32 | | 9 | check checksums | check checksums | 33 | 
+-----------+-----------------+------------------+ 34 | 35 | """ 36 | 37 | from smashbox.utilities import * 38 | from smashbox.utilities.hash_files import * 39 | from smashbox.utilities.monitoring import commit_to_monitoring 40 | 41 | nfiles = int(config.get('deltamove_nfiles',10)) 42 | filesize = config.get('deltamove_filesize',1000) 43 | 44 | if type(filesize) is type(''): 45 | filesize = eval(filesize) 46 | 47 | # True => use new webdav endpoint (dav/files) 48 | # False => use old webdav endpoint (webdav) 49 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 50 | 51 | testsets = [ 52 | { 'deltamove_filesize': OWNCLOUD_CHUNK_SIZE(0.01), 53 | 'deltamove_nfiles':2, 54 | 'use_new_dav_endpoint':True 55 | }, 56 | { 'deltamove_filesize': OWNCLOUD_CHUNK_SIZE(3.5), 57 | 'deltamove_nfiles':2, 58 | 'use_new_dav_endpoint':True 59 | }, 60 | 61 | ] 62 | 63 | def finish_if_not_capable(): 64 | # Finish the test if some of the prerequisites for this test are not satisfied 65 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 66 | #Dont test for <= 9.1 with new endpoint, since it is not supported 67 | logger.warn("Skipping test since webdav endpoint is not capable for this server version") 68 | return True 69 | return False 70 | 71 | @add_worker 72 | def worker0(step): 73 | if finish_if_not_capable(): 74 | return 75 | 76 | # do not cleanup server files from previous run 77 | reset_owncloud_account() 78 | 79 | # cleanup all local files for the test 80 | reset_rundir() 81 | 82 | step(1,'Preparation') 83 | d = make_workdir() 84 | 85 | # create the test file 86 | createfile(os.path.join(d,"TEST_FILE_MODIFIED.dat"),'0',count=1,bs=filesize) 87 | modify_file(os.path.join(d,"TEST_FILE_MODIFIED.dat"),'1',count=1,bs=1000) 88 | modify_file(os.path.join(d,"TEST_FILE_MODIFIED.dat"),'2',count=1,bs=1000) 89 | checksum_reference = md5sum(os.path.join(d,"TEST_FILE_MODIFIED.dat")) 90 | 91 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 92 | 93 | k0 = count_files(d) 94 | 95 | step(3,'Add %s files and check if we still have k1+nfiles after resync'%nfiles) 96 | 97 | logger.log(35,"Timestamp %f Files %d Size %d",time.time(),nfiles,filesize) 98 | 99 | for i in range(nfiles): 100 | createfile(os.path.join(d,"TEST_FILE_MODIFIED_%d.dat"%(i)),'0',count=1,bs=filesize) 101 | 102 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 103 | 104 | k1 = count_files(d) 105 | 106 | error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 107 | 108 | step(5,"Modify files") 109 | 110 | for i in range(nfiles): 111 | modify_file(os.path.join(d,"TEST_FILE_MODIFIED_%d.dat"%(i)),'1',count=1,bs=1000) 112 | 113 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 114 | 115 | k2 = count_files(d) 116 | 117 | error_check(k2-k0==nfiles,'Expecting to have %d files: see k2=%d k0=%d'%(nfiles,k2,k0)) 118 | 119 | step(8,'Check moved and modified') 120 | 121 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 122 | 123 | k3 = count_files(d) 124 | error_check(k3-k0==nfiles,'Expecting to have %d files: see k3=%d k0=%d'%(nfiles,k3,k0)) 125 | 126 | step(9, "Final report") 127 | 128 | for i in range(nfiles): 129 | checksum = md5sum(os.path.join(d,"TEST_FILE_MODIFIED_MOVED_%d.dat"%(i))) 130 | error_check(checksum==checksum_reference,'Expecting to have equal checksums, got %s instead of %s'%(checksum,checksum_reference)) 131 | 132 | logger.info('SUCCESS: %d files found',k2) 133 | 134 | @add_worker 135 | def worker1(step): 136 | if finish_if_not_capable(): 137 | 
return 138 | 139 | step(2,'Preparation') 140 | d = make_workdir() 141 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 142 | checksum_reference = md5sum(os.path.join(d,"TEST_FILE_MODIFIED.dat")) 143 | k0 = count_files(d) 144 | 145 | step(4,'Resync and check files added by worker0') 146 | 147 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 148 | 149 | k1 = count_files(d) 150 | 151 | error_check(k1-k0==nfiles,'Expecting to have %d files more: see k1=%d k0=%d'%(nfiles,k1,k0)) 152 | 153 | step(6,'Resync and check files modified by worker0') 154 | 155 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 156 | 157 | k2 = count_files(d) 158 | 159 | error_check(k2-k0==nfiles,'Expecting to have %d files: see k2=%d k0=%d'%(nfiles,k2,k0)) 160 | 161 | step(7,'Move and modify') 162 | 163 | for i in range(nfiles): 164 | mv(os.path.join(d,"TEST_FILE_MODIFIED_%d.dat"%(i)), os.path.join(d,"TEST_FILE_MODIFIED_MOVED_%d.dat"%(i))) 165 | 166 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 167 | 168 | for i in range(nfiles): 169 | modify_file(os.path.join(d,"TEST_FILE_MODIFIED_MOVED_%d.dat"%(i)),'2',count=1,bs=1000) 170 | 171 | run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint) 172 | 173 | k3 = count_files(d) 174 | 175 | error_check(k3-k0==nfiles,'Expecting to have %d files: see k3=%d k0=%d'%(nfiles,k3,k0)) 176 | 177 | step(8,"Final report") 178 | for i in range(nfiles): 179 | checksum = md5sum(os.path.join(d,"TEST_FILE_MODIFIED_MOVED_%d.dat"%(i))) 180 | error_check(checksum==checksum_reference,'Expecting to have equal checksums, got %s instead of %s'%(checksum,checksum_reference)) 181 | 182 | 183 | 184 | 185 | 186 | -------------------------------------------------------------------------------- /etc/smashbox.conf.template-owncloud: -------------------------------------------------------------------------------- 1 | # 2 | # The _open_SmashBox Project. 3 | # 4 | # Author: Jakub T. Moscicki, CERN, 2013 5 | # License: AGPL 6 | # 7 | # this is the main config file template: copy to smashbox.conf and adjust the settings 8 | # 9 | # this template should work without changes if you are running your tests directly on the owncloud application server 10 | # 11 | 12 | # this is the top directory where all local working files are kept (test working direcotires, test logs, test data, temporary filesets, ..) 
13 | smashdir = "~/smashdir" 14 | 15 | # name of the account used for testing 16 | # if None then account name is chosen automatically (based on the test name) 17 | oc_account_name=None 18 | 19 | # default number of users for tests involving multiple users (user number is appended to the oc_account_name) 20 | # this only applies to the tests involving multiple users 21 | oc_number_test_users=3 22 | 23 | # name of the group used for testing 24 | oc_group_name=None 25 | 26 | # default number of groups for tests involving multiple groups (group number is appended to the oc_group_name) 27 | # this only applies to the tests involving multiple groups 28 | oc_number_test_groups=1 29 | 30 | # password for test accounts: all test accounts will have the same password 31 | # if not set then it's an error 32 | oc_account_password="demo" 33 | 34 | # owncloud test server 35 | # if left blank or "localhost" then the real hostname of the local machine will be used 36 | oc_server = '' 37 | 38 | 39 | # root of the owncloud installation as visible in the URL 40 | oc_root = 'owncloud' 41 | 42 | # webdav endpoint URI within the oc_server 43 | import os.path 44 | oc_webdav_endpoint = os.path.join(oc_root,'remote.php/webdav') # standard owncloud server 45 | 46 | # target folder on the server (this may not be compatible with all tests) 47 | oc_server_folder = '' 48 | 49 | # should we use protocols with SSL (https, ownclouds) 50 | oc_ssl_enabled = True 51 | 52 | # how to invoke shell commands on the server 53 | # for localhost there is no problem - leave it blank 54 | # for a remote host it may be set like this: "ssh -t -l root $oc_server" 55 | # note: configure ssh for passwordless login 56 | # note: the -t option is needed to make it possible to run sudo 57 | oc_server_shell_cmd = "" 58 | 59 | # Data directory on the owncloud server. 60 | # 61 | oc_server_datadirectory = os.path.join('/var/www/html',oc_root, 'data') 62 | 63 | # a path to server side tools (create_user.php, ...)
64 | # 65 | # it may be specified as a relative path "dir", which then resolves to 66 | # "dir" under the top-level of the tree 67 | # containing THIS configuration file 68 | # 69 | 70 | oc_server_tools_path = "server-tools" 71 | 72 | # a path to ocsync command with options 73 | # this path should work for all client hosts 74 | # 75 | # it may be specified as a relative path "dir", which then resolves to 76 | # "dir" under the top-level of the tree 77 | # containing THIS configuration file 78 | # 79 | oc_sync_cmd = "client/build/mirall/bin/owncloudcmd --trust" 80 | 81 | # number of times to repeat each ocsync run 82 | oc_sync_repeat = 1 83 | 84 | #################################### 85 | 86 | # unique identifier of your test run 87 | # if None then the runid is chosen automatically (and stored in this variable) 88 | runid = None 89 | 90 | # if True then the local working directory path will have the runid added to it automatically 91 | workdir_runid_enabled=False 92 | 93 | # if True then the runid will be part of the oc_account_name automatically 94 | oc_account_runid_enabled=False 95 | 96 | #################################### 97 | 98 | # this defines the default account cleanup procedure 99 | # - "delete": delete account if exists and then create a new account with the same name 100 | # - "keep": don't delete existing account but create one if needed 101 | # 102 | # these are not implemented yet: 103 | # - "sync_delete": delete all files via a sync run 104 | # - "webdav_delete": delete all files via webdav DELETE request 105 | # - "filesystem_delete": delete all files directly on the server's filesystem 106 | oc_account_reset_procedure = "delete" 107 | 108 | # this defines the default local run directory reset procedure 109 | # - "delete": delete everything in the local run directory prior to running the test 110 | # - "keep": keep all files (from the previous run) 111 | rundir_reset_procedure = "delete" 112 | 113 | web_user = "www-data" 114 | 115 | oc_admin_user = "at_admin" 116 | oc_admin_password = "admin" 117 | 118 | # cleanup imported namespaces 119 | del os 120 | 121 | # Verbosity of curl client. 122 | # If None then verbosity is on when smashbox is run in --debug mode.
123 | # set it to True or False to override 124 | # 125 | pycurl_verbose = None 126 | 127 | # scp port to be used in scp commands, used primarily when copying over the server log file 128 | scp_port = 22 129 | 130 | # user that can r+w the owncloud.log file (needs to be configured for passwordless login) 131 | oc_server_log_user = "www-data" 132 | 133 | # 134 | # Reset the server log file and verify that no exceptions and other known errors have been logged 135 | # 136 | oc_check_server_log = False 137 | 138 | # 139 | # Reset the diagnostic log file and use diagnostics for assertions 140 | # 141 | oc_check_diagnostic_log = False 142 | 143 | from collections import OrderedDict 144 | _configgen = OrderedDict([('KeyRemoverProcessor', 145 | {'keylist': ('_configgen', 'oc_server', 'oc_ssl_enabled', 146 | 'oc_admin_user', 'oc_admin_password', 147 | 'oc_root', 'oc_webdav_endpoint', 'oc_server_shell_cmd', 148 | 'oc_sync_cmd', 'scp_port')}), 149 | ('OverwritterProcessor', 150 | {'dict_to_merge': {}}), 151 | ('RequiredKeysProcessor', 152 | {'keylist': [ 153 | {'name': 'oc_server', 'help_text': 'ip or hostname of the server where owncloud is located, including the port, such as "10.20.30.40:8080"'}, 154 | {'name': 'oc_ssl_enabled', 'type': 'bool', 'default': False, 'help_text': 'if you access the server through https, set this to True'}, 155 | {'name': 'oc_root', 'help_text': 'the path for the url to be added after the server. To access "http://server.com/owncloud" use "owncloud"; leave it empty if you want to access "http://server.com/"'}, 156 | {'name': 'oc_webdav_endpoint', 'help_text': 'the path for the webdav endpoint. If the webdav endpoint is in "http://server.com/owncloud/remote.php/webdav" use "owncloud/remote.php/webdav"', 'default': 'remote.php/webdav'}, 157 | {'name': 'oc_admin_user', 'default':'admin'}, 158 | {'name': 'oc_admin_password', 'default': 'Password'}, 159 | {'name': 'oc_server_shell_cmd', 'help_text': 'ssh command to connect to the server such as "ssh -t -l root " (include the server). Leave it empty if the server is localhost'}, 160 | {'name': 'scp_port', 'type': 'int', 'default': 22, 'help_text': 'port for scp commands accessing the owncloud server'}, 161 | {'name': 'oc_sync_cmd', 'default': '/usr/bin/owncloudcmd --trust', 'help_text': 'owncloudcmd command.
Use the absolute path to the app and any required option'}, 162 | ], 163 | 'ask': True}), 164 | ('SortProcessor', None)]) 165 | del OrderedDict 166 | -------------------------------------------------------------------------------- /lib/owncloud/test_locking.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from smashbox.owncloudorg.locking import * 4 | from smashbox.utilities import * 5 | import os 6 | import signal 7 | 8 | __doc__ = """ 9 | 10 | Test locking enforcement 11 | +------+------------------------------------+ 12 | | Step | User | 13 | +------+------------------------------------+ 14 | | 2 | Enable QA testing app | 15 | | 3 | Create dir/subdir/ | 16 | | 4 | Populate locks | 17 | | 5 | Try to upload dir/subdir/file2.dat | 18 | | 6 | Remove locks | 19 | | 7 | Upload dir/subdir/file2.dat | 20 | +------+------------------------------------+ 21 | 22 | """ 23 | 24 | 25 | DIR_NAME = 'dir' 26 | SUBDIR_NAME = os.path.join(DIR_NAME, 'subdir') 27 | 28 | testsets = [ 29 | { 30 | 'locks': [ 31 | { 32 | 'lock': LockProvider.LOCK_EXCLUSIVE, 33 | 'path': DIR_NAME 34 | } 35 | ], 36 | 'can_upload': False 37 | }, 38 | { 39 | 'locks': [ 40 | { 41 | 'lock': LockProvider.LOCK_SHARED, 42 | 'path': DIR_NAME 43 | } 44 | ], 45 | 'can_upload': True 46 | }, 47 | { 48 | 'locks': [ 49 | { 50 | 'lock': LockProvider.LOCK_EXCLUSIVE, 51 | 'path': SUBDIR_NAME 52 | } 53 | ], 54 | 'can_upload': False 55 | }, 56 | { 57 | 'locks': [ 58 | { 59 | 'lock': LockProvider.LOCK_SHARED, 60 | 'path': SUBDIR_NAME 61 | } 62 | ], 63 | 'can_upload': True 64 | }, 65 | { 66 | 'locks': [ 67 | { 68 | 'lock': LockProvider.LOCK_EXCLUSIVE, 69 | 'path': DIR_NAME 70 | }, 71 | { 72 | 'lock': LockProvider.LOCK_SHARED, 73 | 'path': SUBDIR_NAME 74 | } 75 | ], 76 | 'can_upload': False 77 | }, 78 | { 79 | 'locks': [ 80 | { 81 | 'lock': LockProvider.LOCK_SHARED, 82 | 'path': DIR_NAME 83 | }, 84 | { 85 | 'lock': LockProvider.LOCK_EXCLUSIVE, 86 | 'path': SUBDIR_NAME 87 | } 88 | ], 89 | 'can_upload': False 90 | }, 91 | { 92 | 'locks': [ 93 | { 94 | 'lock': LockProvider.LOCK_SHARED, 95 | 'path': DIR_NAME 96 | }, 97 | { 98 | 'lock': LockProvider.LOCK_SHARED, 99 | 'path': SUBDIR_NAME 100 | } 101 | ], 102 | 'can_upload': True 103 | } 104 | ] 105 | 106 | use_locks = config.get('locks', testsets[0]['locks']) 107 | can_upload = config.get('can_upload', testsets[0]['can_upload']) 108 | original_cmd = config.oc_sync_cmd 109 | 110 | 111 | @add_worker 112 | def owner_worker(step): 113 | 114 | if compare_client_version('2.1.1', '<='): 115 | # The client has a bug with permissions of folders on the first sync before 2.1.2 116 | logger.warning('Skipping test, because the client version is known to behave incorrectly') 117 | return 118 | 119 | if compare_oc_version('9.0', '<='): 120 | # The server has no fake locking support 121 | logger.warning('Skipping test, because the server has no fake locking support') 122 | return 123 | 124 | oc_api = get_oc_api() 125 | oc_api.login(config.oc_admin_user, config.oc_admin_password) 126 | lock_provider = LockProvider(oc_api) 127 | lock_provider.enable_testing_app() 128 | 129 | if not lock_provider.isUsingDBLocking(): 130 | logger.warning('Skipping test, because DB Locking is not enabled or lock provisioning is not supported') 131 | return 132 | 133 | step(2, 'Create workdir') 134 | d = make_workdir() 135 | 136 | from owncloud import OCSResponseError 137 | try: 138 | lock_provider.unlock() 139 | except OCSResponseError: 140 | fatal_check(False, 'Testing App seems to not 
be enabled') 141 | 142 | step(3, 'Create test folder') 143 | 144 | mkdir(os.path.join(d, DIR_NAME)) 145 | mkdir(os.path.join(d, SUBDIR_NAME)) 146 | createfile(os.path.join(d, DIR_NAME, 'file.dat'), '0', count=1000, bs=1) 147 | createfile(os.path.join(d, SUBDIR_NAME, 'sub_file.dat'), '0', count=1000, bs=1) 148 | 149 | run_ocsync(d) 150 | 151 | step(4, 'Lock items') 152 | 153 | for lock in use_locks: 154 | fatal_check( 155 | lock_provider.is_locked(lock['lock'], config.oc_account_name, lock['path']) == False, 156 | 'Resource is already locked' 157 | ) 158 | 159 | lock_provider.lock(lock['lock'], config.oc_account_name, lock['path']) 160 | 161 | fatal_check( 162 | lock_provider.is_locked(lock['lock'], config.oc_account_name, lock['path']), 163 | 'Resource should be locked' 164 | ) 165 | 166 | step(5, 'Try to upload a file in locked item') 167 | 168 | createfile(os.path.join(d, SUBDIR_NAME, 'file2.dat'), '0', count=1000, bs=1) 169 | 170 | try: 171 | save_run_ocsync(d, seconds=10, max_sync_retries=1) 172 | except TimeoutError as err: 173 | if compare_client_version('2.1.0', '>='): 174 | # Max retries should terminate in time 175 | error_check(False, err.message) 176 | else: 177 | # Client does not terminate before 2.1: https://github.com/owncloud/client/issues/4037 178 | logger.warning(err.message) 179 | 180 | if can_upload: 181 | expect_webdav_exist(os.path.join(SUBDIR_NAME, 'file2.dat')) 182 | else: 183 | expect_webdav_does_not_exist(os.path.join(SUBDIR_NAME, 'file2.dat')) 184 | 185 | step(6, 'Unlock item and sync again') 186 | 187 | for lock in use_locks: 188 | fatal_check( 189 | lock_provider.is_locked(lock['lock'], config.oc_account_name, lock['path']), 190 | 'Resource is already locked' 191 | ) 192 | 193 | lock_provider.unlock(lock['lock'], config.oc_account_name, lock['path']) 194 | 195 | fatal_check( 196 | lock_provider.is_locked(lock['lock'], config.oc_account_name, lock['path']) == False, 197 | 'Resource should be locked' 198 | ) 199 | 200 | step(7, 'Upload a file in unlocked item') 201 | 202 | run_ocsync(d) 203 | 204 | expect_webdav_exist(os.path.join(SUBDIR_NAME, 'file2.dat')) 205 | 206 | step(8, 'Final - Unlock everything') 207 | 208 | lock_provider.unlock() 209 | lock_provider.disable_testing_app() 210 | 211 | 212 | class TimeoutError(Exception): 213 | pass 214 | 215 | 216 | def handler(signum, frame): 217 | config.oc_sync_cmd = original_cmd 218 | raise TimeoutError('Sync client did not terminate in time') 219 | 220 | 221 | def save_run_ocsync(local_folder, seconds=10, max_sync_retries=1, remote_folder="", n=None, user_num=None): 222 | """ 223 | A save variation of run_ocsync, that terminates after n seconds or x retries depending on the client version 224 | 225 | :param local_folder: The local folder to sync 226 | :param seconds: Number of seconds until the request should be terminated 227 | :param max_sync_retries: Number of retries for each sync 228 | :param remote_folder: The remote target folder to sync to 229 | :param n: Number of syncs 230 | :param user_num: User number 231 | """ 232 | 233 | if compare_client_version('2.1.0', '>='): 234 | pattern = re.compile(r' \-\-max\-sync\-retries \d+') 235 | config.oc_sync_cmd = pattern.sub('', config.oc_sync_cmd) 236 | config.oc_sync_cmd += ' --max-sync-retries %i' % max_sync_retries 237 | 238 | signal.signal(signal.SIGALRM, handler) 239 | signal.alarm(seconds) 240 | 241 | # This run_ocsync() may hang indefinitely 242 | run_ocsync(local_folder, remote_folder, n, user_num) 243 | 244 | signal.alarm(0) 245 | config.oc_sync_cmd = 
original_cmd 246 | -------------------------------------------------------------------------------- /lib/oc-tests/test_uploadFiles.py: -------------------------------------------------------------------------------- 1 | 2 | __doc__ = """ 3 | 4 | Test uploading a large number of files to a directory and then syncing 5 | 6 | +-----------+----------------------+------------------+----------------------------+ 7 | | Step | Sharer | Sharee One | Sharee Two | 8 | | Number | | | | 9 | +===========+======================+==================+============================| 10 | | 2 | create work dir | create work dir | create work dir | 11 | +-----------+----------------------+------------------+----------------------------+ 12 | | 3 | Create test dir | | | 13 | +-----------+----------------------+------------------+----------------------------+ 14 | | 4 | Shares test dir with | | | 15 | | | Sharee One and Two | | | 16 | +-----------+----------------------+------------------+----------------------------+ 17 | | 5 | | Syncs shared dir | syncs Shared dir | 18 | +-----------+----------------------+------------------+----------------------------+ 19 | | 6 | | creates new | | 20 | | | | files and syncs | | 21 | +-----------+----------------------+------------------+----------------------------+ 22 | | 7 | syncs and validates | | syncs and validates | 23 | | | new files exist | | new files exist | 24 | +-----------+----------------------+------------------+----------------------------+ 25 | | 8 | final step | final step | final step | 26 | +-----------+----------------------+------------------+----------------------------+ 27 | 28 | Data Providers: 29 | 30 | test_sharePermissions: Permissions to be applied to the share 31 | test_numFilesToCreate: Number of files to create 32 | test_filesizeKB: Size of file to create in KB 33 | 34 | 35 | """ 36 | 37 | from smashbox.utilities import * 38 | import glob 39 | 40 | OCS_PERMISSION_READ = 1 41 | OCS_PERMISSION_UPDATE = 2 42 | OCS_PERMISSION_CREATE = 4 43 | OCS_PERMISSION_DELETE = 8 44 | OCS_PERMISSION_SHARE = 16 45 | OCS_PERMISSION_ALL = 31 46 | 47 | filesizeKB = int(config.get('test_filesizeKB',10)) 48 | sharePermissions = config.get('test_sharePermissions', OCS_PERMISSION_ALL) 49 | numFilesToCreate = config.get('test_numFilesToCreate', 1) 50 | 51 | # True => use new webdav endpoint (dav/files) 52 | # False => use old webdav endpoint (webdav) 53 | use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint',True)) 54 | 55 | testsets = [ 56 | { 57 | 'test_sharePermissions':OCS_PERMISSION_ALL, 58 | 'test_numFilesToCreate':5, 59 | 'test_filesizeKB':20000, 60 | 'use_new_dav_endpoint':True 61 | }, 62 | { 63 | 'test_sharePermissions':OCS_PERMISSION_ALL, 64 | 'test_numFilesToCreate':5, 65 | 'test_filesizeKB':20000, 66 | 'use_new_dav_endpoint':False 67 | }, 68 | { 69 | 'test_sharePermissions':OCS_PERMISSION_READ | OCS_PERMISSION_CREATE | OCS_PERMISSION_UPDATE, 70 | 'test_numFilesToCreate':5, 71 | 'test_filesizeKB':20000, 72 | 'use_new_dav_endpoint':True 73 | }, 74 | { 75 | 'test_sharePermissions':OCS_PERMISSION_READ | OCS_PERMISSION_CREATE | OCS_PERMISSION_UPDATE, 76 | 'test_numFilesToCreate':5, 77 | 'test_filesizeKB':20000, 78 | 'use_new_dav_endpoint':False 79 | }, 80 | ] 81 | 82 | def finish_if_not_capable(): 83 | # Finish the test if some of the prerequisites for this test are not satisfied 84 | if compare_oc_version('10.0', '<') and use_new_dav_endpoint == True: 85 | #Dont test for <= 9.1 with new endpoint, since it is not supported 86 | logger.warn("Skipping test 
since webdav endpoint is not capable for this server version") 87 | return True 88 | return False 89 | 90 | @add_worker 91 | def setup(step): 92 | if finish_if_not_capable(): 93 | return 94 | 95 | step (1, 'create test users') 96 | reset_owncloud_account(num_test_users=config.oc_number_test_users) 97 | check_users(config.oc_number_test_users) 98 | 99 | reset_rundir() 100 | 101 | @add_worker 102 | def sharer(step): 103 | if finish_if_not_capable(): 104 | return 105 | 106 | step (2,'Create workdir') 107 | d = make_workdir() 108 | 109 | step (3,'Create initial test directory') 110 | 111 | procName = reflection.getProcessName() 112 | dirName = "%s/%s"%(procName, 'localShareDir') 113 | localDir = make_workdir(dirName) 114 | 115 | list_files(d) 116 | run_ocsync(d,user_num=1, use_new_dav_endpoint=use_new_dav_endpoint) 117 | list_files(d) 118 | 119 | step (4,'Sharer shares directory') 120 | 121 | user1 = "%s%i"%(config.oc_account_name, 1) 122 | user2 = "%s%i"%(config.oc_account_name, 2) 123 | user3 = "%s%i"%(config.oc_account_name, 3) 124 | 125 | shared = reflection.getSharedObject() 126 | 127 | kwargs = {'perms': sharePermissions} 128 | shared['SHARE_LOCAL_DIR_U2'] = share_file_with_user ('localShareDir', user1, user2, **kwargs) 129 | shared['SHARE_LOCAL_DIR_U3'] = share_file_with_user ('localShareDir', user1, user3, **kwargs) 130 | 131 | step (7, 'Sharer validates newly added files') 132 | 133 | run_ocsync(d,user_num=1, use_new_dav_endpoint=use_new_dav_endpoint) 134 | 135 | list_files(d+'/localShareDir') 136 | checkFilesExist(d) 137 | 138 | step (8, 'Sharer final step') 139 | 140 | @add_worker 141 | def shareeOne(step): 142 | if finish_if_not_capable(): 143 | return 144 | 145 | step (2, 'Sharee One creates workdir') 146 | d = make_workdir() 147 | 148 | step (5,'Sharee One syncs and validates directory exist') 149 | 150 | run_ocsync(d,user_num=2, use_new_dav_endpoint=use_new_dav_endpoint) 151 | list_files(d) 152 | 153 | sharedDir = os.path.join(d,'localShareDir') 154 | logger.info ('Checking that %s is present in local directory for Sharee One', sharedDir) 155 | error_check(os.path.exists(sharedDir), "Directory %s should exist" %sharedDir) 156 | 157 | step (6, 'Sharee One creates files') 158 | 159 | logger.info ('ShareeOne is creating %i files', numFilesToCreate) 160 | if numFilesToCreate == 1: 161 | createfile(os.path.join(d,'localShareDir/TEST_FILE_NEW_USER_SHARE.dat'),'0',count=1000,bs=filesizeKB) 162 | else: 163 | for i in range(1, numFilesToCreate): 164 | filename = "%s%i%s" % ('localShareDir/TEST_FILE_NEW_USER_SHARE_',i,'.dat') 165 | createfile(os.path.join(d,filename),'0',count=1000,bs=filesizeKB) 166 | 167 | run_ocsync(d,user_num=2, use_new_dav_endpoint=use_new_dav_endpoint) 168 | 169 | list_files(d+'/localShareDir') 170 | checkFilesExist(d) 171 | 172 | step (8, 'Sharee One final step') 173 | 174 | @add_worker 175 | def shareeTwo(step): 176 | if finish_if_not_capable(): 177 | return 178 | 179 | step (2, 'Sharee Two creates workdir') 180 | d = make_workdir() 181 | 182 | procName = reflection.getProcessName() 183 | dirName = "%s/%s"%(procName, 'localShareDir') 184 | localDir = make_workdir(dirName) 185 | 186 | step (5, 'Sharee two syncs and validates directory exists') 187 | 188 | run_ocsync(d,user_num=3, use_new_dav_endpoint=use_new_dav_endpoint) 189 | list_files(d) 190 | 191 | sharedDir = os.path.join(d,'localShareDir') 192 | logger.info ('Checking that %s is present in local directory for Sharee One', sharedDir) 193 | error_check(os.path.exists(sharedDir), "Directory %s should exist" 
%sharedDir) 194 | 195 | step (7, 'Sharee two validates new files exist') 196 | 197 | run_ocsync(d,user_num=3, use_new_dav_endpoint=use_new_dav_endpoint) 198 | 199 | list_files(d+'/localShareDir') 200 | checkFilesExist(d) 201 | 202 | step (8, 'Sharee Two final step') 203 | 204 | def checkFilesExist (tmpDir): 205 | 206 | logger.info ('Checking if files exist in local directory ') 207 | 208 | if numFilesToCreate == 1: 209 | sharedFile = os.path.join(tmpDir,'localShareDir/TEST_FILE_NEW_USER_SHARE.dat') 210 | logger.info ('Checking that %s is present in local directory ', sharedFile) 211 | error_check(os.path.exists(sharedFile), "File %s should exist" %sharedFile) 212 | else: 213 | for i in range(1,numFilesToCreate): 214 | filename = "%s%i%s" % ('localShareDir/TEST_FILE_NEW_USER_SHARE_',i,'.dat') 215 | logger.info ('Checking that %s is present in local directory ', filename) 216 | sharedFile = os.path.join(tmpDir, filename) 217 | error_check(os.path.exists(sharedFile), "File %s should exist" %sharedFile) 218 | 219 | --------------------------------------------------------------------------------
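For orientation, the test modules above all follow one skeleton: module-level testsets parametrize a run, a finish_if_not_capable() guard skips servers that lack the requested webdav endpoint, @add_worker functions synchronize on numbered step() barriers, and assertions go through error_check / expect_webdav_exist. The sketch below is not a file from this repository; it is a minimal illustration written only with helpers that already appear in the tests above (make_workdir, createfile, md5sum, run_ocsync, reflection.getSharedObject), assuming that the object returned by reflection.getSharedObject() is visible to all workers, as the sharing tests use it. The option name 'example_filesizeKB' and the file name 'EXAMPLE.dat' are made up for the example.

from smashbox.utilities import *

filesizeKB = int(config.get('example_filesizeKB', 10))

# True => use new webdav endpoint (dav/files)
# False => use old webdav endpoint (webdav)
use_new_dav_endpoint = bool(config.get('use_new_dav_endpoint', True))

testsets = [
    {'use_new_dav_endpoint': True},
    {'use_new_dav_endpoint': False},
]

def finish_if_not_capable():
    # the dav/files endpoint is only usable on ownCloud 10.0 or newer
    if compare_oc_version('10.0', '<') and use_new_dav_endpoint:
        logger.warning("Skipping test since webdav endpoint is not capable for this server version")
        return True
    return False

@add_worker
def writer(step):
    if finish_if_not_capable():
        return

    # start from a clean account and a clean local run directory
    reset_owncloud_account()
    reset_rundir()

    step(1, 'Create a file and sync it up')

    d = make_workdir()
    createfile(os.path.join(d, 'EXAMPLE.dat'), '0', count=1000, bs=filesizeKB)

    # record the reference checksum for the other worker
    shared = reflection.getSharedObject()
    shared['md5_example'] = md5sum(os.path.join(d, 'EXAMPLE.dat'))

    run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint)
    expect_webdav_exist('EXAMPLE.dat')

    step(3, 'Final step')

@add_worker
def reader(step):
    if finish_if_not_capable():
        return

    step(2, 'Sync the file down and verify its content')

    d = make_workdir()
    run_ocsync(d, use_new_dav_endpoint=use_new_dav_endpoint)

    shared = reflection.getSharedObject()
    checksum = md5sum(os.path.join(d, 'EXAMPLE.dat'))
    error_check(checksum == shared['md5_example'],
                'Expecting to have equal checksums, got %s instead of %s' % (checksum, shared['md5_example']))

    step(3, 'Final step')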