├── .gitignore ├── .hubicfuse.sample ├── Dockerfile ├── LICENSE ├── Makefile.in ├── README.md ├── cloudfsapi.c ├── cloudfsapi.h ├── cloudfuse.c ├── commonfs.c ├── commonfs.h ├── config.h.in ├── configure ├── configure.ac ├── docker-entrypoint.sh ├── docker_mount.sh ├── hubic_token ├── hubic_token_fr.txt └── install-sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by configure 2 | Makefile 3 | config.h 4 | config.log 5 | config.status 6 | 7 | # Binary outputs 8 | cloudfuse 9 | hubicfuse 10 | 11 | project-vc/ -------------------------------------------------------------------------------- /.hubicfuse.sample: -------------------------------------------------------------------------------- 1 | client_id = api_hubic_xxxxx 2 | client_secret = xxxxxx 3 | refresh_token = xxxx 4 | cache_timeout = 600 5 | temp_dir = /tmp/hubicfuse 6 | 7 | get_extended_metadata = true 8 | curl_verbose = false 9 | curl_progress_state = true 10 | cache_statfs_timeout = 15 11 | debug_level = 0 12 | enable_chmod = false 13 | enable_chown = false 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y gcc make curl libfuse-dev pkg-config \ 5 | libcurl4-openssl-dev libxml2-dev libssl-dev libjson-c-dev libmagic-dev && \ 6 | rm -rf /var/lib/apt/lists/* 7 | 8 | COPY . 
/hubicfuse 9 | WORKDIR /hubicfuse 10 | 11 | RUN ./configure && make 12 | 13 | 14 | ENTRYPOINT ["/hubicfuse/docker-entrypoint.sh"] 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2009 Michael Barton 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in 11 | all copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 | THE SOFTWARE. 
20 | 21 | -------------------------------------------------------------------------------- /Makefile.in: -------------------------------------------------------------------------------- 1 | .SUFFIXES: 2 | .SUFFIXES: .c .o 3 | 4 | CC = @CC@ 5 | CPPFLAGS = @CPPFLAGS@ 6 | CFLAGS = @CFLAGS@ @XML_CFLAGS@ @CURL_CFLAGS@ @FUSE_CFLAGS@ @OPENSSL_CFLAGS@ @JSON_CFLAGS@ 7 | LDFLAGS = @LDFLAGS@ 8 | LIBS = @LIBS@ @XML_LIBS@ @CURL_LIBS@ @FUSE_LIBS@ @OPENSSL_LIBS@ @JSON_LIBS@ -lmagic 9 | INSTALL = @INSTALL@ 10 | MKDIR_P = @MKDIR_P@ 11 | prefix = @prefix@ 12 | exec_prefix = @exec_prefix@ 13 | bindir = $(DESTDIR)$(exec_prefix)/bin 14 | 15 | SOURCES=cloudfsapi.c cloudfuse.c commonfs.c 16 | HEADERS=cloudfsapi.h commonfs.h 17 | 18 | all: hubicfuse 19 | 20 | install: all $(bindir) 21 | $(INSTALL) hubicfuse $(bindir)/hubicfuse 22 | 23 | uninstall: 24 | /bin/rm -f $(bindir)/hubicfuse 25 | 26 | $(bindir): 27 | $(MKDIR_P) $(bindir) 28 | 29 | hubicfuse: $(SOURCES) $(HEADERS) 30 | $(CC) $(CPPFLAGS) $(CFLAGS) -o hubicfuse $(SOURCES) $(LIBS) $(LDFLAGS) 31 | 32 | clean: 33 | /bin/rm -fr hubicfuse *.orig config.h *~ aclocal.m4 autom4te.cache 34 | 35 | distclean: clean 36 | /bin/rm -f Makefile config.h config.status config.cache config.log \ 37 | marklib.dvi 38 | 39 | mostlyclean: clean 40 | 41 | maintainer-clean: clean 42 | 43 | debug: CFLAGS += -g 44 | debug: hubicfuse 45 | 46 | config.h.in: stamp-h.in 47 | stamp-h.in: configure.in 48 | autoheader 49 | echo timestamp > stamp-h.in 50 | 51 | config.h: stamp-h 52 | stamp-h: config.h.in config.status 53 | ./config.status 54 | Makefile: Makefile.in config.status 55 | ./config.status 56 | config.status: configure 57 | ./config.status --recheck 58 | 59 | reformat: 60 | astyle --style=allman \ 61 | --convert-tabs \ 62 | --indent=spaces=2 \ 63 | --lineend=linux \ 64 | --max-code-length=79 \ 65 | --pad-oper \ 66 | --pad-header \ 67 | --align-pointer=type \ 68 | --align-reference=name \ 69 | --break-closing-brackets \ 70 | --min-conditional-indent=0 \ 71 | 
--remove-brackets \ 72 | --remove-comment-prefix \ 73 | *.c *.h 74 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # HubicFuse 2 | 3 | A FUSE application which provides access to hubiC's cloud files via a mount-point. 4 | 5 | This version contains support for DLO, symlinks and support to see other tenant's containers. 6 | Those features are coming from https://github.com/LabAdvComp/cloudfuse 7 | 8 | ### BUILDING 9 | 10 | You'll need libcurl, fuse, libssl, and libxml2 (and probably their development packages) installed to build it. 11 | 12 | For CentOS and other RedHat-based systems: 13 | 14 | yum install gcc make fuse-devel curl-devel libxml2-devel \ 15 | openssl-devel json-c-devel file-devel 16 | 17 | PKG_CONFIG_PATH=/lib64/pkgconfig ./configure 18 | 19 | For Debian GNU/Linux and Ubuntu: 20 | 21 | sudo apt install gcc make curl libfuse-dev pkg-config \ 22 | libcurl4-openssl-dev libxml2-dev libssl-dev libjson-c-dev libmagic-dev 23 | 24 | ./configure 25 | 26 | Then just compile and install: 27 | 28 | make 29 | sudo make install 30 | 31 | ### USAGE 32 | 33 | Your hubiC Cloud configuration can be placed in a file named `$HOME/.hubicfuse`. 34 | All the following variables are required: 35 | 36 | client_id=[hubiC client id for the registered application] 37 | client_secret=[hubiC client secret for the registered application] 38 | refresh_token=[A refresh token you got from the script] 39 | 40 | Optional variables: 41 | 42 | get_extended_metadata=[true/false, force download of additional file metadata like atime and mtime on first directory list] 43 | curl_verbose=[true/false, enable verbose output for curl HTTP requests] 44 | curl_progress_state=[true/false, enable verbose progress output for curl HTTP requests. Used for debugging.] 
45 | cache_statfs_timeout=[value in seconds, large timeout increases the file access speed] 46 | debug_level=[0 default, 1 extremely verbose for debugging purposes] 47 | enable_chmod=[true/false, false by default, still experimental feature] 48 | enable_chown=[true/false, false by default, still experimental feature] 49 | 50 | `client_id` and `client_secret` can be retrieved from 51 | the [hubiC web interface](https://hubic.com/home/browser/developers/) 52 | 53 | The `refresh_token` can be obtained running the script provided (`hubic_token`) 54 | or with any other method you like if you follow the example at https://api.hubic.com/ 55 | 56 | Then you can call hubicfuse: 57 | 58 | sudo hubicfuse /mnt/hubic -o noauto_cache,sync_read,allow_other 59 | 60 | And finaly, it can be set in /etc/fstab: 61 | 62 | hubicfuse /mnt/hubic fuse user,noauto 0 0 63 | 64 | It also inherits a number of command-line arguments and mount options from 65 | the Fuse framework. The "-h" argument should provide a summary. 66 | 67 | It is also possible to pass a custom hubicfuse settings file so that it is 68 | possible to mount multiple hubiC accounts: 69 | 70 | sudo hubicfuse /mnt/hubic1 -o noauto_cache,sync_read,allow_other,settings_filename=/root/hubic/account1.settings 71 | sudo hubicfuse /mnt/hubic2 -o noauto_cache,sync_read,allow_other,settings_filename=/root/hubic/account2.settings 72 | 73 | And finaly, in /etc/fstab : 74 | 75 | hubicfuse /mnt/hubic1 fuse user,noauto,settings_filename=/root/hubic/account1.settings 0 0 76 | hubicfuse /mnt/hubic2 fuse user,noauto,settings_filename=/root/hubic/account2.settings 0 0 77 | 78 | ### USAGE AS NON-ROOT 79 | 80 | Add the user into the fuse group: 81 | 82 | sudo usermod -a -G fuse [username] 83 | 84 | Mount using the above command without the sudo. The `.hubicfuse` file is searched in the user's home. 85 | 86 | To unmount use: 87 | 88 | fusermount -u [chemin] 89 | 90 | ### USAGE WITH RSYNC 91 | 92 | hubiC protocol has no support for renaming. 
So be sure to use the 93 | `--inplace` option to avoid a second upload for every uploaded object. 94 | 95 | ### BUGS AND SHORTCOMINGS 96 | 97 | * A segment size is limited to 5Gb (this is not hubicfuse limit, but hubiC implementation). 98 | So segment_above should never exceed 5Gb. 99 | * rename() doesn't work on directories (and probably never will). 100 | * When reading and writing files, it buffers them in a local temp file. 101 | * It keeps an in-memory cache of the directory structure, so it may not be 102 | usable for large file systems. Also, files added by other applications 103 | will not show up until the cache expires. 104 | * The root directory can only contain directories, as these are mapped to 105 | containers in cloudfiles. 106 | * Directory entries are created as empty files with the content-type 107 | "application/directory". 108 | * Cloud Files limits container and object listings to 10,000 items. 109 | cloudfuse won't list more than that many files in a single directory. 110 | * File copy progress when uploading does not work, progress is shown when 111 | file is copied in local cache, then upload operation happens at 100% 112 | 113 | ### RECENT ADDITIONS AND FIXES 114 | 115 | * Support for atime, mtime, chmod, chown. 116 | * Large files (segmented) have correct size listed (was 0 before). 117 | * Multiple speed improvements, minimised the number of HTTP calls and added more caching features. 118 | * Fixed many segmentation faults. 119 | * Cached files are deleted on cache expiration when using a custom temp folder. 120 | * Files copied have attributes preserved. 121 | * Working well with rsync due to mtime support and proper copy operations. 122 | * Debugging support for http progress (track upload / download speed etc.) 
123 | * Reduced traffic, skips file uploads to cloud if content does not change (using md5sum compare) 124 | * Major code refactoring, code documentation, extensive debugging, additional config options 125 | * Support for custom hubicfuse settings file in order to mount multiple accounts 126 | 127 | AWESOME CONTRIBUTORS 128 | 129 | * Pascal Obry https://github.com/TurboGit 130 | * Tim Dysinger https://github.com/dysinger 131 | * Chris Wedgwood https://github.com/cwedgwood 132 | * Nick Craig-Wood https://github.com/ncw 133 | * Dillon Amburgey https://github.com/dillona 134 | * Manfred Touron https://github.com/moul 135 | * David Brownlee https://github.com/abs0 136 | * Mike Lundy https://github.com/novas0x2a 137 | * justinb https://github.com/justinsb 138 | * Matt Greenway https://github.com/LabAdvComp 139 | * Dan Cristian https://github.com/dan-cristian 140 | * Nicolas Cailleaux https://github.com/nikokio 141 | 142 | Thanks, and I hope you find it useful. 143 | 144 | Pascal Obry 145 | 146 | -------------------------------------------------------------------------------- /cloudfsapi.c: -------------------------------------------------------------------------------- 1 | #define _GNU_SOURCE 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #ifdef __linux__ 10 | #include 11 | #endif 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | #include 22 | #include 23 | #include "commonfs.h" 24 | #include "cloudfsapi.h" 25 | #include "config.h" 26 | #define FUSE_USE_VERSION 30 27 | #include 28 | 29 | #define RHEL5_LIBCURL_VERSION 462597 30 | #define RHEL5_CERTIFICATE_FILE "/etc/pki/tls/certs/ca-bundle.crt" 31 | #define REQUEST_RETRIES 3 32 | #define MAX_FILES 10000 33 | // size of buffer for writing to disk look at ioblksize.h in coreutils 34 | // and try some values on your own system if you want the best performance 35 | #define DISK_BUFF_SIZE 32768 36 | 
37 | long segment_size; 38 | long segment_above; 39 | 40 | char* override_storage_url; 41 | char* public_container; 42 | 43 | static char storage_url[MAX_URL_SIZE]; 44 | static char storage_token[MAX_HEADER_SIZE]; 45 | static pthread_mutex_t pool_mut; 46 | static CURL* curl_pool[1024]; 47 | static int curl_pool_count = 0; 48 | extern int debug; 49 | extern int verify_ssl; 50 | extern bool option_get_extended_metadata; 51 | extern bool option_curl_verbose; 52 | extern int option_curl_progress_state; 53 | extern int option_cache_statfs_timeout; 54 | extern bool option_extensive_debug; 55 | extern bool option_enable_chown; 56 | extern bool option_enable_chmod; 57 | static int rhel5_mode = 0; 58 | static struct statvfs statcache = 59 | { 60 | .f_bsize = 4096, 61 | .f_frsize = 4096, 62 | .f_blocks = INT_MAX, 63 | .f_bfree = INT_MAX, 64 | .f_bavail = INT_MAX, 65 | .f_files = MAX_FILES, 66 | .f_ffree = 0, 67 | .f_favail = 0, 68 | .f_namemax = INT_MAX 69 | }; 70 | //used to compute statfs cache interval 71 | static time_t last_stat_read_time = 0; 72 | extern FuseOptions options; 73 | struct MemoryStruct 74 | { 75 | char* memory; 76 | size_t size; 77 | }; 78 | 79 | #ifdef HAVE_OPENSSL 80 | #include 81 | static pthread_mutex_t* ssl_lockarray; 82 | static void lock_callback(int mode, int type, char* file, int line) 83 | { 84 | if (mode & CRYPTO_LOCK) 85 | pthread_mutex_lock(&(ssl_lockarray[type])); 86 | else 87 | pthread_mutex_unlock(&(ssl_lockarray[type])); 88 | } 89 | 90 | static unsigned long thread_id() 91 | { 92 | return (unsigned long)pthread_self(); 93 | } 94 | #endif 95 | 96 | static size_t xml_dispatch(void* ptr, size_t size, size_t nmemb, void* stream) 97 | { 98 | xmlParseChunk((xmlParserCtxtPtr)stream, (char*)ptr, size * nmemb, 0); 99 | return size * nmemb; 100 | } 101 | 102 | static CURL* get_connection(const char* path) 103 | { 104 | pthread_mutex_lock(&pool_mut); 105 | CURL* curl = curl_pool_count ? 
curl_pool[--curl_pool_count] : curl_easy_init(); 106 | if (!curl) 107 | { 108 | debugf(DBG_LEVEL_NORM, KRED"curl alloc failed"); 109 | pthread_mutex_unlock(&pool_mut); 110 | abort(); 111 | } 112 | pthread_mutex_unlock(&pool_mut); 113 | return curl; 114 | } 115 | 116 | static void return_connection(CURL* curl) 117 | { 118 | pthread_mutex_lock(&pool_mut); 119 | curl_pool[curl_pool_count++] = curl; 120 | pthread_mutex_unlock(&pool_mut); 121 | } 122 | 123 | static void add_header(curl_slist** headers, const char* name, 124 | const char* value) 125 | { 126 | char x_header[MAX_HEADER_SIZE]; 127 | char safe_value[256]; 128 | const char* value_ptr; 129 | 130 | debugf(DBG_LEVEL_EXT, "add_header(%s:%s)", name, value); 131 | if (strlen(value) > 256) 132 | { 133 | debugf(DBG_LEVEL_NORM, KRED"add_header: warning, value size > 256 (%s:%s) ", 134 | name, value); 135 | //hubic will throw an HTTP 400 error on X-Copy-To operation if X-Object-Meta-FilePath header value is larger than 256 chars 136 | //fix for issue #95 https://github.com/TurboGit/hubicfuse/issues/95 137 | if (!strcasecmp(name, "X-Object-Meta-FilePath")) 138 | { 139 | debugf(DBG_LEVEL_NORM, 140 | KRED"add_header: trimming header (%s) value to max allowed", name); 141 | //trim header size to max allowed 142 | strncpy(safe_value, value, 256 - 1); 143 | safe_value[255] = '\0'; 144 | value_ptr = safe_value; 145 | } 146 | else 147 | value_ptr = value; 148 | } 149 | else 150 | value_ptr = value; 151 | 152 | snprintf(x_header, sizeof(x_header), "%s: %s", name, value_ptr); 153 | *headers = curl_slist_append(*headers, x_header); 154 | } 155 | 156 | static size_t header_dispatch(void* ptr, size_t size, size_t nmemb, 157 | void* dir_entry) 158 | { 159 | char* header = (char*)alloca(size * nmemb + 1); 160 | char* head = (char*)alloca(size * nmemb + 1); 161 | char* value = (char*)alloca(size * nmemb + 1); 162 | memcpy(header, (char*)ptr, size * nmemb); 163 | header[size * nmemb] = '\0'; 164 | if (sscanf(header, "%[^:]: %[^\r\n]", 
head, value) == 2) 165 | { 166 | if (!strncasecmp(head, "x-auth-token", size * nmemb)) 167 | strncpy(storage_token, value, sizeof(storage_token)); 168 | if (!strncasecmp(head, "x-storage-url", size * nmemb)) 169 | strncpy(storage_url, value, sizeof(storage_url)); 170 | if (!strncasecmp(head, "x-account-meta-quota", size * nmemb)) 171 | statcache.f_blocks = (unsigned long) (strtoull(value, NULL, 172 | 10) / statcache.f_frsize); 173 | if (!strncasecmp(head, "x-account-bytes-used", size * nmemb)) 174 | statcache.f_bfree = statcache.f_bavail = statcache.f_blocks - (unsigned long) ( 175 | strtoull(value, NULL, 10) / statcache.f_frsize); 176 | if (!strncasecmp(head, "x-account-object-count", size * nmemb)) 177 | { 178 | unsigned long object_count = strtoul(value, NULL, 10); 179 | statcache.f_ffree = MAX_FILES - object_count; 180 | statcache.f_favail = MAX_FILES - object_count; 181 | } 182 | } 183 | return size * nmemb; 184 | } 185 | 186 | static void header_set_time_from_str(char* time_str, 187 | struct timespec* time_entry) 188 | { 189 | char sec_value[TIME_CHARS] = { 0 }; 190 | char nsec_value[TIME_CHARS] = { 0 }; 191 | time_t sec; 192 | long nsec; 193 | sscanf(time_str, "%[^.].%[^\n]", sec_value, nsec_value); 194 | sec = strtoll(sec_value, NULL, 10);//to allow for larger numbers 195 | nsec = atol(nsec_value); 196 | debugf(DBG_LEVEL_EXTALL, "Received time=%s.%s / %li.%li, existing=%li.%li", 197 | sec_value, nsec_value, sec, nsec, time_entry->tv_sec, time_entry->tv_nsec); 198 | if (sec != time_entry->tv_sec || nsec != time_entry->tv_nsec) 199 | { 200 | debugf(DBG_LEVEL_EXTALL, 201 | "Time changed, setting new time=%li.%li, existing was=%li.%li", 202 | sec, nsec, time_entry->tv_sec, time_entry->tv_nsec); 203 | time_entry->tv_sec = sec; 204 | time_entry->tv_nsec = nsec; 205 | 206 | char time_str_local[TIME_CHARS] = ""; 207 | get_time_as_string((time_t)sec, nsec, time_str_local, sizeof(time_str_local)); 208 | debugf(DBG_LEVEL_EXTALL, "header_set_time_from_str received 
time=[%s]", 209 | time_str_local); 210 | 211 | get_timespec_as_str(time_entry, time_str_local, sizeof(time_str_local)); 212 | debugf(DBG_LEVEL_EXTALL, "header_set_time_from_str set time=[%s]", 213 | time_str_local); 214 | } 215 | } 216 | 217 | static size_t header_get_meta_dispatch(void* ptr, size_t size, size_t nmemb, 218 | void* userdata) 219 | { 220 | char* header = (char*)alloca(size * nmemb + 1); 221 | char* head = (char*)alloca(size * nmemb + 1); 222 | char* value = (char*)alloca(size * nmemb + 1); 223 | memcpy(header, (char*)ptr, size * nmemb); 224 | header[size * nmemb] = '\0'; 225 | static char storage[MAX_HEADER_SIZE]; 226 | if (sscanf(header, "%[^:]: %[^\r\n]", head, value) == 2) 227 | { 228 | strncpy(storage, head, sizeof(storage)); 229 | dir_entry* de = (dir_entry*)userdata; 230 | if (de != NULL) 231 | { 232 | if (!strncasecmp(head, HEADER_TEXT_ATIME, size * nmemb)) 233 | header_set_time_from_str(value, &de->atime); 234 | if (!strncasecmp(head, HEADER_TEXT_CTIME, size * nmemb)) 235 | header_set_time_from_str(value, &de->ctime); 236 | if (!strncasecmp(head, HEADER_TEXT_MTIME, size * nmemb)) 237 | header_set_time_from_str(value, &de->mtime); 238 | if (!strncasecmp(head, HEADER_TEXT_CHMOD, size * nmemb)) 239 | de->chmod = atoi(value); 240 | if (!strncasecmp(head, HEADER_TEXT_GID, size * nmemb)) 241 | de->gid = atoi(value); 242 | if (!strncasecmp(head, HEADER_TEXT_UID, size * nmemb)) 243 | de->uid = atoi(value); 244 | } 245 | else 246 | debugf(DBG_LEVEL_EXT, 247 | "Unexpected NULL dir_entry on header(%s), file should be in cache already", 248 | storage); 249 | } 250 | else 251 | { 252 | //debugf(DBG_LEVEL_NORM, "Received unexpected header line"); 253 | } 254 | return size * nmemb; 255 | } 256 | 257 | static size_t rw_callback(size_t (*rw)(void*, size_t, size_t, FILE*), 258 | void* ptr, 259 | size_t size, size_t nmemb, void* userp) 260 | { 261 | struct segment_info* info = (struct segment_info*)userp; 262 | size_t mem = size * nmemb; 263 | if (mem < 1 || 
info->size < 1) 264 | return 0; 265 | 266 | size_t amt_read = rw(ptr, 1, info->size < mem ? info->size : mem, info->fp); 267 | info->size -= amt_read; 268 | return amt_read; 269 | } 270 | 271 | size_t fwrite2(void* ptr, size_t size, size_t nmemb, FILE* filep) 272 | { 273 | return fwrite((const void*)ptr, size, nmemb, filep); 274 | } 275 | 276 | static size_t read_callback(void* ptr, size_t size, size_t nmemb, void* userp) 277 | { 278 | return rw_callback(fread, ptr, size, nmemb, userp); 279 | } 280 | 281 | static size_t write_callback(void* ptr, size_t size, size_t nmemb, void* userp) 282 | { 283 | return rw_callback(fwrite2, ptr, size, nmemb, userp); 284 | } 285 | 286 | //http://curl.haxx.se/libcurl/c/CURLOPT_XFERINFOFUNCTION.html 287 | int progress_callback_xfer(void* clientp, curl_off_t dltotal, curl_off_t dlnow, 288 | curl_off_t ultotal, curl_off_t ulnow) 289 | { 290 | struct curl_progress* myp = (struct curl_progress*)clientp; 291 | CURL* curl = myp->curl; 292 | double curtime = 0; 293 | double dspeed = 0, uspeed = 0; 294 | 295 | curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &curtime); 296 | curl_easy_getinfo(curl, CURLINFO_SPEED_DOWNLOAD, &dspeed); 297 | curl_easy_getinfo(curl, CURLINFO_SPEED_UPLOAD, &uspeed); 298 | 299 | /* under certain circumstances it may be desirable for certain functionality 300 | to only run every N seconds, in order to do this the transaction time can 301 | be used */ 302 | //http://curl.haxx.se/cvssource/src/tool_cb_prg.c 303 | if ((curtime - myp->lastruntime) >= MINIMAL_PROGRESS_FUNCTIONALITY_INTERVAL) 304 | { 305 | myp->lastruntime = curtime; 306 | curl_off_t total; 307 | curl_off_t point; 308 | double frac, percent; 309 | total = dltotal + ultotal; 310 | point = dlnow + ulnow; 311 | frac = (double)point / (double)total; 312 | percent = frac * 100.0f; 313 | debugf(DBG_LEVEL_EXT, "TOTAL TIME: %.0f sec Down=%.0f Kbps UP=%.0f Kbps", 314 | curtime, dspeed / 1024, uspeed / 1024); 315 | debugf(DBG_LEVEL_EXT, "UP: %lld of %lld DOWN: 
%lld/%lld Completion %.1f %%", 316 | ulnow, ultotal, dlnow, dltotal, percent); 317 | } 318 | return 0; 319 | } 320 | 321 | //http://curl.haxx.se/libcurl/c/CURLOPT_PROGRESSFUNCTION.html 322 | int progress_callback(void* clientp, double dltotal, double dlnow, 323 | double ultotal, double ulnow) 324 | { 325 | return progress_callback_xfer(clientp, (curl_off_t)dltotal, (curl_off_t)dlnow, 326 | (curl_off_t)ultotal, (curl_off_t)ulnow); 327 | } 328 | 329 | 330 | //get the response from HTTP requests, mostly for debug purposes 331 | // http://stackoverflow.com/questions/2329571/c-libcurl-get-output-into-a-string 332 | // http://curl.haxx.se/libcurl/c/getinmemory.html 333 | size_t writefunc_callback(void* contents, size_t size, size_t nmemb, 334 | void* userp) 335 | { 336 | size_t realsize = size * nmemb; 337 | struct MemoryStruct* mem = (struct MemoryStruct*)userp; 338 | 339 | mem->memory = realloc(mem->memory, mem->size + realsize + 1); 340 | if (mem->memory == NULL) 341 | { 342 | /* out of memory! 
*/ 343 | debugf(DBG_LEVEL_NORM, KRED"writefunc_callback: realloc() failed"); 344 | return 0; 345 | } 346 | memcpy(&(mem->memory[mem->size]), contents, realsize); 347 | mem->size += realsize; 348 | mem->memory[mem->size] = 0; 349 | return realsize; 350 | } 351 | 352 | // de_cached_entry must be NULL when the file is already in global cache 353 | // otherwise point to a new dir_entry that will be added to the cache (usually happens on first dir load) 354 | static int send_request_size(const char* method, const char* path, void* fp, 355 | xmlParserCtxtPtr xmlctx, curl_slist* extra_headers, 356 | off_t file_size, int is_segment, 357 | dir_entry* de_cached_entry, const char* unencoded_path) 358 | { 359 | debugf(DBG_LEVEL_EXT, "send_request_size(%s) (%s)", method, path); 360 | char url[MAX_URL_SIZE]; 361 | char orig_path[MAX_URL_SIZE]; 362 | char header_data[MAX_HEADER_SIZE]; 363 | 364 | char* slash; 365 | long response = -1; 366 | int tries = 0; 367 | 368 | //needed to keep the response data, for debug purposes 369 | struct MemoryStruct chunk; 370 | 371 | if (!storage_url[0]) 372 | { 373 | debugf(DBG_LEVEL_NORM, KRED"send_request with no storage_url?"); 374 | abort(); 375 | } 376 | //char *encoded_path = curl_escape(path, 0); 377 | 378 | while ((slash = strstr(path, "%2F")) || (slash = strstr(path, "%2f"))) 379 | { 380 | *slash = '/'; 381 | memmove(slash + 1, slash + 3, strlen(slash + 3) + 1); 382 | } 383 | while (*path == '/') 384 | path++; 385 | snprintf(url, sizeof(url), "%s/%s", storage_url, path); 386 | snprintf(orig_path, sizeof(orig_path), "/%s", path); 387 | 388 | // retry on failures 389 | for (tries = 0; tries < REQUEST_RETRIES; tries++) 390 | { 391 | chunk.memory = malloc(1); /* will be grown as needed by the realloc above */ 392 | chunk.size = 0; /* no data at this point */ 393 | CURL* curl = get_connection(path); 394 | if (rhel5_mode) 395 | curl_easy_setopt(curl, CURLOPT_CAINFO, RHEL5_CERTIFICATE_FILE); 396 | curl_slist* headers = NULL; 397 | 
curl_easy_setopt(curl, CURLOPT_URL, url); 398 | curl_easy_setopt(curl, CURLOPT_HEADER, 0); 399 | curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); 400 | //reversed logic, 0=to enable curl progress 401 | curl_easy_setopt(curl, CURLOPT_NOPROGRESS, option_curl_progress_state ? 0 : 1); 402 | curl_easy_setopt(curl, CURLOPT_USERAGENT, USER_AGENT); 403 | curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, verify_ssl ? 1 : 0); 404 | curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, verify_ssl); 405 | curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10); 406 | curl_easy_setopt(curl, CURLOPT_VERBOSE, option_curl_verbose ? 1 : 0); 407 | add_header(&headers, "X-Auth-Token", storage_token); 408 | dir_entry* de; 409 | if (de_cached_entry == NULL) 410 | de = check_path_info(unencoded_path); 411 | else 412 | { 413 | // updating metadata on a file about to be added to cache (for x-copy, dest meta = src meta) 414 | de = de_cached_entry; 415 | debugf(DBG_LEVEL_EXTALL, "send_request_size: using param dir_entry(%s)", 416 | orig_path); 417 | } 418 | if (!de) 419 | debugf(DBG_LEVEL_EXTALL, 420 | "send_request_size: "KYEL"file not in cache (%s)(%s)(%s)", orig_path, path, 421 | unencoded_path); 422 | else 423 | { 424 | // add headers to save utimens attribs only on upload 425 | if (!strcasecmp(method, "PUT") || !strcasecmp(method, "MKDIR")) 426 | { 427 | debugf(DBG_LEVEL_EXTALL, "send_request_size: Saving utimens for file %s", 428 | orig_path); 429 | debugf(DBG_LEVEL_EXTALL, 430 | "send_request_size: Cached utime for path=%s ctime=%li.%li mtime=%li.%li atime=%li.%li", 431 | orig_path, 432 | de->ctime.tv_sec, de->ctime.tv_nsec, de->mtime.tv_sec, de->mtime.tv_nsec, 433 | de->atime.tv_sec, de->atime.tv_nsec); 434 | 435 | char atime_str_nice[TIME_CHARS] = "", mtime_str_nice[TIME_CHARS] = "", 436 | ctime_str_nice[TIME_CHARS] = ""; 437 | get_timespec_as_str(&(de->atime), atime_str_nice, sizeof(atime_str_nice)); 438 | debugf(DBG_LEVEL_EXTALL, KCYN"send_request_size: atime=[%s]", atime_str_nice); 439 | 
get_timespec_as_str(&(de->mtime), mtime_str_nice, sizeof(mtime_str_nice)); 440 | debugf(DBG_LEVEL_EXTALL, KCYN"send_request_size: mtime=[%s]", mtime_str_nice); 441 | get_timespec_as_str(&(de->ctime), ctime_str_nice, sizeof(ctime_str_nice)); 442 | debugf(DBG_LEVEL_EXTALL, KCYN"send_request_size: ctime=[%s]", ctime_str_nice); 443 | 444 | char mtime_str[TIME_CHARS], atime_str[TIME_CHARS], ctime_str[TIME_CHARS]; 445 | char string_float[TIME_CHARS]; 446 | snprintf(mtime_str, TIME_CHARS, "%lu.%lu", de->mtime.tv_sec, 447 | de->mtime.tv_nsec); 448 | snprintf(atime_str, TIME_CHARS, "%lu.%lu", de->atime.tv_sec, 449 | de->atime.tv_nsec); 450 | snprintf(ctime_str, TIME_CHARS, "%lu.%lu", de->ctime.tv_sec, 451 | de->ctime.tv_nsec); 452 | add_header(&headers, HEADER_TEXT_FILEPATH, orig_path); 453 | add_header(&headers, HEADER_TEXT_MTIME, mtime_str); 454 | add_header(&headers, HEADER_TEXT_ATIME, atime_str); 455 | add_header(&headers, HEADER_TEXT_CTIME, ctime_str); 456 | add_header(&headers, HEADER_TEXT_MTIME_DISPLAY, mtime_str_nice); 457 | add_header(&headers, HEADER_TEXT_ATIME_DISPLAY, atime_str_nice); 458 | add_header(&headers, HEADER_TEXT_CTIME_DISPLAY, ctime_str_nice); 459 | 460 | char gid_str[INT_CHAR_LEN], uid_str[INT_CHAR_LEN], chmod_str[INT_CHAR_LEN]; 461 | snprintf(gid_str, INT_CHAR_LEN, "%d", de->gid); 462 | snprintf(uid_str, INT_CHAR_LEN, "%d", de->uid); 463 | snprintf(chmod_str, INT_CHAR_LEN, "%d", de->chmod); 464 | add_header(&headers, HEADER_TEXT_GID, gid_str); 465 | add_header(&headers, HEADER_TEXT_UID, uid_str); 466 | add_header(&headers, HEADER_TEXT_CHMOD, chmod_str); 467 | } 468 | else 469 | debugf(DBG_LEVEL_EXTALL, "send_request_size: not setting utimes (%s)", 470 | orig_path); 471 | } 472 | if (!strcasecmp(method, "MKDIR")) 473 | { 474 | curl_easy_setopt(curl, CURLOPT_UPLOAD, 1); 475 | curl_easy_setopt(curl, CURLOPT_INFILESIZE, 0); 476 | add_header(&headers, "Content-Type", "application/directory"); 477 | } 478 | else if (!strcasecmp(method, "MKLINK") && fp) 
479 | { 480 | rewind(fp); 481 | curl_easy_setopt(curl, CURLOPT_UPLOAD, 1); 482 | curl_easy_setopt(curl, CURLOPT_INFILESIZE, file_size); 483 | curl_easy_setopt(curl, CURLOPT_READDATA, fp); 484 | add_header(&headers, "Content-Type", "application/link"); 485 | } 486 | else if (!strcasecmp(method, "PUT")) 487 | { 488 | //http://blog.chmouel.com/2012/02/06/anatomy-of-a-swift-put-query-to-object-server/ 489 | debugf(DBG_LEVEL_EXT, "send_request_size: PUT (%s)", orig_path); 490 | curl_easy_setopt(curl, CURLOPT_UPLOAD, 1); 491 | if (fp) 492 | { 493 | curl_easy_setopt(curl, CURLOPT_INFILESIZE, file_size); 494 | curl_easy_setopt(curl, CURLOPT_READDATA, fp); 495 | } 496 | else 497 | curl_easy_setopt(curl, CURLOPT_INFILESIZE, 0); 498 | if (is_segment) 499 | curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_callback); 500 | //enable progress reporting 501 | //http://curl.haxx.se/libcurl/c/progressfunc.html 502 | struct curl_progress prog; 503 | prog.lastruntime = 0; 504 | prog.curl = curl; 505 | curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_callback); 506 | /* pass the struct pointer into the progress function */ 507 | curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, &prog); 508 | //get the response for debug purposes 509 | /* send all data to this function */ 510 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc_callback); 511 | /* we pass our 'chunk' struct to the callback function */ 512 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, (void*)&chunk); 513 | } 514 | else if (!strcasecmp(method, "GET")) 515 | { 516 | if (is_segment) 517 | { 518 | debugf(DBG_LEVEL_EXT, "send_request_size: GET SEGMENT (%s)", orig_path); 519 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback); 520 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, fp); 521 | } 522 | else if (fp) 523 | { 524 | debugf(DBG_LEVEL_EXT, "send_request_size: GET FP (%s)", orig_path); 525 | rewind(fp); // make sure the file is ready for a-writin' 526 | fflush(fp); 527 | if (ftruncate(fileno(fp), 0) < 0) 
528 | { 529 | debugf(DBG_LEVEL_NORM, 530 | KRED"ftruncate failed. I don't know what to do about that."); 531 | abort(); 532 | } 533 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, fp); 534 | curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, &header_get_meta_dispatch); 535 | // sample by UThreadCurl.cpp, https://bitbucket.org/pamungkas5/bcbcurl/src 536 | // and http://www.codeproject.com/Articles/838366/BCBCurl-a-LibCurl-based-download-manager 537 | curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void*)de); 538 | 539 | struct curl_progress prog; 540 | prog.lastruntime = 0; 541 | prog.curl = curl; 542 | curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_callback); 543 | curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, &prog); 544 | } 545 | else if (xmlctx) 546 | { 547 | debugf(DBG_LEVEL_EXT, "send_request_size: GET XML (%s)", orig_path); 548 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, xmlctx); 549 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &xml_dispatch); 550 | } 551 | else 552 | { 553 | //asumming retrieval of headers only 554 | debugf(DBG_LEVEL_EXT, "send_request_size: GET HEADERS only(%s)"); 555 | curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, &header_get_meta_dispatch); 556 | curl_easy_setopt(curl, CURLOPT_HEADERDATA, (void*)de); 557 | curl_easy_setopt(curl, CURLOPT_NOBODY, 1); 558 | } 559 | } 560 | else 561 | { 562 | debugf(DBG_LEVEL_EXT, "send_request_size: catch_all (%s)"); 563 | // this posts an HEAD request (e.g. 
for statfs) 564 | curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, method); 565 | curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, &header_dispatch); 566 | } 567 | /* add the headers from extra_headers if any */ 568 | curl_slist* extra; 569 | for (extra = extra_headers; extra; extra = extra->next) 570 | { 571 | debugf(DBG_LEVEL_EXT, "adding header: %s", extra->data); 572 | headers = curl_slist_append(headers, extra->data); 573 | } 574 | curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); 575 | debugf(DBG_LEVEL_EXT, "status: send_request_size(%s) started HTTP REQ:%s", 576 | orig_path, url); 577 | curl_easy_perform(curl); 578 | double total_time; 579 | char* effective_url; 580 | curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response); 581 | curl_easy_getinfo(curl, CURLINFO_EFFECTIVE_URL, &effective_url); 582 | curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time); 583 | debugf(DBG_LEVEL_EXT, 584 | "status: send_request_size(%s) completed HTTP REQ:%s total_time=%.1f seconds", 585 | orig_path, effective_url, total_time); 586 | curl_slist_free_all(headers); 587 | curl_easy_reset(curl); 588 | return_connection(curl); 589 | 590 | if (response != 404 && (response >= 400 || response < 200)) 591 | { 592 | /* 593 | Now, our chunk.memory points to a memory block that is chunk.size 594 | bytes big and contains the remote file. 
595 | */ 596 | debugf(DBG_LEVEL_NORM, 597 | KRED"send_request_size: error message, size=%lu, [HTTP %d] (%s)(%s)", 598 | (long)chunk.size, response, method, path); 599 | debugf(DBG_LEVEL_NORM, KRED"send_request_size: error message=[%s]", 600 | chunk.memory); 601 | } 602 | free(chunk.memory); 603 | 604 | if ((response >= 200 && response < 400) || (!strcasecmp(method, "DELETE") 605 | && response == 409)) 606 | { 607 | debugf(DBG_LEVEL_NORM, 608 | "exit 0: send_request_size(%s) speed=%.1f sec "KCYN"(%s) "KGRN"[HTTP OK]", 609 | orig_path, total_time, method); 610 | return response; 611 | } 612 | //handle cases when file is not found, no point in retrying, will exit 613 | if (response == 404) 614 | { 615 | debugf(DBG_LEVEL_NORM, 616 | "send_request_size: not found error for (%s)(%s), ignored "KYEL"[HTTP 404].", 617 | method, path); 618 | return response; 619 | } 620 | else 621 | { 622 | debugf(DBG_LEVEL_NORM, 623 | "send_request_size: httpcode=%d (%s)(%s), retrying "KRED"[HTTP ERR]", response, 624 | method, path); 625 | //todo: try to list response content for debug purposes 626 | sleep(8 << tries); // backoff 627 | } 628 | if (response == 401 && !cloudfs_connect()) 629 | { 630 | // re-authenticate on 401s 631 | debugf(DBG_LEVEL_NORM, KYEL"exit 1: send_request_size(%s) (%s) [HTTP REAUTH]", 632 | path, method); 633 | return response; 634 | } 635 | if (xmlctx) 636 | xmlCtxtResetPush(xmlctx, NULL, 0, NULL, NULL); 637 | } 638 | debugf(DBG_LEVEL_NORM, "exit 2: send_request_size(%s)"KCYN"(%s) response=%d", 639 | path, method, response); 640 | return response; 641 | } 642 | 643 | int send_request(char* method, const char* path, FILE* fp, 644 | xmlParserCtxtPtr xmlctx, curl_slist* extra_headers, dir_entry* de_cached_entry, 645 | const char* unencoded_path) 646 | { 647 | long flen = 0; 648 | if (fp) 649 | { 650 | // if we don't flush the size will probably be zero 651 | fflush(fp); 652 | flen = cloudfs_file_size(fileno(fp)); 653 | } 654 | return send_request_size(method, path, 
fp, xmlctx, extra_headers, flen, 0, 655 | de_cached_entry, unencoded_path); 656 | } 657 | 658 | //thread that downloads or uploads large file segments 659 | void* upload_segment(void* seginfo) 660 | { 661 | struct segment_info* info = (struct segment_info*)seginfo; 662 | 663 | char seg_path[MAX_URL_SIZE] = { 0 }; 664 | //set pointer to the segment start index in the complete large file (several threads will write to same large file) 665 | fseek(info->fp, info->part * info->segment_size, SEEK_SET); 666 | setvbuf(info->fp, NULL, _IOFBF, DISK_BUFF_SIZE); 667 | 668 | snprintf(seg_path, MAX_URL_SIZE, "%s%08i", info->seg_base, info->part); 669 | char* encoded = curl_escape(seg_path, 0); 670 | 671 | debugf(DBG_LEVEL_EXT, KCYN"upload_segment(%s) part=%d size=%d seg_size=%d %s", 672 | info->method, info->part, info->size, info->segment_size, seg_path); 673 | 674 | int response = send_request_size(info->method, encoded, info, NULL, NULL, 675 | info->size, 1, NULL, seg_path); 676 | 677 | if (!(response >= 200 && response < 300)) 678 | fprintf(stderr, "Segment upload %s failed with response %d", seg_path, 679 | response); 680 | 681 | curl_free(encoded); 682 | fclose(info->fp); 683 | pthread_exit(NULL); 684 | } 685 | 686 | // segment_size is the globabl config variable and size_of_segment is local 687 | //TODO: return whether the upload/download failed or not 688 | void run_segment_threads(const char* method, int segments, int full_segments, 689 | int remaining, 690 | FILE* fp, char* seg_base, int size_of_segments) 691 | { 692 | debugf(DBG_LEVEL_EXT, "run_segment_threads(%s)", method); 693 | char file_path[PATH_MAX] = { 0 }; 694 | struct segment_info* info = (struct segment_info*) 695 | malloc(segments * sizeof(struct segment_info)); 696 | 697 | pthread_t* threads = (pthread_t*)malloc(segments * sizeof(pthread_t)); 698 | #ifdef __linux__ 699 | snprintf(file_path, PATH_MAX, "/proc/self/fd/%d", fileno(fp)); 700 | debugf(DBG_LEVEL_NORM, "On run segment filepath=%s", file_path); 
701 | #else 702 | //TODO: I haven't actually tested this 703 | if (fcntl(fileno(fp), F_GETPATH, file_path) == -1) 704 | fprintf(stderr, "couldn't get the path name\n"); 705 | #endif 706 | 707 | int i, ret; 708 | for (i = 0; i < segments; i++) 709 | { 710 | info[i].method = method; 711 | info[i].fp = fopen(file_path, method[0] == 'G' ? "r+" : "r"); 712 | info[i].part = i; 713 | info[i].segment_size = size_of_segments; 714 | info[i].size = i < full_segments ? size_of_segments : remaining; 715 | info[i].seg_base = seg_base; 716 | pthread_create(&threads[i], NULL, upload_segment, (void*) & (info[i])); 717 | } 718 | 719 | for (i = 0; i < segments; i++) 720 | { 721 | if ((ret = pthread_join(threads[i], NULL)) != 0) 722 | fprintf(stderr, "error waiting for thread %d, status = %d\n", i, ret); 723 | } 724 | free(info); 725 | free(threads); 726 | debugf(DBG_LEVEL_EXT, "exit: run_segment_threads(%s)", method); 727 | } 728 | 729 | void split_path(const char* path, char* seg_base, char* container, 730 | char* object) 731 | { 732 | char* string = strdup(path); 733 | snprintf(seg_base, MAX_URL_SIZE, "%s", strsep(&string, "/")); 734 | strncat(container, strsep(&string, "/"), 735 | MAX_URL_SIZE - strnlen(container, MAX_URL_SIZE)); 736 | char* _object = strsep(&string, "/"); 737 | char* remstr; 738 | 739 | while (remstr = strsep(&string, "/")) 740 | { 741 | strncat(container, "/", 742 | MAX_URL_SIZE - strnlen(container, MAX_URL_SIZE)); 743 | strncat(container, _object, 744 | MAX_URL_SIZE - strnlen(container, MAX_URL_SIZE)); 745 | _object = remstr; 746 | } 747 | //fixed: when removing root folders this will generate a segfault 748 | //issue #83, https://github.com/TurboGit/hubicfuse/issues/83 749 | if (_object == NULL) 750 | _object = object; 751 | else 752 | strncpy(object, _object, MAX_URL_SIZE); 753 | free(string); 754 | } 755 | 756 | //checks on the cloud if this file (seg_path) have an associated segment folder 757 | int internal_is_segmented(const char* seg_path, const char* 
object, 758 | const char* parent_path) 759 | { 760 | debugf(DBG_LEVEL_EXT, "internal_is_segmented(%s)", seg_path); 761 | //try to avoid an additional http request for small files 762 | bool potentially_segmented; 763 | dir_entry* de = check_path_info(parent_path); 764 | if (!de) 765 | { 766 | //when files in folders are first loaded the path will not be yet in cache, so need 767 | //to force segment meta download for segmented files 768 | potentially_segmented = true; 769 | } 770 | else 771 | { 772 | //potentially segmented, assumption is that 0 size files are potentially segmented 773 | //while size>0 is for sure not segmented, so no point in making an expensive HTTP GET call 774 | potentially_segmented = (de->size == 0 && !de->isdir) ? true : false; 775 | } 776 | debugf(DBG_LEVEL_EXT, "internal_is_segmented: potentially segmented=%d", 777 | potentially_segmented); 778 | dir_entry* seg_dir; 779 | if (potentially_segmented && cloudfs_list_directory(seg_path, &seg_dir)) 780 | { 781 | if (seg_dir && seg_dir->isdir) 782 | { 783 | do 784 | { 785 | if (!strncmp(seg_dir->name, object, MAX_URL_SIZE)) 786 | { 787 | debugf(DBG_LEVEL_EXT, "exit 0: internal_is_segmented(%s) "KGRN"TRUE", 788 | seg_path); 789 | return 1; 790 | } 791 | } 792 | while ((seg_dir = seg_dir->next)); 793 | } 794 | } 795 | debugf(DBG_LEVEL_EXT, "exit 1: internal_is_segmented(%s) "KYEL"FALSE", 796 | seg_path); 797 | return 0; 798 | } 799 | 800 | int is_segmented(const char* path) 801 | { 802 | debugf(DBG_LEVEL_EXT, "is_segmented(%s)", path); 803 | char container[MAX_URL_SIZE] = { 0 }; 804 | char object[MAX_URL_SIZE] = { 0 }; 805 | char seg_base[MAX_URL_SIZE] = { 0 }; 806 | split_path(path, seg_base, container, object); 807 | char seg_path[MAX_URL_SIZE]; 808 | snprintf(seg_path, MAX_URL_SIZE, "%s/%s_segments", seg_base, container); 809 | return internal_is_segmented(seg_path, object, path); 810 | } 811 | 812 | //returns segmented file properties by parsing and retrieving the folder structure on the cloud 
813 | //added totalsize as parameter to return the file size on list directory for segmented files 814 | //old implementation returns file size=0 (issue #91) 815 | int format_segments(const char* path, char* seg_base, long* segments, 816 | long* full_segments, long* remaining, long* size_of_segments, long* total_size) 817 | { 818 | debugf(DBG_LEVEL_EXT, "format_segments(%s)", path); 819 | char container[MAX_URL_SIZE] = ""; 820 | char object[MAX_URL_SIZE] = ""; 821 | 822 | split_path(path, seg_base, container, object); 823 | 824 | char seg_path[MAX_URL_SIZE]; 825 | snprintf(seg_path, MAX_URL_SIZE, "%s/%s_segments", seg_base, container); 826 | 827 | if (internal_is_segmented(seg_path, object, path)) 828 | { 829 | char manifest[MAX_URL_SIZE]; 830 | dir_entry* seg_dir; 831 | 832 | snprintf(manifest, MAX_URL_SIZE, "%s/%s", seg_path, object); 833 | debugf(DBG_LEVEL_EXT, KMAG"format_segments manifest(%s)", manifest); 834 | if (!cloudfs_list_directory(manifest, &seg_dir)) 835 | { 836 | debugf(DBG_LEVEL_EXT, "exit 0: format_segments(%s)", path); 837 | return 0; 838 | } 839 | 840 | // snprintf seesaw between manifest and seg_path to get 841 | // the total_size and the segment size as well as the actual objects 842 | char* timestamp = seg_dir->name; 843 | snprintf(seg_path, MAX_URL_SIZE, "%s/%s", manifest, timestamp); 844 | debugf(DBG_LEVEL_EXT, KMAG"format_segments seg_path(%s)", seg_path); 845 | if (!cloudfs_list_directory(seg_path, &seg_dir)) 846 | { 847 | debugf(DBG_LEVEL_EXT, "exit 1: format_segments(%s)", path); 848 | return 0; 849 | } 850 | 851 | char* str_size = seg_dir->name; 852 | snprintf(manifest, MAX_URL_SIZE, "%s/%s", seg_path, str_size); 853 | debugf(DBG_LEVEL_EXT, KMAG"format_segments manifest2(%s) size=%s", manifest, 854 | str_size); 855 | if (!cloudfs_list_directory(manifest, &seg_dir)) 856 | { 857 | debugf(DBG_LEVEL_EXT, "exit 2: format_segments(%s)", path); 858 | return 0; 859 | } 860 | 861 | //following folder name actually represents the parent file size 
862 | char* str_segment = seg_dir->name; 863 | snprintf(seg_path, MAX_URL_SIZE, "%s/%s", manifest, str_segment); 864 | debugf(DBG_LEVEL_EXT, KMAG"format_segments seg_path2(%s)", seg_path); 865 | //here is where we get a list with all segment files composing the parent large file 866 | if (!cloudfs_list_directory(seg_path, &seg_dir)) 867 | { 868 | debugf(DBG_LEVEL_EXT, "exit 3: format_segments(%s)", path); 869 | return 0; 870 | } 871 | 872 | *total_size = strtoll(str_size, NULL, 10); 873 | *size_of_segments = strtoll(str_segment, NULL, 10); 874 | *remaining = *total_size % *size_of_segments; 875 | *full_segments = *total_size / *size_of_segments; 876 | *segments = *full_segments + (*remaining > 0); 877 | 878 | snprintf(manifest, MAX_URL_SIZE, "%s_segments/%s/%s/%s/%s/", 879 | container, object, timestamp, str_size, str_segment); 880 | 881 | char tmp[MAX_URL_SIZE]; 882 | strncpy(tmp, seg_base, MAX_URL_SIZE); 883 | snprintf(seg_base, MAX_URL_SIZE, "%s/%s", tmp, manifest); 884 | debugf(DBG_LEVEL_EXT, KMAG"format_segments seg_base(%s)", seg_base); 885 | debugf(DBG_LEVEL_EXT, 886 | KMAG"exit 4: format_segments(%s) total=%d size_of_segments=%d remaining=%d, full_segments=%d segments=%d", 887 | path, &total_size, &size_of_segments, &remaining, &full_segments, &segments); 888 | return 1; 889 | } 890 | else 891 | { 892 | debugf(DBG_LEVEL_EXT, KMAG"exit 5: format_segments(%s) not segmented?", path); 893 | return 0; 894 | } 895 | } 896 | 897 | /* 898 | Public interface 899 | */ 900 | 901 | void cloudfs_init() 902 | { 903 | LIBXML_TEST_VERSION 904 | xmlXPathInit(); 905 | curl_global_init(CURL_GLOBAL_ALL); 906 | pthread_mutex_init(&pool_mut, NULL); 907 | curl_version_info_data* cvid = curl_version_info(CURLVERSION_NOW); 908 | 909 | // CentOS/RHEL 5 get stupid mode, because they have a broken libcurl 910 | if (cvid->version_num == RHEL5_LIBCURL_VERSION) 911 | { 912 | debugf(DBG_LEVEL_NORM, "RHEL5 mode enabled."); 913 | rhel5_mode = 1; 914 | } 915 | 916 | if 
(!strncasecmp(cvid->ssl_version, "openssl", 7)) 917 | { 918 | #ifdef HAVE_OPENSSL 919 | int i; 920 | ssl_lockarray = (pthread_mutex_t*)OPENSSL_malloc(CRYPTO_num_locks() * 921 | sizeof(pthread_mutex_t)); 922 | for (i = 0; i < CRYPTO_num_locks(); i++) 923 | pthread_mutex_init(&(ssl_lockarray[i]), NULL); 924 | CRYPTO_set_id_callback((unsigned long (*)())thread_id); 925 | CRYPTO_set_locking_callback((void (*)())lock_callback); 926 | #endif 927 | } 928 | else if (!strncasecmp(cvid->ssl_version, "nss", 3)) 929 | { 930 | // allow https to continue working after forking (for RHEL/CentOS 6) 931 | setenv("NSS_STRICT_NOFORK", "DISABLED", 1); 932 | } 933 | } 934 | 935 | void cloudfs_free() 936 | { 937 | debugf(DBG_LEVEL_EXT, "Destroy mutex"); 938 | pthread_mutex_destroy(&pool_mut); 939 | int n; 940 | for (n = 0; n < curl_pool_count; ++n) 941 | { 942 | debugf(DBG_LEVEL_EXT, "Cleaning curl conn %d", n); 943 | curl_easy_cleanup(curl_pool[n]); 944 | } 945 | } 946 | 947 | int file_is_readable(const char* fname) 948 | { 949 | FILE* file; 950 | if ( file = fopen( fname, "r" ) ) 951 | { 952 | fclose( file ); 953 | return 1; 954 | } 955 | return 0; 956 | } 957 | 958 | const char* get_file_mimetype ( const char* path ) 959 | { 960 | if ( file_is_readable( path ) == 1 ) 961 | { 962 | magic_t magic; 963 | const char* mime; 964 | 965 | magic = magic_open( MAGIC_MIME_TYPE ); 966 | magic_load( magic, NULL ); 967 | magic_compile( magic, NULL ); 968 | mime = magic_file( magic, path ); 969 | magic_close( magic ); 970 | 971 | return mime; 972 | } 973 | const char* error = "application/octet-stream"; 974 | return error; 975 | } 976 | 977 | 978 | int cloudfs_object_read_fp(const char* path, FILE* fp) 979 | { 980 | debugf(DBG_LEVEL_EXT, "cloudfs_object_read_fp(%s)", path); 981 | long flen; 982 | fflush(fp); 983 | const char* filemimetype = get_file_mimetype( path ); 984 | 985 | // determine the size of the file and segment if it is above the threshhold 986 | fseek(fp, 0, SEEK_END); 987 | flen = 
ftell(fp); 988 | 989 | // delete the previously uploaded segments 990 | if (is_segmented(path)) 991 | { 992 | if (!cloudfs_delete_object(path)) 993 | debugf(DBG_LEVEL_NORM, 994 | KRED"cloudfs_object_read_fp: couldn't delete existing file"); 995 | else 996 | debugf(DBG_LEVEL_EXT, KYEL"cloudfs_object_read_fp: deleted existing file"); 997 | } 998 | 999 | struct timespec now; 1000 | if (flen >= segment_above) 1001 | { 1002 | int i; 1003 | long remaining = flen % segment_size; 1004 | int full_segments = flen / segment_size; 1005 | int segments = full_segments + (remaining > 0); 1006 | 1007 | // The best we can do here is to get the current time that way tools that 1008 | // use the mtime can at least check if the file was changing after now 1009 | clock_gettime(CLOCK_REALTIME, &now); 1010 | char string_float[TIME_CHARS]; 1011 | snprintf(string_float, TIME_CHARS, "%lu.%lu", now.tv_sec, now.tv_nsec); 1012 | char meta_mtime[TIME_CHARS]; 1013 | snprintf(meta_mtime, TIME_CHARS, "%f", atof(string_float)); 1014 | char seg_base[MAX_URL_SIZE] = ""; 1015 | char container[MAX_URL_SIZE] = ""; 1016 | char object[MAX_URL_SIZE] = ""; 1017 | split_path(path, seg_base, container, object); 1018 | char manifest[MAX_URL_SIZE]; 1019 | snprintf(manifest, MAX_URL_SIZE, "%s_segments", container); 1020 | // create the segments container 1021 | cloudfs_create_directory(manifest); 1022 | // reusing manifest 1023 | // TODO: check how addition of meta_mtime in manifest impacts utimens implementation 1024 | snprintf(manifest, MAX_URL_SIZE, "%s_segments/%s/%s/%ld/%ld/", 1025 | container, object, meta_mtime, flen, segment_size); 1026 | char tmp[MAX_URL_SIZE]; 1027 | strncpy(tmp, seg_base, MAX_URL_SIZE); 1028 | snprintf(seg_base, MAX_URL_SIZE, "%s/%s", tmp, manifest); 1029 | 1030 | run_segment_threads("PUT", segments, full_segments, remaining, fp, 1031 | seg_base, segment_size); 1032 | 1033 | char* encoded = curl_escape(path, 0); 1034 | curl_slist* headers = NULL; 1035 | add_header(&headers, 
"x-object-manifest", manifest); 1036 | add_header(&headers, "Content-Length", "0"); 1037 | add_header(&headers, "Content-Type", filemimetype); 1038 | int response = send_request_size("PUT", encoded, NULL, NULL, headers, 0, 0, 1039 | NULL, path); 1040 | curl_slist_free_all(headers); 1041 | curl_free(encoded); 1042 | debugf(DBG_LEVEL_EXT, 1043 | "exit 0: cloudfs_object_read_fp(%s) uploaded ok, response=%d", path, response); 1044 | return (response >= 200 && response < 300); 1045 | } 1046 | else 1047 | { 1048 | // assume enters here when file is composed of only one segment (small files) 1049 | debugf(DBG_LEVEL_EXT, "cloudfs_object_read_fp(%s) "KYEL"unknown state", path); 1050 | } 1051 | rewind(fp); 1052 | char* encoded = curl_escape(path, 0); 1053 | dir_entry* de = path_info(path); 1054 | if (!de) 1055 | debugf(DBG_LEVEL_EXT, "cloudfs_object_read_fp(%s) not in cache", path); 1056 | else 1057 | debugf(DBG_LEVEL_EXT, "cloudfs_object_read_fp(%s) found in cache", path); 1058 | int response = send_request("PUT", encoded, fp, NULL, NULL, NULL, path); 1059 | curl_free(encoded); 1060 | debugf(DBG_LEVEL_EXT, "exit 1: cloudfs_object_read_fp(%s)", path); 1061 | return (response >= 200 && response < 300); 1062 | } 1063 | 1064 | //write file downloaded from cloud to local file 1065 | int cloudfs_object_write_fp(const char* path, FILE* fp) 1066 | { 1067 | debugf(DBG_LEVEL_EXT, "cloudfs_object_write_fp(%s)", path); 1068 | char* encoded = curl_escape(path, 0); 1069 | char seg_base[MAX_URL_SIZE] = ""; 1070 | 1071 | long segments; 1072 | long full_segments; 1073 | long remaining; 1074 | long size_of_segments; 1075 | long total_size; 1076 | 1077 | //checks if this file is a segmented one 1078 | if (format_segments(path, seg_base, &segments, &full_segments, &remaining, 1079 | &size_of_segments, &total_size)) 1080 | { 1081 | rewind(fp); 1082 | fflush(fp); 1083 | if (ftruncate(fileno(fp), 0) < 0) 1084 | { 1085 | debugf(DBG_LEVEL_NORM, 1086 | KRED"ftruncate failed. 
I don't know what to do about that."); 1087 | abort(); 1088 | } 1089 | run_segment_threads("GET", segments, full_segments, remaining, fp, 1090 | seg_base, size_of_segments); 1091 | debugf(DBG_LEVEL_EXT, "exit 0: cloudfs_object_write_fp(%s)", path); 1092 | return 1; 1093 | } 1094 | 1095 | int response = send_request("GET", encoded, fp, NULL, NULL, NULL, path); 1096 | curl_free(encoded); 1097 | fflush(fp); 1098 | if ((response >= 200 && response < 300) || ftruncate(fileno(fp), 0)) 1099 | { 1100 | debugf(DBG_LEVEL_EXT, "exit 1: cloudfs_object_write_fp(%s)", path); 1101 | return 1; 1102 | } 1103 | rewind(fp); 1104 | debugf(DBG_LEVEL_EXT, "exit 2: cloudfs_object_write_fp(%s)", path); 1105 | return 0; 1106 | } 1107 | 1108 | int cloudfs_object_truncate(const char* path, off_t size) 1109 | { 1110 | char* encoded = curl_escape(path, 0); 1111 | int response; 1112 | if (size == 0) 1113 | { 1114 | FILE* fp = fopen("/dev/null", "r"); 1115 | response = send_request("PUT", encoded, fp, NULL, NULL, NULL, path); 1116 | fclose(fp); 1117 | } 1118 | else 1119 | { 1120 | //TODO: this is busted 1121 | response = send_request("GET", encoded, NULL, NULL, NULL, NULL, path); 1122 | } 1123 | curl_free(encoded); 1124 | return (response >= 200 && response < 300); 1125 | } 1126 | 1127 | //get metadata from cloud, like time attribs. create new entry if not cached yet. 
1128 | void get_file_metadata(dir_entry* de) 1129 | { 1130 | if (de->size == 0 && !de->isdir && !de->metadata_downloaded) 1131 | { 1132 | //this can be a potential segmented file, try to read segments size 1133 | debugf(DBG_LEVEL_EXT, KMAG"ZERO size file=%s", de->full_name); 1134 | char seg_base[MAX_URL_SIZE] = ""; 1135 | long segments; 1136 | long full_segments; 1137 | long remaining; 1138 | long size_of_segments; 1139 | long total_size; 1140 | if (format_segments(de->full_name, seg_base, &segments, &full_segments, 1141 | &remaining, 1142 | &size_of_segments, &total_size)) 1143 | de->size = total_size; 1144 | } 1145 | if (option_get_extended_metadata) 1146 | { 1147 | debugf(DBG_LEVEL_EXT, KCYN "get_file_metadata(%s)", de->full_name); 1148 | //retrieve additional file metadata with a quick HEAD query 1149 | char* encoded = curl_escape(de->full_name, 0); 1150 | de->metadata_downloaded = true; 1151 | int response = send_request("GET", encoded, NULL, NULL, NULL, de, 1152 | de->full_name); 1153 | curl_free(encoded); 1154 | debugf(DBG_LEVEL_EXT, KCYN "exit: get_file_metadata(%s)", de->full_name); 1155 | } 1156 | return; 1157 | } 1158 | 1159 | //get list of folders from cloud 1160 | // return 1 for OK, 0 for error 1161 | int cloudfs_list_directory(const char* path, dir_entry** dir_list) 1162 | { 1163 | debugf(DBG_LEVEL_EXT, "cloudfs_list_directory(%s)", path); 1164 | char container[MAX_PATH_SIZE * 3] = ""; 1165 | char object[MAX_PATH_SIZE] = ""; 1166 | char last_subdir[MAX_PATH_SIZE] = ""; 1167 | int prefix_length = 0; 1168 | int response = 0; 1169 | int retval = 0; 1170 | int entry_count = 0; 1171 | 1172 | *dir_list = NULL; 1173 | xmlNode* onode = NULL, *anode = NULL, *text_node = NULL; 1174 | xmlParserCtxtPtr xmlctx = xmlCreatePushParserCtxt(NULL, NULL, "", 0, NULL); 1175 | if (!strcmp(path, "") || !strcmp(path, "/")) 1176 | { 1177 | path = ""; 1178 | strncpy(container, "/?format=xml", sizeof(container)); 1179 | } 1180 | else 1181 | { 1182 | sscanf(path, 
"/%[^/]/%[^\n]", container, object); 1183 | char* encoded_container = curl_escape(container, 0); 1184 | char* encoded_object = curl_escape(object, 0); 1185 | 1186 | // The empty path doesn't get a trailing slash, everything else does 1187 | char* trailing_slash; 1188 | prefix_length = strlen(object); 1189 | if (object[0] == 0) 1190 | trailing_slash = ""; 1191 | else 1192 | { 1193 | trailing_slash = "/"; 1194 | prefix_length++; 1195 | } 1196 | snprintf(container, sizeof(container), "%s?format=xml&delimiter=/&prefix=%s%s", 1197 | encoded_container, encoded_object, trailing_slash); 1198 | curl_free(encoded_container); 1199 | curl_free(encoded_object); 1200 | } 1201 | 1202 | if ((!strcmp(path, "") || !strcmp(path, "/")) && *override_storage_url) 1203 | response = 404; 1204 | else 1205 | { 1206 | // this was generating 404 err on non segmented files (small files) 1207 | response = send_request("GET", container, NULL, xmlctx, NULL, NULL, path); 1208 | } 1209 | 1210 | if (response >= 200 && response < 300) 1211 | xmlParseChunk(xmlctx, "", 0, 1); 1212 | if (response >= 200 && response < 300 && xmlctx->wellFormed ) 1213 | { 1214 | xmlNode* root_element = xmlDocGetRootElement(xmlctx->myDoc); 1215 | for (onode = root_element->children; onode; onode = onode->next) 1216 | { 1217 | if (onode->type != XML_ELEMENT_NODE) continue; 1218 | char is_object = !strcasecmp((const char*)onode->name, "object"); 1219 | char is_container = !strcasecmp((const char*)onode->name, "container"); 1220 | char is_subdir = !strcasecmp((const char*)onode->name, "subdir"); 1221 | 1222 | if (is_object || is_container || is_subdir) 1223 | { 1224 | entry_count++; 1225 | dir_entry* de = init_dir_entry(); 1226 | // useful docs on nodes here: http://developer.openstack.org/api-ref-objectstorage-v1.html 1227 | if (is_container || is_subdir) 1228 | de->content_type = strdup("application/directory"); 1229 | for (anode = onode->children; anode; anode = anode->next) 1230 | { 1231 | char* content = ""; 1232 | for 
(text_node = anode->children; text_node; text_node = text_node->next) 1233 | { 1234 | if (text_node->type == XML_TEXT_NODE) 1235 | { 1236 | content = (char*)text_node->content; 1237 | //debugf(DBG_LEVEL_NORM, "List dir anode=%s content=%s", (const char *)anode->name, content); 1238 | } 1239 | else 1240 | { 1241 | //debugf(DBG_LEVEL_NORM, "List dir anode=%s", (const char *)anode->name); 1242 | } 1243 | } 1244 | if (!strcasecmp((const char*)anode->name, "name")) 1245 | { 1246 | de->name = strdup(content + prefix_length); 1247 | // Remove trailing slash 1248 | char* slash = strrchr(de->name, '/'); 1249 | if (slash && (0 == *(slash + 1))) 1250 | *slash = 0; 1251 | if (asprintf(&(de->full_name), "%s/%s", path, de->name) < 0) 1252 | de->full_name = NULL; 1253 | } 1254 | if (!strcasecmp((const char*)anode->name, "bytes")) 1255 | de->size = strtoll(content, NULL, 10); 1256 | if (!strcasecmp((const char*)anode->name, "content_type")) 1257 | { 1258 | de->content_type = strdup(content); 1259 | char* semicolon = strchr(de->content_type, ';'); 1260 | if (semicolon) 1261 | *semicolon = '\0'; 1262 | } 1263 | if (!strcasecmp((const char*)anode->name, "hash")) 1264 | de->md5sum = strdup(content); 1265 | if (!strcasecmp((const char*)anode->name, "last_modified")) 1266 | { 1267 | time_t last_modified_t = get_time_from_str_as_gmt(content); 1268 | char local_time_str[64]; 1269 | time_t local_time_t = get_time_as_local(last_modified_t, local_time_str, 1270 | sizeof(local_time_str)); 1271 | de->last_modified = local_time_t; 1272 | de->ctime.tv_sec = local_time_t; 1273 | de->ctime.tv_nsec = 0; 1274 | //initialise all fields with hubic last modified date in case the file does not have extended attributes set 1275 | de->mtime.tv_sec = local_time_t; 1276 | de->mtime.tv_nsec = 0; 1277 | de->atime.tv_sec = local_time_t; 1278 | de->atime.tv_nsec = 0; 1279 | //todo: how can we retrieve and set nanoseconds, are stored by hubic? 
1280 | } 1281 | } 1282 | de->isdir = de->content_type && 1283 | ((strstr(de->content_type, "application/folder") != NULL) || 1284 | (strstr(de->content_type, "application/directory") != NULL)); 1285 | de->islink = de->content_type && 1286 | ((strstr(de->content_type, "application/link") != NULL)); 1287 | if (de->isdir) 1288 | { 1289 | //i guess this will remove a dir_entry from cache if is there already 1290 | if (!strncasecmp(de->name, last_subdir, sizeof(last_subdir))) 1291 | { 1292 | //todo: check why is needed and if memory is freed properly, seems to generate many missed delete operations 1293 | //cloudfs_free_dir_list(de); 1294 | debugf(DBG_LEVEL_EXT, 1295 | "cloudfs_list_directory: "KYEL"ignore "KNRM"cloudfs_free_dir_list(%s) command", 1296 | de->name); 1297 | continue; 1298 | } 1299 | strncpy(last_subdir, de->name, sizeof(last_subdir)); 1300 | } 1301 | de->next = *dir_list; 1302 | *dir_list = de; 1303 | char time_str[TIME_CHARS] = { 0 }; 1304 | get_timespec_as_str(&(de->mtime), time_str, sizeof(time_str)); 1305 | debugf(DBG_LEVEL_EXT, KCYN"new dir_entry %s size=%d %s dir=%d lnk=%d mod=[%s]", 1306 | de->full_name, de->size, de->content_type, de->isdir, de->islink, time_str); 1307 | } 1308 | else 1309 | debugf(DBG_LEVEL_EXT, "unknown element: %s", onode->name); 1310 | } 1311 | retval = 1; 1312 | } 1313 | else if ((!strcmp(path, "") || !strcmp(path, "/")) && *override_storage_url) 1314 | { 1315 | entry_count = 1; 1316 | debugf(DBG_LEVEL_NORM, "Init cache entry container=[%s]", public_container); 1317 | dir_entry* de = init_dir_entry(); 1318 | de->name = strdup(public_container); 1319 | struct tm last_modified; 1320 | //todo: check what this default time means? 
1321 | strptime("1388434648.01238", "%FT%T", &last_modified); 1322 | de->last_modified = mktime(&last_modified); 1323 | de->content_type = strdup("application/directory"); 1324 | if (asprintf(&(de->full_name), "%s/%s", path, de->name) < 0) 1325 | de->full_name = NULL; 1326 | de->isdir = 1; 1327 | de->islink = 0; 1328 | de->size = 4096; 1329 | de->next = *dir_list; 1330 | *dir_list = de; 1331 | retval = 1; 1332 | } 1333 | xmlFreeDoc(xmlctx->myDoc); 1334 | xmlFreeParserCtxt(xmlctx); 1335 | debugf(DBG_LEVEL_EXT, "exit: cloudfs_list_directory(%s)", path); 1336 | return retval; 1337 | } 1338 | 1339 | int cloudfs_delete_object(const char* path) 1340 | { 1341 | debugf(DBG_LEVEL_EXT, "cloudfs_delete_object(%s)", path); 1342 | char seg_base[MAX_URL_SIZE] = ""; 1343 | 1344 | long segments; 1345 | long full_segments; 1346 | long remaining; 1347 | long size_of_segments; 1348 | long total_size; 1349 | 1350 | if (format_segments(path, seg_base, &segments, &full_segments, &remaining, 1351 | &size_of_segments, &total_size)) 1352 | { 1353 | int response; 1354 | int i; 1355 | char seg_path[MAX_URL_SIZE] = ""; 1356 | for (i = 0; i < segments; i++) 1357 | { 1358 | snprintf(seg_path, MAX_URL_SIZE, "%s%08i", seg_base, i); 1359 | char* encoded = curl_escape(seg_path, 0); 1360 | response = send_request("DELETE", encoded, NULL, NULL, NULL, NULL, seg_path); 1361 | if (response < 200 || response >= 300) 1362 | { 1363 | debugf(DBG_LEVEL_EXT, "exit 1: cloudfs_delete_object(%s) response=%d", path, 1364 | response); 1365 | return 0; 1366 | } 1367 | } 1368 | } 1369 | 1370 | char* encoded = curl_escape(path, 0); 1371 | int response = send_request("DELETE", encoded, NULL, NULL, NULL, NULL, path); 1372 | curl_free(encoded); 1373 | int ret = (response >= 200 && response < 300); 1374 | debugf(DBG_LEVEL_EXT, "status: cloudfs_delete_object(%s) response=%d", path, 1375 | response); 1376 | if (response == 409) 1377 | { 1378 | debugf(DBG_LEVEL_EXT, "status: cloudfs_delete_object(%s) NOT EMPTY", path); 1379 
| ret = -1; 1380 | } 1381 | return ret; 1382 | } 1383 | 1384 | //fixme: this op does not preserve src attributes (e.g. will make rsync not work well) 1385 | // https://ask.openstack.org/en/question/14307/is-there-a-way-to-moverename-an-object/ 1386 | // this operation also causes an HTTP 400 error if X-Object-Meta-FilePath value is larger than 256 chars 1387 | int cloudfs_copy_object(const char* src, const char* dst) 1388 | { 1389 | debugf(DBG_LEVEL_EXT, "cloudfs_copy_object(%s, %s) lensrc=%d, lendst=%d", src, 1390 | dst, strlen(src), strlen(dst)); 1391 | 1392 | char* dst_encoded = curl_escape(dst, 0); 1393 | char* src_encoded = curl_escape(src, 0); 1394 | 1395 | //convert encoded string (slashes are encoded as well) to encoded string with slashes 1396 | char* slash; 1397 | while ((slash = strstr(src_encoded, "%2F")) 1398 | || (slash = strstr(src_encoded, "%2f"))) 1399 | { 1400 | *slash = '/'; 1401 | memmove(slash + 1, slash + 3, strlen(slash + 3) + 1); 1402 | } 1403 | 1404 | curl_slist* headers = NULL; 1405 | add_header(&headers, "X-Copy-From", src_encoded); 1406 | add_header(&headers, "Content-Length", "0"); 1407 | //get source file entry 1408 | dir_entry* de_src = check_path_info(src); 1409 | if (de_src) 1410 | debugf(DBG_LEVEL_EXT, "status cloudfs_copy_object(%s, %s): src file found", 1411 | src, dst); 1412 | else 1413 | debugf(DBG_LEVEL_NORM, 1414 | KRED"status cloudfs_copy_object(%s, %s): src file NOT found", src, dst); 1415 | //pass src metadata so that PUT will set time attributes of the src file 1416 | int response = send_request("PUT", dst_encoded, NULL, NULL, headers, de_src, 1417 | dst); 1418 | curl_free(dst_encoded); 1419 | curl_free(src_encoded); 1420 | curl_slist_free_all(headers); 1421 | debugf(DBG_LEVEL_EXT, "exit: cloudfs_copy_object(%s,%s) response=%d", src, dst, 1422 | response); 1423 | return (response >= 200 && response < 300); 1424 | } 1425 | 1426 | // http://developer.openstack.org/api-ref-objectstorage-v1.html#updateObjectMeta 1427 | int 
cloudfs_update_meta(dir_entry* de) 1428 | { 1429 | int response = cloudfs_copy_object(de->full_name, de->full_name); 1430 | return response; 1431 | } 1432 | 1433 | //optimised with cache 1434 | int cloudfs_statfs(const char* path, struct statvfs* stat) 1435 | { 1436 | time_t now = get_time_now(); 1437 | int lapsed = now - last_stat_read_time; 1438 | if (lapsed > option_cache_statfs_timeout) 1439 | { 1440 | //todo: check why stat head request is always set to /, why not path? 1441 | int response = send_request("HEAD", "/", NULL, NULL, NULL, NULL, "/"); 1442 | *stat = statcache; 1443 | debugf(DBG_LEVEL_EXT, 1444 | "exit: cloudfs_statfs (new recent values, was cached since %d seconds)", 1445 | lapsed); 1446 | last_stat_read_time = now; 1447 | return (response >= 200 && response < 300); 1448 | } 1449 | else 1450 | { 1451 | debugf(DBG_LEVEL_EXT, 1452 | "exit: cloudfs_statfs (old values, cached since %d seconds)", lapsed); 1453 | return 1; 1454 | } 1455 | } 1456 | 1457 | int cloudfs_create_symlink(const char* src, const char* dst) 1458 | { 1459 | char* dst_encoded = curl_escape(dst, 0); 1460 | FILE* lnk = tmpfile(); 1461 | fwrite(src, 1, strlen(src), lnk); 1462 | fwrite("\0", 1, 1, lnk); 1463 | int response = send_request("MKLINK", dst_encoded, lnk, NULL, NULL, NULL, dst); 1464 | curl_free(dst_encoded); 1465 | fclose(lnk); 1466 | return (response >= 200 && response < 300); 1467 | } 1468 | 1469 | int cloudfs_create_directory(const char* path) 1470 | { 1471 | debugf(DBG_LEVEL_EXT, "cloudfs_create_directory(%s)", path); 1472 | char* encoded = curl_escape(path, 0); 1473 | int response = send_request("MKDIR", encoded, NULL, NULL, NULL, NULL, path); 1474 | curl_free(encoded); 1475 | debugf(DBG_LEVEL_EXT, "cloudfs_create_directory(%s) response=%d", path, 1476 | response); 1477 | return (response >= 200 && response < 300); 1478 | } 1479 | 1480 | off_t cloudfs_file_size(int fd) 1481 | { 1482 | struct stat buf; 1483 | fstat(fd, &buf); 1484 | return buf.st_size; 1485 | } 1486 | 
1487 | void cloudfs_verify_ssl(int vrfy) 1488 | { 1489 | verify_ssl = vrfy ? 2 : 0; 1490 | } 1491 | 1492 | void cloudfs_option_get_extended_metadata(int option) 1493 | { 1494 | option_get_extended_metadata = option ? true : false; 1495 | } 1496 | 1497 | void cloudfs_option_curl_verbose(int option) 1498 | { 1499 | option_curl_verbose = option ? true : false; 1500 | } 1501 | 1502 | static struct 1503 | { 1504 | char client_id [MAX_HEADER_SIZE]; 1505 | char client_secret[MAX_HEADER_SIZE]; 1506 | char refresh_token[MAX_HEADER_SIZE]; 1507 | } reconnect_args; 1508 | 1509 | void cloudfs_set_credentials(char* client_id, char* client_secret, 1510 | char* refresh_token) 1511 | { 1512 | strncpy(reconnect_args.client_id , client_id , 1513 | sizeof(reconnect_args.client_id )); 1514 | strncpy(reconnect_args.client_secret, client_secret, 1515 | sizeof(reconnect_args.client_secret)); 1516 | strncpy(reconnect_args.refresh_token, refresh_token, 1517 | sizeof(reconnect_args.refresh_token)); 1518 | } 1519 | 1520 | struct htmlString 1521 | { 1522 | char* text; 1523 | size_t size; 1524 | }; 1525 | 1526 | static size_t writefunc_string(void* contents, size_t size, size_t nmemb, 1527 | void* data) 1528 | { 1529 | struct htmlString* mem = (struct htmlString*) data; 1530 | size_t realsize = size * nmemb; 1531 | mem->text = realloc(mem->text, mem->size + realsize + 1); 1532 | if (mem->text == NULL) /* out of memory! 
*/ 1533 | { 1534 | perror(__FILE__); 1535 | exit(EXIT_FAILURE); 1536 | } 1537 | 1538 | memcpy(&(mem->text[mem->size]), contents, realsize); 1539 | mem->size += realsize; 1540 | return realsize; 1541 | } 1542 | 1543 | char* htmlStringGet(CURL* curl) 1544 | { 1545 | struct htmlString chunk; 1546 | chunk.text = malloc(sizeof(char)); 1547 | chunk.size = 0; 1548 | chunk.text[0] = '\0';//added to avoid valgrind unitialised warning 1549 | 1550 | curl_easy_setopt(curl, CURLOPT_WRITEDATA, &chunk); 1551 | do 1552 | { 1553 | curl_easy_perform(curl); 1554 | } 1555 | while (chunk.size == 0); 1556 | 1557 | chunk.text[chunk.size] = '\0'; 1558 | return chunk.text; 1559 | } 1560 | 1561 | /* thanks to http://devenix.wordpress.com */ 1562 | char* unbase64(unsigned char* input, int length) 1563 | { 1564 | BIO* b64, *bmem; 1565 | 1566 | char* buffer = (char*)malloc(length); 1567 | memset(buffer, 0, length); 1568 | 1569 | b64 = BIO_new(BIO_f_base64()); 1570 | bmem = BIO_new_mem_buf(input, length); 1571 | bmem = BIO_push(b64, bmem); 1572 | BIO_set_flags(bmem, BIO_FLAGS_BASE64_NO_NL); 1573 | 1574 | BIO_read(bmem, buffer, length); 1575 | 1576 | BIO_free_all(bmem); 1577 | 1578 | return buffer; 1579 | } 1580 | 1581 | int safe_json_string(json_object* jobj, char* buffer, char* name) 1582 | { 1583 | int result = 0; 1584 | 1585 | if (jobj) 1586 | { 1587 | json_object* o; 1588 | int found; 1589 | found = json_object_object_get_ex(jobj, name, &o); 1590 | if (found) 1591 | { 1592 | strcpy (buffer, json_object_get_string(o)); 1593 | result = 1; 1594 | } 1595 | } 1596 | 1597 | if (!result) 1598 | debugf(DBG_LEVEL_NORM, KRED"HUBIC cannot get json field '%s'\n", name); 1599 | 1600 | return result; 1601 | } 1602 | 1603 | int cloudfs_connect() 1604 | { 1605 | #define HUBIC_TOKEN_URL "https://api.hubic.com/oauth/token" 1606 | #define HUBIC_CRED_URL "https://api.hubic.com/1.0/account/credentials" 1607 | #define HUBIC_CLIENT_ID (reconnect_args.client_id) 1608 | #define HUBIC_CLIENT_SECRET 
(reconnect_args.client_secret) 1609 | #define HUBIC_REFRESH_TOKEN (reconnect_args.refresh_token) 1610 | #define HUBIC_OPTIONS_SIZE 2048 1611 | 1612 | long response = -1; 1613 | char url[HUBIC_OPTIONS_SIZE]; 1614 | char payload[HUBIC_OPTIONS_SIZE]; 1615 | struct json_object* json_obj; 1616 | 1617 | pthread_mutex_lock(&pool_mut); 1618 | debugf(DBG_LEVEL_NORM, "Authenticating... (client_id = '%s')", 1619 | HUBIC_CLIENT_ID); 1620 | storage_token[0] = storage_url[0] = '\0'; 1621 | CURL* curl = curl_easy_init(); 1622 | 1623 | /* curl default options */ 1624 | curl_easy_setopt(curl, CURLOPT_VERBOSE, debug); 1625 | curl_easy_setopt(curl, CURLOPT_USERAGENT, USER_AGENT); 1626 | curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1); 1627 | curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, verify_ssl ? 1 : 0); 1628 | curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, verify_ssl); 1629 | curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10); 1630 | curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10); 1631 | curl_easy_setopt(curl, CURLOPT_FORBID_REUSE, 1); 1632 | curl_easy_setopt(curl, CURLOPT_VERBOSE, 0L); 1633 | curl_easy_setopt(curl, CURLOPT_POST, 0L); 1634 | curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 0); 1635 | curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, writefunc_string); 1636 | 1637 | /* Step 1 : request a token - Not needed anymore with refresh_token */ 1638 | /* Step 2 : get request code - Not needed anymore with refresh_token */ 1639 | /* Step 3 : get access token */ 1640 | 1641 | sprintf(payload, "refresh_token=%s&grant_type=refresh_token", 1642 | HUBIC_REFRESH_TOKEN); 1643 | curl_easy_setopt(curl, CURLOPT_URL, HUBIC_TOKEN_URL); 1644 | curl_easy_setopt(curl, CURLOPT_POST, 1L); 1645 | curl_easy_setopt(curl, CURLOPT_HEADER, 0); 1646 | curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload); 1647 | curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, strlen(payload)); 1648 | curl_easy_setopt(curl, CURLOPT_USERNAME, HUBIC_CLIENT_ID); 1649 | curl_easy_setopt(curl, CURLOPT_PASSWORD, HUBIC_CLIENT_SECRET); 
1650 | curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC); 1651 | 1652 | char* json_str = htmlStringGet(curl); 1653 | json_obj = json_tokener_parse(json_str); 1654 | debugf(DBG_LEVEL_NORM, "HUBIC TOKEN_URL result: '%s'\n", json_str); 1655 | free(json_str); 1656 | 1657 | char access_token[HUBIC_OPTIONS_SIZE]; 1658 | char token_type[HUBIC_OPTIONS_SIZE]; 1659 | int expire_sec; 1660 | int found; 1661 | json_object* o; 1662 | 1663 | if (!safe_json_string(json_obj, access_token, "access_token")) 1664 | goto error; 1665 | if (!safe_json_string(json_obj, token_type, "token_type")) 1666 | goto error; 1667 | 1668 | found = json_object_object_get_ex(json_obj, "expires_in", &o); 1669 | expire_sec = json_object_get_int(o); 1670 | debugf(DBG_LEVEL_NORM, "HUBIC Access token: %s\n", access_token); 1671 | debugf(DBG_LEVEL_NORM, "HUBIC Token type : %s\n", token_type); 1672 | debugf(DBG_LEVEL_NORM, "HUBIC Expire in : %d\n", expire_sec); 1673 | 1674 | /* Step 4 : request OpenStack storage URL */ 1675 | curl_easy_setopt(curl, CURLOPT_URL, HUBIC_CRED_URL); 1676 | curl_easy_setopt(curl, CURLOPT_POST, 0L); 1677 | curl_easy_setopt(curl, CURLOPT_HEADER, 0); 1678 | curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_NONE); 1679 | 1680 | /* create the Bearer authentication header */ 1681 | curl_slist* headers = NULL; 1682 | sprintf (payload, "Bearer %s", access_token); 1683 | add_header(&headers, "Authorization", payload); 1684 | curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); 1685 | 1686 | char token[HUBIC_OPTIONS_SIZE]; 1687 | char endpoint[HUBIC_OPTIONS_SIZE]; 1688 | char expires[HUBIC_OPTIONS_SIZE]; 1689 | json_str = htmlStringGet(curl); 1690 | json_obj = json_tokener_parse(json_str); 1691 | debugf(DBG_LEVEL_NORM, "CRED_URL result: '%s'\n", json_str); 1692 | free(json_str); 1693 | 1694 | if (!safe_json_string(json_obj, token, "token")) 1695 | goto error; 1696 | if (!safe_json_string(json_obj, endpoint, "endpoint")) 1697 | goto error; 1698 | if (!safe_json_string(json_obj, 
expires, "expires")) 1699 | goto error; 1700 | 1701 | /* set the global storage_url and storage_token, the only parameters needed for swift */ 1702 | strcpy (storage_url, endpoint); 1703 | strcpy (storage_token, token); 1704 | curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response); 1705 | curl_easy_cleanup(curl); 1706 | pthread_mutex_unlock(&pool_mut); 1707 | return (response >= 200 && response < 300 && storage_token[0] 1708 | && storage_url[0]); 1709 | 1710 | error: 1711 | pthread_mutex_unlock(&pool_mut); 1712 | return 0; 1713 | } 1714 | -------------------------------------------------------------------------------- /cloudfsapi.h: -------------------------------------------------------------------------------- 1 | #ifndef _CLOUDFSAPI_H 2 | #define _CLOUDFSAPI_H 3 | 4 | #include 5 | #include 6 | #define FUSE_USE_VERSION 30 7 | #include 8 | #include 9 | 10 | #define BUFFER_INITIAL_SIZE 4096 11 | #define MAX_HEADER_SIZE 8192 12 | 13 | #define MAX_URL_SIZE (MAX_PATH_SIZE * 3) 14 | #define USER_AGENT "CloudFuse" 15 | #define OPTION_SIZE 1024 16 | 17 | typedef struct curl_slist curl_slist; 18 | 19 | #define MINIMAL_PROGRESS_FUNCTIONALITY_INTERVAL 5 20 | struct curl_progress 21 | { 22 | double lastruntime; 23 | CURL* curl; 24 | }; 25 | 26 | typedef struct options 27 | { 28 | char cache_timeout[OPTION_SIZE]; 29 | char verify_ssl[OPTION_SIZE]; 30 | char segment_size[OPTION_SIZE]; 31 | char segment_above[OPTION_SIZE]; 32 | char storage_url[OPTION_SIZE]; 33 | char container[OPTION_SIZE]; 34 | char temp_dir[OPTION_SIZE]; 35 | char client_id[OPTION_SIZE]; 36 | char client_secret[OPTION_SIZE]; 37 | char refresh_token[OPTION_SIZE]; 38 | } FuseOptions; 39 | 40 | typedef struct extra_options 41 | { 42 | char settings_filename[MAX_PATH_SIZE]; 43 | char get_extended_metadata[OPTION_SIZE]; 44 | char curl_verbose[OPTION_SIZE]; 45 | char cache_statfs_timeout[OPTION_SIZE]; 46 | char debug_level[OPTION_SIZE]; 47 | char curl_progress_state[OPTION_SIZE]; 48 | char 
enable_chmod[OPTION_SIZE]; 49 | char enable_chown[OPTION_SIZE]; 50 | char help[OPTION_SIZE]; 51 | } ExtraFuseOptions; 52 | 53 | void cloudfs_init(void); 54 | void cloudfs_free(void); 55 | void cloudfs_set_credentials(char* client_id, char* client_secret, 56 | char* refresh_token); 57 | int cloudfs_connect(void); 58 | 59 | struct segment_info 60 | { 61 | FILE* fp; 62 | int part; 63 | long size; 64 | long segment_size; 65 | char* seg_base; 66 | const char* method; 67 | }; 68 | 69 | extern long segment_size; 70 | extern long segment_above; 71 | 72 | extern char* override_storage_url; 73 | extern char* public_container; 74 | 75 | int file_is_readable(const char* fname); 76 | const char* get_file_mimetype ( const char* filename ); 77 | int cloudfs_object_read_fp(const char* path, FILE* fp); 78 | int cloudfs_object_write_fp(const char* path, FILE* fp); 79 | int cloudfs_list_directory(const char* path, dir_entry**); 80 | int cloudfs_delete_object(const char* path); 81 | int cloudfs_copy_object(const char* src, const char* dst); 82 | int cloudfs_create_symlink(const char* src, const char* dst); 83 | int cloudfs_create_directory(const char* label); 84 | int cloudfs_object_truncate(const char* path, off_t size); 85 | off_t cloudfs_file_size(int fd); 86 | int cloudfs_statfs(const char* path, struct statvfs* stat); 87 | void cloudfs_verify_ssl(int dbg); 88 | void cloudfs_option_get_extended_metadata(int option); 89 | void cloudfs_option_curl_verbose(int option); 90 | void get_file_metadata(dir_entry* de); 91 | int cloudfs_update_meta(dir_entry* de); 92 | #endif 93 | -------------------------------------------------------------------------------- /cloudfuse.c: -------------------------------------------------------------------------------- 1 | #define FUSE_USE_VERSION 30 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | #include 16 | #include 17 | 
#include 18 | #include "commonfs.h" 19 | #include "cloudfsapi.h" 20 | #include "config.h" 21 | 22 | extern char* temp_dir; 23 | extern pthread_mutex_t dcachemut; 24 | extern pthread_mutexattr_t mutex_attr; 25 | extern int debug; 26 | extern int cache_timeout; 27 | extern int option_cache_statfs_timeout; 28 | extern int option_debug_level; 29 | extern bool option_get_extended_metadata; 30 | extern bool option_curl_progress_state; 31 | extern bool option_enable_chown; 32 | extern bool option_enable_chmod; 33 | extern size_t file_buffer_size; 34 | 35 | static int cfs_getattr(const char* path, struct stat* stbuf) 36 | { 37 | debugf(DBG_LEVEL_NORM, KBLU "cfs_getattr(%s)", path); 38 | 39 | 40 | //return standard values for root folder 41 | if (!strcmp(path, "/")) 42 | { 43 | stbuf->st_uid = geteuid(); 44 | stbuf->st_gid = getegid(); 45 | stbuf->st_mode = S_IFDIR | 0755; 46 | stbuf->st_nlink = 2; 47 | debug_list_cache_content(); 48 | debugf(DBG_LEVEL_NORM, KBLU "exit 0: cfs_getattr(%s)", path); 49 | return 0; 50 | } 51 | //get file. if not in cache will be downloaded. 
52 | dir_entry* de = path_info(path); 53 | if (!de) 54 | { 55 | debug_list_cache_content(); 56 | debugf(DBG_LEVEL_NORM, KBLU"exit 1: cfs_getattr(%s) "KYEL"not-in-cache/cloud", 57 | path); 58 | return -ENOENT; 59 | } 60 | 61 | //lazzy download of file metadata, only when really needed 62 | if (option_get_extended_metadata && !de->metadata_downloaded) 63 | get_file_metadata(de); 64 | if (option_enable_chown) 65 | { 66 | stbuf->st_uid = de->uid; 67 | stbuf->st_gid = de->gid; 68 | } 69 | else 70 | { 71 | stbuf->st_uid = geteuid(); 72 | stbuf->st_gid = getegid(); 73 | } 74 | // change needed due to utimens 75 | stbuf->st_atime = de->atime.tv_sec; 76 | stbuf->st_mtime = de->mtime.tv_sec; 77 | stbuf->st_ctime = de->ctime.tv_sec; 78 | #ifdef __APPLE__ 79 | stbuf->st_atimespec.tv_nsec = de->atime.tv_nsec; 80 | stbuf->st_mtimespec.tv_nsec = de->mtime.tv_nsec; 81 | stbuf->st_ctimespec.tv_nsec = de->ctime.tv_nsec; 82 | #else 83 | stbuf->st_atim.tv_nsec = de->atime.tv_nsec; 84 | stbuf->st_mtim.tv_nsec = de->mtime.tv_nsec; 85 | stbuf->st_ctim.tv_nsec = de->ctime.tv_nsec; 86 | #endif 87 | char time_str[TIME_CHARS] = ""; 88 | get_timespec_as_str(&(de->atime), time_str, sizeof(time_str)); 89 | debugf(DBG_LEVEL_EXT, KCYN"cfs_getattr: atime=[%s]", time_str); 90 | get_timespec_as_str(&(de->mtime), time_str, sizeof(time_str)); 91 | debugf(DBG_LEVEL_EXT, KCYN"cfs_getattr: mtime=[%s]", time_str); 92 | get_timespec_as_str(&(de->ctime), time_str, sizeof(time_str)); 93 | debugf(DBG_LEVEL_EXT, KCYN"cfs_getattr: ctime=[%s]", time_str); 94 | 95 | int default_mode_dir, default_mode_file; 96 | 97 | if (option_enable_chmod) 98 | { 99 | default_mode_dir = de->chmod; 100 | default_mode_file = de->chmod; 101 | } 102 | else 103 | { 104 | default_mode_dir = 0755; 105 | default_mode_file = 0666; 106 | } 107 | 108 | if (de->isdir) 109 | { 110 | stbuf->st_size = 0; 111 | stbuf->st_mode = S_IFDIR | default_mode_dir; 112 | stbuf->st_nlink = 2; 113 | } 114 | else if (de->islink) 115 | { 116 | stbuf->st_size 
= 1; 117 | stbuf->st_mode = S_IFLNK | default_mode_dir; 118 | stbuf->st_nlink = 1; 119 | stbuf->st_size = de->size; 120 | /* calc. blocks as if 4K blocksize filesystem; stat uses units of 512B */ 121 | stbuf->st_blocks = ((4095 + de->size) / 4096) * 8; 122 | } 123 | else 124 | { 125 | stbuf->st_size = de->size; 126 | /* calc. blocks as if 4K blocksize filesystem; stat uses units of 512B */ 127 | stbuf->st_blocks = ((4095 + de->size) / 4096) * 8; 128 | stbuf->st_mode = S_IFREG | default_mode_file; 129 | stbuf->st_nlink = 1; 130 | } 131 | debugf(DBG_LEVEL_NORM, KBLU "exit 2: cfs_getattr(%s)", path); 132 | return 0; 133 | } 134 | 135 | static int cfs_fgetattr(const char* path, struct stat* stbuf, 136 | struct fuse_file_info* info) 137 | { 138 | debugf(DBG_LEVEL_NORM, KBLU "cfs_fgetattr(%s)", path); 139 | openfile* of = (openfile*)(uintptr_t)info->fh; 140 | if (of) 141 | { 142 | //get file. if not in cache will be downloaded. 143 | dir_entry* de = path_info(path); 144 | if (!de) 145 | { 146 | debug_list_cache_content(); 147 | debugf(DBG_LEVEL_NORM, KBLU"exit 1: cfs_fgetattr(%s) "KYEL"not-in-cache/cloud", 148 | path); 149 | return -ENOENT; 150 | } 151 | int default_mode_file; 152 | if (option_enable_chmod) 153 | default_mode_file = de->chmod; 154 | else 155 | default_mode_file = 0666; 156 | 157 | stbuf->st_size = cloudfs_file_size(of->fd); 158 | stbuf->st_mode = S_IFREG | default_mode_file; 159 | stbuf->st_nlink = 1; 160 | debugf(DBG_LEVEL_NORM, KBLU "exit 0: cfs_fgetattr(%s)", path); 161 | return 0; 162 | } 163 | debugf(DBG_LEVEL_NORM, KRED "exit 1: cfs_fgetattr(%s)", path); 164 | return -ENOENT; 165 | } 166 | 167 | static int cfs_readdir(const char* path, void* buf, fuse_fill_dir_t filldir, 168 | off_t offset, struct fuse_file_info* info) 169 | { 170 | debugf(DBG_LEVEL_NORM, KBLU "cfs_readdir(%s)", path); 171 | dir_entry* de; 172 | if (!caching_list_directory(path, &de)) 173 | { 174 | debug_list_cache_content(); 175 | debugf(DBG_LEVEL_NORM, KRED "exit 0: 
cfs_readdir(%s)", path); 176 | return -ENOLINK; 177 | } 178 | filldir(buf, ".", NULL, 0); 179 | filldir(buf, "..", NULL, 0); 180 | for (; de; de = de->next) 181 | filldir(buf, de->name, NULL, 0); 182 | debug_list_cache_content(); 183 | debugf(DBG_LEVEL_NORM, KBLU "exit 1: cfs_readdir(%s)", path); 184 | return 0; 185 | } 186 | 187 | static int cfs_mkdir(const char* path, mode_t mode) 188 | { 189 | debugf(DBG_LEVEL_NORM, KBLU "cfs_mkdir(%s)", path); 190 | int response = cloudfs_create_directory(path); 191 | if (response) 192 | { 193 | update_dir_cache(path, 0, 1, 0); 194 | debug_list_cache_content(); 195 | debugf(DBG_LEVEL_NORM, KBLU "exit 0: cfs_mkdir(%s)", path); 196 | return 0; 197 | } 198 | debugf(DBG_LEVEL_NORM, KRED "exit 1: cfs_mkdir(%s) response=%d", path, 199 | response); 200 | return -ENOENT; 201 | } 202 | 203 | static int cfs_create(const char* path, mode_t mode, 204 | struct fuse_file_info* info) 205 | { 206 | debugf(DBG_LEVEL_NORM, KBLU "cfs_create(%s)", path); 207 | FILE* temp_file; 208 | int errsv; 209 | char file_path_safe[NAME_MAX] = ""; 210 | 211 | if (*temp_dir) 212 | { 213 | get_safe_cache_file_path(path, file_path_safe, temp_dir); 214 | temp_file = fopen(file_path_safe, "w+b"); 215 | errsv = errno; 216 | if (temp_file == NULL) 217 | { 218 | debugf(DBG_LEVEL_NORM, KRED 219 | "exit 0: cfs_create cannot open temp file %s.error %s\n", file_path_safe, 220 | strerror(errsv)); 221 | return -EIO; 222 | } 223 | } 224 | else 225 | { 226 | temp_file = tmpfile(); 227 | errsv = errno; 228 | if (temp_file == NULL) 229 | { 230 | debugf(DBG_LEVEL_NORM, KRED 231 | "exit 1: cfs_create cannot open tmp file for path %s.error %s\n", path, 232 | strerror(errsv)); 233 | return -EIO; 234 | } 235 | } 236 | openfile* of = (openfile*)malloc(sizeof(openfile)); 237 | of->fd = dup(fileno(temp_file)); 238 | fclose(temp_file); 239 | of->flags = info->flags; 240 | info->fh = (uintptr_t)of; 241 | update_dir_cache(path, 0, 0, 0); 242 | info->direct_io = 1; 243 | dir_entry* de = 
check_path_info(path); 244 | if (de) 245 | { 246 | debugf(DBG_LEVEL_EXT, KCYN"cfs_create(%s): found in cache", path); 247 | struct timespec now; 248 | clock_gettime(CLOCK_REALTIME, &now); 249 | debugf(DBG_LEVEL_EXT, KCYN"cfs_create(%s) set utimes as now", path); 250 | de->atime.tv_sec = now.tv_sec; 251 | de->atime.tv_nsec = now.tv_nsec; 252 | de->mtime.tv_sec = now.tv_sec; 253 | de->mtime.tv_nsec = now.tv_nsec; 254 | de->ctime.tv_sec = now.tv_sec; 255 | de->ctime.tv_nsec = now.tv_nsec; 256 | 257 | char time_str[TIME_CHARS] = ""; 258 | get_timespec_as_str(&(de->atime), time_str, sizeof(time_str)); 259 | debugf(DBG_LEVEL_EXT, KCYN"cfs_create: atime=[%s]", time_str); 260 | get_timespec_as_str(&(de->mtime), time_str, sizeof(time_str)); 261 | debugf(DBG_LEVEL_EXT, KCYN"cfs_create: mtime=[%s]", time_str); 262 | get_timespec_as_str(&(de->ctime), time_str, sizeof(time_str)); 263 | debugf(DBG_LEVEL_EXT, KCYN"cfs_create: ctime=[%s]", time_str); 264 | 265 | //set chmod & chown 266 | de->chmod = mode; 267 | de->uid = geteuid(); 268 | de->gid = getegid(); 269 | } 270 | else 271 | debugf(DBG_LEVEL_EXT, KBLU "cfs_create(%s) "KYEL"dir-entry not found", path); 272 | debugf(DBG_LEVEL_NORM, KBLU "exit 2: cfs_create(%s)=(%s) result=%d:%s", path, 273 | file_path_safe, errsv, strerror(errsv)); 274 | return 0; 275 | } 276 | 277 | // open (download) file from cloud 278 | // todo: implement etag optimisation, download only if content changed, http://www.17od.com/2012/12/19/ten-useful-openstack-swift-features/ 279 | static int cfs_open(const char* path, struct fuse_file_info* info) 280 | { 281 | debugf(DBG_LEVEL_NORM, KBLU "cfs_open(%s)", path); 282 | FILE* temp_file = NULL; 283 | int errsv; 284 | dir_entry* de = path_info(path); 285 | 286 | if (*temp_dir) 287 | { 288 | char file_path_safe[NAME_MAX]; 289 | get_safe_cache_file_path(path, file_path_safe, temp_dir); 290 | 291 | debugf(DBG_LEVEL_EXT, "cfs_open: try open (%s)", file_path_safe); 292 | if (access(file_path_safe, F_OK) != -1) 293 | 
{ 294 | // file exists 295 | temp_file = fopen(file_path_safe, "r"); 296 | errsv = errno; 297 | if (temp_file == NULL) 298 | { 299 | debugf(DBG_LEVEL_NORM, 300 | KRED"exit 0: cfs_open can't open temp_file=[%s] err=%d:%s", file_path_safe, 301 | errsv, strerror(errsv)); 302 | return -ENOENT; 303 | } 304 | else 305 | debugf(DBG_LEVEL_EXT, "cfs_open: file exists"); 306 | } 307 | else 308 | { 309 | errsv = errno; 310 | debugf(DBG_LEVEL_EXT, "cfs_open: file not in cache, err=%s", strerror(errsv)); 311 | //FIXME: commented out as this condition will not be meet in some odd cases and program will crash 312 | //if (!(info->flags & O_WRONLY)) { 313 | debugf(DBG_LEVEL_EXT, "cfs_open: opening for write"); 314 | 315 | // we need to lock on the filename another process could open the file 316 | // while we are writing to it and then only read part of the file 317 | 318 | // duplicate the directory caching datastructure to make the code easier 319 | // to understand. 320 | 321 | // each file in the cache needs: 322 | // filename, is_writing, last_closed, is_removing 323 | // the first time a file is opened a new entry is created in the cache 324 | // setting the filename and is_writing to true. This check needs to be 325 | // wrapped with a lock. 326 | // 327 | // each time a file is closed we set the last_closed for the file to now 328 | // and we check the cache for files whose last 329 | // closed is greater than cache_timeout, then start a new thread rming 330 | // that file. 
331 | 332 | // TODO: just to prevent this craziness for now 333 | temp_file = fopen(file_path_safe, "w+b"); 334 | errsv = errno; 335 | if (temp_file == NULL) 336 | { 337 | debugf(DBG_LEVEL_NORM, 338 | KRED"exit 1: cfs_open cannot open temp_file=[%s] err=%d:%s", file_path_safe, 339 | errsv, strerror(errsv)); 340 | return -ENOENT; 341 | } 342 | 343 | if (!cloudfs_object_write_fp(path, temp_file)) 344 | { 345 | fclose(temp_file); 346 | debugf(DBG_LEVEL_NORM, KRED "exit 2: cfs_open(%s) cannot download/write", 347 | path); 348 | return -ENOENT; 349 | } 350 | } 351 | } 352 | else 353 | { 354 | temp_file = tmpfile(); 355 | if (temp_file == NULL) 356 | { 357 | debugf(DBG_LEVEL_NORM, KRED"exit 3: cfs_open cannot create temp_file err=%s", 358 | strerror(errno)); 359 | return -ENOENT; 360 | } 361 | 362 | if (!(info->flags & O_TRUNC)) 363 | { 364 | if (!cloudfs_object_write_fp(path, temp_file) && !(info->flags & O_CREAT)) 365 | { 366 | fclose(temp_file); 367 | debugf(DBG_LEVEL_NORM, KRED"exit 4: cfs_open(%s) cannot download/write", path); 368 | return -ENOENT; 369 | } 370 | } 371 | } 372 | 373 | update_dir_cache(path, (de ? de->size : 0), 0, 0); 374 | openfile* of = (openfile*)malloc(sizeof(openfile)); 375 | of->fd = dup(fileno(temp_file)); 376 | if (of->fd == -1) 377 | { 378 | //FIXME: potential leak if free not used? 379 | free(of); 380 | debugf(DBG_LEVEL_NORM, KRED "exit 5: cfs_open(%s) of->fd", path); 381 | return -ENOENT; 382 | } 383 | fclose(temp_file); 384 | //TODO: why this allocation to of? 385 | of->flags = info->flags; 386 | info->fh = (uintptr_t)of; 387 | info->direct_io = 1; 388 | info->nonseekable = 0; 389 | //FIXME: potential leak if free(of) not used? 
although if free(of) is used will generate bad descriptor errors 390 | debugf(DBG_LEVEL_NORM, KBLU "exit 6: cfs_open(%s)", path); 391 | return 0; 392 | } 393 | 394 | static int cfs_read(const char* path, char* buf, size_t size, off_t offset, 395 | struct fuse_file_info* info) 396 | { 397 | debugf(DBG_LEVEL_EXTALL, KBLU "cfs_read(%s) buffsize=%lu offset=%lu", path, 398 | size, offset); 399 | file_buffer_size = size; 400 | debug_print_descriptor(info); 401 | int result = pread(((openfile*)(uintptr_t)info->fh)->fd, buf, size, offset); 402 | debugf(DBG_LEVEL_EXTALL, KBLU "exit: cfs_read(%s) result=%s", path, 403 | strerror(errno)); 404 | return result; 405 | } 406 | 407 | //todo: flush will upload a file again even if just file attributes are changed. 408 | //optimisation needed to detect if content is changed and to only save meta when just attribs are modified. 409 | static int cfs_flush(const char* path, struct fuse_file_info* info) 410 | { 411 | debugf(DBG_LEVEL_NORM, KBLU "cfs_flush(%s)", path); 412 | debug_print_descriptor(info); 413 | openfile* of = (openfile*)(uintptr_t)info->fh; 414 | int errsv = 0; 415 | 416 | if (of) 417 | { 418 | // get the actual file size and truncate it. This ensures that if the new file is smaller 419 | // than the previous one, the proper size is uploaded. 
420 | struct stat stbuf; 421 | cfs_getattr(path, &stbuf); 422 | update_dir_cache(path, stbuf.st_size, 0, 0); 423 | ftruncate(of->fd, stbuf.st_size); 424 | 425 | if (of->flags & O_RDWR || of->flags & O_WRONLY) 426 | { 427 | FILE* fp = fdopen(dup(of->fd), "r"); 428 | errsv = errno; 429 | if (fp != NULL) 430 | { 431 | rewind(fp); 432 | //calculate md5 hash, compare with cloud hash to determine if file content is changed 433 | char md5_file_hash_str[MD5_DIGEST_HEXA_STRING_LEN] = "\0"; 434 | file_md5(fp, md5_file_hash_str); 435 | dir_entry* de = check_path_info(path); 436 | if (de && de->md5sum != NULL && (!strcasecmp(md5_file_hash_str, de->md5sum))) 437 | { 438 | //file content is identical, no need to upload entire file, just update metadata 439 | debugf(DBG_LEVEL_NORM, KBLU 440 | "cfs_flush(%s): skip full upload as content did not change", path); 441 | cloudfs_update_meta(de); 442 | } 443 | else 444 | { 445 | rewind(fp); 446 | debugf(DBG_LEVEL_NORM, KBLU 447 | "cfs_flush(%s): perform full upload as content changed (or no file found in cache)", 448 | path); 449 | if (!cloudfs_object_read_fp(path, fp)) 450 | { 451 | fclose(fp); 452 | errsv = errno; 453 | debugf(DBG_LEVEL_NORM, KRED"exit 0: cfs_flush(%s) result=%d:%s", path, errsv, 454 | strerror(errno)); 455 | return -ENOENT; 456 | } 457 | } 458 | fclose(fp); 459 | errsv = errno; 460 | } 461 | else 462 | debugf(DBG_LEVEL_EXT, KRED "status: cfs_flush, err=%d:%s", errsv, 463 | strerror(errno)); 464 | } 465 | } 466 | debugf(DBG_LEVEL_NORM, KBLU "exit 1: cfs_flush(%s) result=%d:%s", path, errsv, 467 | strerror(errno)); 468 | return 0; 469 | } 470 | 471 | static int cfs_release(const char* path, struct fuse_file_info* info) 472 | { 473 | debugf(DBG_LEVEL_NORM, KBLU "cfs_release(%s)", path); 474 | close(((openfile*)(uintptr_t)info->fh)->fd); 475 | debugf(DBG_LEVEL_NORM, KBLU "exit: cfs_release(%s)", path); 476 | return 0; 477 | } 478 | 479 | static int cfs_rmdir(const char* path) 480 | { 481 | debugf(DBG_LEVEL_NORM, KBLU 
"cfs_rmdir(%s)", path); 482 | int success = cloudfs_delete_object(path); 483 | if (success == -1) 484 | { 485 | debugf(DBG_LEVEL_NORM, KBLU "exit 0: cfs_rmdir(%s)", path); 486 | return -ENOTEMPTY; 487 | } 488 | if (success) 489 | { 490 | dir_decache(path); 491 | debugf(DBG_LEVEL_NORM, KBLU "exit 1: cfs_rmdir(%s)", path); 492 | return 0; 493 | } 494 | debugf(DBG_LEVEL_NORM, KBLU "exit 2: cfs_rmdir(%s)", path); 495 | return -ENOENT; 496 | } 497 | 498 | static int cfs_ftruncate(const char* path, off_t size, 499 | struct fuse_file_info* info) 500 | { 501 | debugf(DBG_LEVEL_NORM, KBLU "cfs_ftruncate(%s) size=%lu", path, size); 502 | file_buffer_size = size; 503 | openfile* of = (openfile*)(uintptr_t)info->fh; 504 | if (ftruncate(of->fd, size)) 505 | return -errno; 506 | lseek(of->fd, 0, SEEK_SET); 507 | update_dir_cache(path, size, 0, 0); 508 | debugf(DBG_LEVEL_NORM, KBLU "exit: cfs_ftruncate(%s)", path); 509 | return 0; 510 | } 511 | 512 | static int cfs_write(const char* path, const char* buf, size_t length, 513 | off_t offset, struct fuse_file_info* info) 514 | { 515 | debugf(DBG_LEVEL_EXTALL, KBLU "cfs_write(%s) bufflength=%lu offset=%lu", path, 516 | length, offset); 517 | // FIXME: Potential inconsistent cache update if pwrite fails? 
518 | update_dir_cache(path, offset + length, 0, 0); 519 | //int result = pwrite(info->fh, buf, length, offset); 520 | int result = pwrite(((openfile*)(uintptr_t)info->fh)->fd, buf, length, offset); 521 | int errsv = errno; 522 | if (errsv == 0) 523 | debugf(DBG_LEVEL_EXTALL, KBLU "exit 0: cfs_write(%s) result=%d:%s", path, 524 | errsv, strerror(errsv)); 525 | else 526 | debugf(DBG_LEVEL_EXTALL, KBLU "exit 1: cfs_write(%s) "KRED"result=%d:%s", path, 527 | errsv, strerror(errsv)); 528 | return result; 529 | } 530 | 531 | static int cfs_unlink(const char* path) 532 | { 533 | debugf(DBG_LEVEL_NORM, KBLU "cfs_unlink(%s)", path); 534 | int success = cloudfs_delete_object(path); 535 | if (success == -1) 536 | { 537 | debugf(DBG_LEVEL_NORM, KRED "exit 0: cfs_unlink(%s)", path); 538 | return -EACCES; 539 | } 540 | if (success) 541 | { 542 | dir_decache(path); 543 | debugf(DBG_LEVEL_NORM, KBLU "exit 1: cfs_unlink(%s)", path); 544 | return 0; 545 | } 546 | debugf(DBG_LEVEL_NORM, KRED "exit 2: cfs_unlink(%s)", path); 547 | return -ENOENT; 548 | } 549 | 550 | static int cfs_fsync(const char* path, int idunno, struct fuse_file_info* info) 551 | { 552 | debugf(DBG_LEVEL_NORM, "cfs_fsync(%s)", path); 553 | return 0; 554 | } 555 | 556 | static int cfs_truncate(const char* path, off_t size) 557 | { 558 | debugf(DBG_LEVEL_NORM, "cfs_truncate(%s) size=%lu", path, size); 559 | cloudfs_object_truncate(path, size); 560 | update_dir_cache(path, size, 0, 0); 561 | debugf(DBG_LEVEL_NORM, "exit: cfs_truncate(%s)", path); 562 | return 0; 563 | } 564 | 565 | //this is called regularly on copy (via mc), is optimised (cached) 566 | static int cfs_statfs(const char* path, struct statvfs* stat) 567 | { 568 | debugf(DBG_LEVEL_NORM, KBLU "cfs_statfs(%s)", path); 569 | if (cloudfs_statfs(path, stat)) 570 | { 571 | debugf(DBG_LEVEL_NORM, KBLU "exit 0: cfs_statfs(%s)", path); 572 | return 0; 573 | } 574 | else 575 | { 576 | debugf(DBG_LEVEL_NORM, KRED"exit 1: cfs_statfs(%s) not-found", path); 577 | 
    return -EIO;
  }
}

//record new uid/gid in the cached entry and push the change to the cloud
//NOTE(review): option_enable_chown is not consulted here - confirm whether
//gating happens in the caller or was intended in this function
static int cfs_chown(const char* path, uid_t uid, gid_t gid)
{
  debugf(DBG_LEVEL_NORM, KBLU "cfs_chown(%s,%d,%d)", path, uid, gid);
  dir_entry* de = check_path_info(path);
  if (de)
  {
    if (de->uid != uid || de->gid != gid)
    {
      debugf(DBG_LEVEL_NORM, "cfs_chown(%s): change from uid:gid %d:%d to %d:%d",
             path, de->uid, de->gid, uid, gid);
      de->uid = uid;
      de->gid = gid;
      //issue a PUT request to update metadata (quick request just to update headers)
      int response = cloudfs_update_meta(de);
    }
  }
  return 0;
}

//record new permission bits in the cached entry and push the change to the cloud
static int cfs_chmod(const char* path, mode_t mode)
{
  debugf(DBG_LEVEL_NORM, KBLU"cfs_chmod(%s,%d)", path, mode);
  dir_entry* de = check_path_info(path);
  if (de)
  {
    if (de->chmod != mode)
    {
      debugf(DBG_LEVEL_NORM, "cfs_chmod(%s): change mode from %d to %d", path,
             de->chmod, mode);
      de->chmod = mode;
      //todo: issue a PUT request to update metadata (empty request just to update headers?)
612 | int response = cloudfs_update_meta(de); 613 | } 614 | } 615 | return 0; 616 | } 617 | 618 | static int cfs_rename(const char* src, const char* dst) 619 | { 620 | debugf(DBG_LEVEL_NORM, KBLU"cfs_rename(%s, %s)", src, dst); 621 | dir_entry* src_de = path_info(src); 622 | if (!src_de) 623 | { 624 | debugf(DBG_LEVEL_NORM, KRED"exit 0: cfs_rename(%s,%s) not-found", src, dst); 625 | return -ENOENT; 626 | } 627 | if (src_de->isdir) 628 | { 629 | debugf(DBG_LEVEL_NORM, KRED"exit 1: cfs_rename(%s,%s) cannot rename dirs!", 630 | src, dst); 631 | return -EISDIR; 632 | } 633 | if (cloudfs_copy_object(src, dst)) 634 | { 635 | /* FIXME this isn't quite right as doesn't preserve last modified */ 636 | //fix done in cloudfs_copy_object() 637 | update_dir_cache(dst, src_de->size, 0, 0); 638 | int result = cfs_unlink(src); 639 | 640 | dir_entry* dst_de = path_info(dst); 641 | if (!dst_de) 642 | debugf(DBG_LEVEL_NORM, KRED"cfs_rename(%s,%s) dest-not-found-in-cache", src, 643 | dst); 644 | else 645 | { 646 | debugf(DBG_LEVEL_NORM, KBLU"cfs_rename(%s,%s) upload ok", src, dst); 647 | //copy attributes, shortcut, rather than forcing a download from cloud 648 | copy_dir_entry(src_de, dst_de); 649 | } 650 | 651 | debugf(DBG_LEVEL_NORM, KBLU"exit 3: cfs_rename(%s,%s)", src, dst); 652 | return result; 653 | } 654 | debugf(DBG_LEVEL_NORM, KRED"exit 4: cfs_rename(%s,%s) io error", src, dst); 655 | return -EIO; 656 | } 657 | 658 | static int cfs_symlink(const char* src, const char* dst) 659 | { 660 | debugf(DBG_LEVEL_NORM, KBLU"cfs_symlink(%s, %s)", src, dst); 661 | if (cloudfs_create_symlink(src, dst)) 662 | { 663 | update_dir_cache(dst, 1, 0, 1); 664 | debugf(DBG_LEVEL_NORM, KBLU"exit0: cfs_symlink(%s, %s)", src, dst); 665 | return 0; 666 | } 667 | debugf(DBG_LEVEL_NORM, KRED"exit1: cfs_symlink(%s, %s) io error", src, dst); 668 | return -EIO; 669 | } 670 | 671 | static int cfs_readlink(const char* path, char* buf, size_t size) 672 | { 673 | debugf(DBG_LEVEL_NORM, KBLU"cfs_readlink(%s)", 
path); 674 | //fixme: use temp file specified in config 675 | FILE* temp_file = tmpfile(); 676 | int ret = 0; 677 | 678 | if (!cloudfs_object_write_fp(path, temp_file)) 679 | { 680 | debugf(DBG_LEVEL_NORM, KRED"exit 1: cfs_readlink(%s) not found", path); 681 | ret = -ENOENT; 682 | } 683 | 684 | if (!pread(fileno(temp_file), buf, size, 0)) 685 | { 686 | debugf(DBG_LEVEL_NORM, KRED"exit 2: cfs_readlink(%s) not found", path); 687 | ret = -ENOENT; 688 | } 689 | 690 | fclose(temp_file); 691 | debugf(DBG_LEVEL_NORM, KBLU"exit 3: cfs_readlink(%s)", path); 692 | return ret; 693 | } 694 | 695 | static void* cfs_init(struct fuse_conn_info* conn) 696 | { 697 | signal(SIGPIPE, SIG_IGN); 698 | return NULL; 699 | } 700 | 701 | //http://man7.org/linux/man-pages/man2/utimensat.2.html 702 | static int cfs_utimens(const char* path, const struct timespec times[2]) 703 | { 704 | debugf(DBG_LEVEL_NORM, KBLU "cfs_utimens(%s)", path); 705 | dir_entry* path_de = path_info(path); 706 | if (!path_de) 707 | { 708 | debugf(DBG_LEVEL_NORM, KRED"exit 0: cfs_utimens(%s) file not in cache", path); 709 | return -ENOENT; 710 | } 711 | struct timespec now; 712 | clock_gettime(CLOCK_REALTIME, &now); 713 | 714 | if (path_de->atime.tv_sec != times[0].tv_sec 715 | || path_de->atime.tv_nsec != times[0].tv_nsec || 716 | path_de->mtime.tv_sec != times[1].tv_sec 717 | || path_de->mtime.tv_nsec != times[1].tv_nsec) 718 | { 719 | debugf(DBG_LEVEL_EXT, KCYN 720 | "cfs_utimens: change for %s, prev: atime=%li.%li mtime=%li.%li, new: atime=%li.%li mtime=%li.%li", 721 | path, 722 | path_de->atime.tv_sec, path_de->atime.tv_nsec, path_de->mtime.tv_sec, 723 | path_de->mtime.tv_nsec, 724 | times[0].tv_sec, times[0].tv_nsec, times[1].tv_sec, times[1].tv_nsec); 725 | char time_str[TIME_CHARS] = ""; 726 | get_timespec_as_str(×[1], time_str, sizeof(time_str)); 727 | debugf(DBG_LEVEL_EXT, KCYN"cfs_utimens: set mtime=[%s]", time_str); 728 | get_timespec_as_str(×[0], time_str, sizeof(time_str)); 729 | debugf(DBG_LEVEL_EXT, 
KCYN"cfs_utimens: set atime=[%s]", time_str); 730 | path_de->atime = times[0]; 731 | path_de->mtime = times[1]; 732 | // not sure how to best obtain ctime from fuse source file. just record current date. 733 | path_de->ctime = now; 734 | //calling a meta cloud update here is not always needed. 735 | //touch for example opens and closes/flush the file. 736 | //worth implementing a meta cache functionality to avoid multiple uploads on meta changes 737 | //when changing timestamps on very large files, touch command will trigger 2 x long and useless file uploads on cfs_flush() 738 | } 739 | else 740 | debugf(DBG_LEVEL_EXT, KCYN"cfs_utimens: a/m/time not changed"); 741 | debugf(DBG_LEVEL_NORM, KBLU "exit 1: cfs_utimens(%s)", path); 742 | return 0; 743 | } 744 | 745 | 746 | int cfs_setxattr(const char* path, const char* name, const char* value, 747 | size_t size, int flags) 748 | { 749 | return 0; 750 | } 751 | 752 | int cfs_getxattr(const char* path, const char* name, char* value, size_t size) 753 | { 754 | return 0; 755 | } 756 | 757 | int cfs_removexattr(const char* path, const char* name) 758 | { 759 | return 0; 760 | } 761 | 762 | int cfs_listxattr(const char* path, char* list, size_t size) 763 | { 764 | return 0; 765 | } 766 | 767 | FuseOptions options = 768 | { 769 | .cache_timeout = "600", 770 | .verify_ssl = "true", 771 | .segment_size = "1073741824", 772 | .segment_above = "2147483647", 773 | .storage_url = "", 774 | .container = "", 775 | //.temp_dir = "/tmp/", 776 | .temp_dir = "", 777 | .client_id = "", 778 | .client_secret = "", 779 | .refresh_token = "" 780 | }; 781 | 782 | ExtraFuseOptions extra_options = 783 | { 784 | .settings_filename = "", 785 | .get_extended_metadata = "false", 786 | .curl_verbose = "false", 787 | .cache_statfs_timeout = 0, 788 | .debug_level = 0, 789 | .curl_progress_state = "false", 790 | .enable_chown = "false", 791 | .enable_chmod = "false", 792 | .help = "false" 793 | }; 794 | 795 | int parse_option(void* data, const char* arg, 
                 int key,
                 struct fuse_args* outargs)
{
  //each sscanf both matches the "name = value" pattern and copies the value
  //into the corresponding options field in one step
  if (sscanf(arg, " cache_timeout = %[^\r\n ]", options.cache_timeout) ||
      sscanf(arg, " verify_ssl = %[^\r\n ]", options.verify_ssl) ||
      sscanf(arg, " segment_above = %[^\r\n ]", options.segment_above) ||
      sscanf(arg, " segment_size = %[^\r\n ]", options.segment_size) ||
      sscanf(arg, " storage_url = %[^\r\n ]", options.storage_url) ||
      sscanf(arg, " container = %[^\r\n ]", options.container) ||
      sscanf(arg, " temp_dir = %[^\r\n ]", options.temp_dir) ||
      sscanf(arg, " client_id = %[^\r\n ]", options.client_id) ||
      sscanf(arg, " client_secret = %[^\r\n ]", options.client_secret) ||
      sscanf(arg, " refresh_token = %[^\r\n ]", options.refresh_token) ||

      sscanf(arg, " get_extended_metadata = %[^\r\n ]",
             extra_options.get_extended_metadata) ||
      sscanf(arg, " curl_verbose = %[^\r\n ]", extra_options.curl_verbose) ||
      sscanf(arg, " cache_statfs_timeout = %[^\r\n ]",
             extra_options.cache_statfs_timeout) ||
      sscanf(arg, " debug_level = %[^\r\n ]", extra_options.debug_level) ||
      sscanf(arg, " curl_progress_state = %[^\r\n ]",
             extra_options.curl_progress_state) ||
      sscanf(arg, " enable_chmod = %[^\r\n ]", extra_options.enable_chmod) ||
      sscanf(arg, " enable_chown = %[^\r\n ]", extra_options.enable_chown)
     )
    return 0;

  if (!strncmp(arg, "settings_filename=", 18))
  {
    arg += 18;
    //NOTE(review): strncpy leaves the buffer unterminated when arg is
    //MAX_PATH_SIZE chars or longer - verify buffer sizing upstream
    strncpy(extra_options.settings_filename, arg, MAX_PATH_SIZE);
    return 0;
  }

  // Detect help for help enrichment
  if (!strcmp(arg, "-h") || !strcmp(arg, "--help"))
    strcpy(extra_options.help, "true");

  if (!strcmp(arg, "-f") || !strcmp(arg, "-d") || !strcmp(arg, "debug"))
    cloudfs_debug(1);

  return 1;
}

//allows memory leaks inspections
void interrupt_handler(int sig)
{
  debugf(DBG_LEVEL_NORM, "Got interrupt signal %d, cleaning memory", sig);
  //TODO: clean memory allocations
  //http://www.cprogramming.com/debugging/valgrind.html
  cloudfs_free();
  //TODO: clear dir cache
  pthread_mutex_destroy(&dcachemut);
  exit(0);
}

//copy the parsed string options into their boolean/int runtime counterparts
void initialise_options()
{
  //todo: handle param init consistently, quite heavy implementation
  cloudfs_verify_ssl(!strcasecmp(options.verify_ssl, "true"));
  cloudfs_option_get_extended_metadata(!strcasecmp(
                                         extra_options.get_extended_metadata, "true"));
  cloudfs_option_curl_verbose(!strcasecmp(extra_options.curl_verbose, "true"));
  //lean way to init params, to be used as reference
  //empty string means "not set": keep the compiled-in default
  if (*extra_options.debug_level)
    option_debug_level = atoi(extra_options.debug_level);
  if (*extra_options.cache_statfs_timeout)
    option_cache_statfs_timeout = atoi(extra_options.cache_statfs_timeout);
  if (*extra_options.curl_progress_state)
    option_curl_progress_state = !strcasecmp(extra_options.curl_progress_state,
                                 "true");
  if (*extra_options.enable_chmod)
    option_enable_chmod = !strcasecmp(extra_options.enable_chmod, "true");
  if (*extra_options.enable_chown)
    option_enable_chown = !strcasecmp(extra_options.enable_chown, "true");
}

int main(int argc, char** argv)
{
#if __x86_64__ || __ppc64__
  const unsigned long MAX_SEGMENT_SIZE = (unsigned long)5 * (unsigned long)(1 << 30);
#else
  const unsigned long MAX_SEGMENT_SIZE = (unsigned long)2 * (unsigned long)(1 << 30);
#endif

  if (debug)
    fprintf(stderr, "Starting hubicfuse on homedir %s!\n", get_home_dir());

  signal(SIGINT, interrupt_handler);

  int return_code;
  FILE* settings;
  struct fuse_args args = FUSE_ARGS_INIT(argc, argv);
  char default_settings[MAX_PATH_SIZE];

  // Default value for extra_options.settings_filename
  snprintf(default_settings, MAX_PATH_SIZE,
"%s/.hubicfuse", get_home_dir()); 892 | strncpy(extra_options.settings_filename, default_settings, MAX_PATH_SIZE); 893 | 894 | // Reading FUSE options 895 | fuse_opt_parse(&args, &options, NULL, parse_option); 896 | 897 | // Reading hubiC settings 898 | if ((settings = fopen(extra_options.settings_filename, "r"))) 899 | { 900 | char line[OPTION_SIZE]; 901 | while (fgets(line, sizeof(line), settings)) 902 | parse_option(NULL, line, -1, &args); 903 | fclose(settings); 904 | } 905 | 906 | cache_timeout = atoi(options.cache_timeout); 907 | segment_size = atoll(options.segment_size); 908 | segment_above = atoll(options.segment_above); 909 | 910 | // check consistency 911 | 912 | if (segment_above > MAX_SEGMENT_SIZE) 913 | { 914 | printf ("A segment cannot be larger than 5Gb\n"); 915 | return 1; 916 | } 917 | if (segment_size > segment_above) 918 | { 919 | printf ("segment_size must be smaller than segment_above\n"); 920 | return 1; 921 | } 922 | 923 | // this is ok since main is on the stack during the entire execution 924 | override_storage_url = options.storage_url; 925 | public_container = options.container; 926 | temp_dir = options.temp_dir; 927 | 928 | if (!*options.client_id || !*options.client_secret || !*options.refresh_token) 929 | { 930 | fprintf(stderr, 931 | "Unable to determine client_id, client_secret or refresh_token.\n\n"); 932 | fprintf(stderr, "These can be set either as mount options or in " 933 | "a file named %s\n\n", default_settings); 934 | fprintf(stderr, " client_id=[App's id]\n"); 935 | fprintf(stderr, " client_secret=[App's secret]\n"); 936 | fprintf(stderr, " refresh_token=[Get it running hubic_token]\n"); 937 | fprintf(stderr, "The following settings are optional:\n\n"); 938 | fprintf(stderr, 939 | " cache_timeout=[Seconds for directory caching, default 600]\n"); 940 | fprintf(stderr, " verify_ssl=[false to disable SSL cert verification]\n"); 941 | fprintf(stderr, 942 | " segment_size=[Size to use when creating DLOs, default 1073741824]\n"); 
  fprintf(stderr,
          " segment_above=[File size at which to use segments, default 2147483648]\n");
  fprintf(stderr,
          " storage_url=[Storage URL for other tenant to view container]\n");
  fprintf(stderr,
          " container=[Public container to view of tenant specified by storage_url]\n");
  fprintf(stderr, " temp_dir=[Directory to store temp files]\n");
  fprintf(stderr,
          " get_extended_metadata=[true to enable download of utime, chmod, chown file attributes (but slower)]\n");
  fprintf(stderr,
          " curl_verbose=[true to debug info on curl requests (lots of output)]\n");
  fprintf(stderr,
          " curl_progress_state=[true to enable progress callback enabled. Mostly used for debugging]\n");
  fprintf(stderr,
          " cache_statfs_timeout=[number of seconds to cache requests to statfs (cloud statistics), 0 for no cache]\n");
  fprintf(stderr,
          " debug_level=[0 to 2, 0 for minimal verbose debugging. No debug if -d or -f option is not provided.]\n");
  fprintf(stderr, " enable_chmod=[true to enable chmod support on fuse]\n");
  fprintf(stderr, " enable_chown=[true to enable chown support on fuse]\n");
  return 1;
  }
  cloudfs_init();
  initialise_options();
  if (debug)
  {
    fprintf(stderr, "settings_filename = %s\n", extra_options.settings_filename);
    fprintf(stderr, "debug_level = %d\n", option_debug_level);
    fprintf(stderr, "get_extended_metadata = %d\n", option_get_extended_metadata);
    fprintf(stderr, "curl_progress_state = %d\n", option_curl_progress_state);
    fprintf(stderr, "enable_chmod = %d\n", option_enable_chmod);
    fprintf(stderr, "enable_chown = %d\n", option_enable_chown);
  }
  cloudfs_set_credentials(options.client_id, options.client_secret,
                          options.refresh_token);

  if (!cloudfs_connect())
  {
    fprintf(stderr, "Failed to authenticate.\n");
    return 1;
  }
  //todo: check why in some cases the define below is not available (when running the binary on symbolic linked folders)
#ifndef HAVE_OPENSSL
#warning Compiling without libssl, will run single-threaded.
  fuse_opt_add_arg(&args, "-s");
#endif

  //table of FUSE callbacks implemented by hubicfuse
  struct fuse_operations cfs_oper =
  {
    .readdir = cfs_readdir,
    .mkdir = cfs_mkdir,
    .read = cfs_read,
    .create = cfs_create,
    .open = cfs_open,
    .fgetattr = cfs_fgetattr,
    .getattr = cfs_getattr,
    .flush = cfs_flush,
    .release = cfs_release,
    .rmdir = cfs_rmdir,
    .ftruncate = cfs_ftruncate,
    .truncate = cfs_truncate,
    .write = cfs_write,
    .unlink = cfs_unlink,
    .fsync = cfs_fsync,
    .statfs = cfs_statfs,
    .chmod = cfs_chmod,
    .chown = cfs_chown,
    .rename = cfs_rename,
    .symlink = cfs_symlink,
    .readlink = cfs_readlink,
    .init = cfs_init,
    .utimens = cfs_utimens,
#ifdef HAVE_SETXATTR
    .setxattr = cfs_setxattr,
    .getxattr = cfs_getxattr,
    .listxattr = cfs_listxattr,
    .removexattr = cfs_removexattr,
#endif
  };

  //recursive mutex: the cache helpers call each other while holding the lock
  pthread_mutexattr_init(&mutex_attr);
  pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&dcachemut, &mutex_attr);
  return_code = fuse_main(args.argc, args.argv, &cfs_oper, &options);

  if (return_code > 0 && !strcmp(extra_options.help, "true"))
  {
    fprintf(stderr, "\nhubiC options:\n");
    fprintf(stderr, " -o settings_filename=FILE use FILE as hubiC settings\n");
    fprintf(stderr, " instead of %s\n", default_settings);
  }

  return return_code;
}
-------------------------------------------------------------------------------- /commonfs.c: --------------------------------------------------------------------------------
#define _GNU_SOURCE
#include
#include
#include
#include
#include
#include
#include
#ifdef __linux__
#include
#endif
#include
#include
#include
#include
#include
#include
#include
#include
#define FUSE_USE_VERSION 30
#include
#include
#include
#include
#include "commonfs.h"
#include "config.h"

//globals shared with cloudfuse.c / cloudfsapi.c (declared in commonfs.h)
pthread_mutex_t dcachemut;  //guards the dcache list and its entries
pthread_mutexattr_t mutex_attr;
dir_cache* dcache;          //head of the linked list of cached directories
char* temp_dir;             //location of the on-disk file cache
int cache_timeout;
int debug = 0;
int verify_ssl = 2;
bool option_get_extended_metadata = false;
bool option_curl_verbose = false;
int option_cache_statfs_timeout = 0;
int option_debug_level = 0;
int option_curl_progress_state = 1;//1 to disable curl progress
bool option_enable_chown = false;
bool option_enable_chmod = false;
bool option_enable_progressive_upload = false;
bool option_enable_progressive_download = false;
size_t file_buffer_size = 0;

// needed to get correct GMT / local time
// hubic stores time as GMT so we have to do conversions
// http://zhu-qy.blogspot.ro/2012/11/ref-how-to-convert-from-utc-to-local.html
time_t my_timegm(struct tm* tm)
{
  time_t epoch = 0;
  time_t offset = mktime(gmtime(&epoch));
  time_t utc = mktime(tm);
  return difftime(utc, offset);
}

//expect time_str as a friendly string format
//NOTE(review): strptime's return value is unchecked and val_time_tm is not
//zero-initialized; a malformed time_str leaves fields indeterminate -
//confirm callers always pass well-formed "%FT%T" strings
time_t get_time_from_str_as_gmt(char* time_str)
{
  struct tm val_time_tm;
  time_t val_time_t;
  strptime(time_str, "%FT%T", &val_time_tm);
  val_time_tm.tm_isdst = -1;
  val_time_t = my_timegm(&val_time_tm);
  return val_time_t;
}

//format time_t_val as local time into time_str (when non-NULL) and return
//the round-tripped local time_t
time_t get_time_as_local(time_t time_t_val, char time_str[], int char_buf_size)
{
  struct tm loc_time_tm;
  loc_time_tm = *localtime(&time_t_val);
  if (time_str != NULL)
  {
    //debugf(DBG_LEVEL_NORM, 0,"Local len=%d size=%d pass=%d", strlen(time_str), sizeof(time_str), char_buf_size);
    strftime(time_str,
char_buf_size, "%c", &loc_time_tm); 76 | //debugf(DBG_LEVEL_NORM, 0,"Local timestr=[%s] size=%d", time_str, strlen(time_str)); 77 | } 78 | //debugf(DBG_LEVEL_NORM, 0,"Local time_t %li", mktime(&loc_time_tm)); 79 | return mktime(&loc_time_tm); 80 | } 81 | 82 | int get_time_as_string(time_t time_t_val, long nsec, char* time_str, 83 | int time_str_len) 84 | { 85 | struct tm time_val_tm; 86 | time_t safe_input_time; 87 | //if time is incorrect (too long) you get segfault, need to check length and trim 88 | if (time_t_val > INT_MAX) 89 | { 90 | debugf(DBG_LEVEL_NORM, 91 | KRED"get_time_as_string: input time length too long, %lu > max=%lu, trimming!", 92 | time_t_val, INT_MAX); 93 | safe_input_time = 0;//(int)time_t_val; 94 | } 95 | else 96 | safe_input_time = time_t_val; 97 | time_val_tm = *gmtime(&safe_input_time); 98 | int str_len = strftime(time_str, time_str_len, HUBIC_DATE_FORMAT, 99 | &time_val_tm); 100 | char nsec_str[TIME_CHARS]; 101 | sprintf(nsec_str, "%ld", nsec); 102 | strcat(time_str, nsec_str); 103 | return str_len + strlen(nsec_str); 104 | } 105 | 106 | time_t get_time_now() 107 | { 108 | struct timespec now; 109 | clock_gettime(CLOCK_REALTIME, &now); 110 | return now.tv_sec; 111 | } 112 | 113 | size_t get_time_now_as_str(char* time_str, int time_str_len) 114 | { 115 | time_t now = time(0); 116 | struct tm tstruct; 117 | tstruct = *localtime(&now); 118 | // Visit http://en.cppreference.com/w/cpp/chrono/c/strftime 119 | // for more information about date/time format 120 | size_t result = strftime(time_str, time_str_len, HUBIC_DATE_FORMAT, &tstruct); 121 | return result; 122 | } 123 | 124 | int get_timespec_as_str(const struct timespec* times, char* time_str, 125 | int time_str_len) 126 | { 127 | return get_time_as_string(times->tv_sec, times->tv_nsec, time_str, 128 | time_str_len); 129 | } 130 | 131 | char* str2md5(const char* str, int length) 132 | { 133 | int n; 134 | MD5_CTX c; 135 | unsigned char digest[16]; 136 | char* out = (char*)malloc(33); 137 | 
138 | MD5_Init(&c); 139 | while (length > 0) 140 | { 141 | if (length > 512) 142 | MD5_Update(&c, str, 512); 143 | else 144 | MD5_Update(&c, str, length); 145 | length -= 512; 146 | str += 512; 147 | } 148 | MD5_Final(digest, &c); 149 | for (n = 0; n < 16; ++n) 150 | snprintf(&(out[n * 2]), 16 * 2, "%02x", (unsigned int)digest[n]); 151 | return out; 152 | } 153 | 154 | // http://stackoverflow.com/questions/10324611/how-to-calculate-the-md5-hash-of-a-large-file-in-c 155 | int file_md5(FILE* file_handle, char* md5_file_str) 156 | { 157 | if (file_handle == NULL) 158 | { 159 | debugf(DBG_LEVEL_NORM, KRED"file_md5: NULL file handle"); 160 | return 0; 161 | } 162 | unsigned char c[MD5_DIGEST_LENGTH]; 163 | int i; 164 | MD5_CTX mdContext; 165 | int bytes; 166 | char mdchar[3];//2 chars for md5 + null string terminator 167 | unsigned char* data_buf = malloc(1024 * sizeof(unsigned char)); 168 | MD5_Init(&mdContext); 169 | while ((bytes = fread(data_buf, 1, 1024, file_handle)) != 0) 170 | MD5_Update(&mdContext, data_buf, bytes); 171 | MD5_Final(c, &mdContext); 172 | for (i = 0; i < MD5_DIGEST_LENGTH; i++) 173 | { 174 | snprintf(mdchar, 3, "%02x", c[i]); 175 | strcat(md5_file_str, mdchar); 176 | } 177 | free(data_buf); 178 | return 0; 179 | } 180 | 181 | int get_safe_cache_file_path(const char* path, char* file_path_safe, 182 | char* temp_dir) 183 | { 184 | char tmp_path[PATH_MAX]; 185 | strncpy(tmp_path, path, PATH_MAX); 186 | char* pch; 187 | while ((pch = strchr(tmp_path, '/'))) 188 | * pch = '.'; 189 | char file_path[PATH_MAX] = ""; 190 | //temp file name had process pid in it, removed as on restart files are left in cache (pid changes) 191 | snprintf(file_path, PATH_MAX, TEMP_FILE_NAME_FORMAT, temp_dir, tmp_path); 192 | //fixme check if sizeof or strlen is suitable 193 | int file_path_len = sizeof(file_path); 194 | //the file path name using this format can go beyond NAME_MAX size and will generate error on fopen 195 | //solution: cap file length to NAME_MAX, use a 
prefix from original path for debug purposes and add md5 id 196 | char* md5_path = str2md5(file_path, file_path_len); 197 | int md5len = strlen(md5_path); 198 | size_t safe_len_prefix = min(NAME_MAX - md5len, file_path_len); 199 | strncpy(file_path_safe, file_path, safe_len_prefix); 200 | strncpy(file_path_safe + safe_len_prefix, md5_path, md5len); 201 | //sometimes above copy process produces longer strings that NAME_MAX, force a null terminated string 202 | file_path_safe[safe_len_prefix + md5len - 1] = '\0'; 203 | free(md5_path); 204 | return strlen(file_path_safe); 205 | } 206 | 207 | void get_file_path_from_fd(int fd, char* path, int size_path) 208 | { 209 | char proc_path[MAX_PATH_SIZE]; 210 | /* Read out the link to our file descriptor. */ 211 | sprintf(proc_path, "/proc/self/fd/%d", fd); 212 | memset(path, 0, size_path); 213 | if (readlink(proc_path, path, size_path - 1) == -1) 214 | debugf(DBG_LEVEL_NORM, KRED 215 | "get_file_path_from_fd: cannot open %d\n", fd); 216 | } 217 | 218 | //for file descriptor debugging 219 | void debug_print_flags(int flags) 220 | { 221 | int accmode, val; 222 | accmode = flags & O_ACCMODE; 223 | if (accmode == O_RDONLY) debugf(DBG_LEVEL_EXTALL, KYEL"read only"); 224 | else if (accmode == O_WRONLY) debugf(DBG_LEVEL_EXTALL, KYEL"write only"); 225 | else if (accmode == O_RDWR) debugf(DBG_LEVEL_EXTALL, KYEL"read write"); 226 | else debugf(DBG_LEVEL_EXT, KYEL"unknown access mode"); 227 | 228 | if (val & O_APPEND) debugf(DBG_LEVEL_EXTALL, KYEL", append"); 229 | if (val & O_NONBLOCK) debugf(DBG_LEVEL_EXTALL, KYEL", nonblocking"); 230 | #if !defined(_POSIX_SOURCE) && defined(O_SYNC) 231 | if (val & O_SYNC) debugf(DBG_LEVEL_EXT, 0, 232 | KRED ", synchronous writes"); 233 | #endif 234 | 235 | } 236 | 237 | //for file descriptor debugging 238 | void debug_print_descriptor(struct fuse_file_info* info) 239 | { 240 | char file_path[MAX_PATH_SIZE]; 241 | openfile* of = (openfile *)(uintptr_t)info->fh; 242 | get_file_path_from_fd(of->fd, 
file_path, sizeof(file_path)); 243 | debugf(DBG_LEVEL_EXT, KCYN "descriptor localfile=[%s] fd=%lld", file_path, 244 | of->fd); 245 | debug_print_flags(info->flags); 246 | } 247 | 248 | void dir_for(const char* path, char* dir) 249 | { 250 | strncpy(dir, path, MAX_PATH_SIZE); 251 | char* slash = strrchr(dir, '/'); 252 | if (slash) 253 | *slash = '\0'; 254 | } 255 | 256 | //prints cache content for debug purposes 257 | void debug_list_cache_content() 258 | { 259 | return;//disabled 260 | dir_cache* cw; 261 | dir_entry* de; 262 | for (cw = dcache; cw; cw = cw->next) 263 | { 264 | debugf(DBG_LEVEL_EXT, "LIST-CACHE: DIR[%s]", cw->path); 265 | for (de = cw->entries; de; de = de->next) 266 | debugf(DBG_LEVEL_EXT, "LIST-CACHE: FOLDER[%s]", de->full_name); 267 | } 268 | } 269 | 270 | int delete_file(char* path) 271 | { 272 | debugf(DBG_LEVEL_NORM, KYEL"delete_file(%s)", path); 273 | char file_path_safe[NAME_MAX] = ""; 274 | get_safe_cache_file_path(path, file_path_safe, temp_dir); 275 | int result = unlink(file_path_safe); 276 | debugf(DBG_LEVEL_EXT, KYEL"delete_file(%s) (%s) result=%s", path, 277 | file_path_safe, strerror(result)); 278 | return result; 279 | } 280 | 281 | //adding a directory in cache 282 | dir_cache* new_cache(const char* path) 283 | { 284 | debugf(DBG_LEVEL_NORM, KCYN"new_cache(%s)", path); 285 | dir_cache* cw = (dir_cache*)calloc(sizeof(dir_cache), 1); 286 | cw->path = strdup(path); 287 | cw->prev = NULL; 288 | cw->entries = NULL; 289 | cw->cached = time(NULL); 290 | //added cache by access 291 | cw->accessed_in_cache = time(NULL); 292 | cw->was_deleted = false; 293 | if (dcache) 294 | dcache->prev = cw; 295 | cw->next = dcache; 296 | dir_cache* result; 297 | result = (dcache = cw); 298 | debugf(DBG_LEVEL_EXT, "exit: new_cache(%s)", path); 299 | return result; 300 | } 301 | 302 | //todo: check if the program behaves ok when free_dir 303 | //is made on a folder that has an operation in progress 304 | void cloudfs_free_dir_list(dir_entry* dir_list) 305 | 
{ 306 | //check for NULL as dir might be already removed from cache by other thread 307 | debugf(DBG_LEVEL_NORM, "cloudfs_free_dir_list(%s)", dir_list->full_name); 308 | while (dir_list) 309 | { 310 | dir_entry* de = dir_list; 311 | dir_list = dir_list->next; 312 | //remove file from disk cache, fix for issue #89, https://github.com/TurboGit/hubicfuse/issues/89 313 | delete_file(de->full_name); 314 | free(de->name); 315 | free(de->full_name); 316 | free(de->content_type); 317 | //TODO free all added fields 318 | free(de->md5sum); 319 | free(de); 320 | } 321 | } 322 | 323 | void dir_decache(const char* path) 324 | { 325 | dir_cache* cw; 326 | debugf(DBG_LEVEL_NORM, "dir_decache(%s)", path); 327 | pthread_mutex_lock(&dcachemut); 328 | dir_entry* de, *tmpde; 329 | char dir[MAX_PATH_SIZE]; 330 | dir_for(path, dir); 331 | for (cw = dcache; cw; cw = cw->next) 332 | { 333 | debugf(DBG_LEVEL_EXT, "dir_decache: parse(%s)", cw->path); 334 | if (!strcmp(cw->path, path)) 335 | { 336 | if (cw == dcache) 337 | dcache = cw->next; 338 | if (cw->prev) 339 | cw->prev->next = cw->next; 340 | if (cw->next) 341 | cw->next->prev = cw->prev; 342 | debugf(DBG_LEVEL_EXT, "dir_decache: free_dir1(%s)", cw->path); 343 | //fixme: this sometimes is NULL and generates segfaults, checking first 344 | if (cw->entries != NULL) 345 | cloudfs_free_dir_list(cw->entries); 346 | free(cw->path); 347 | free(cw); 348 | } 349 | else if (cw->entries && !strcmp(dir, cw->path)) 350 | { 351 | if (!strcmp(cw->entries->full_name, path)) 352 | { 353 | de = cw->entries; 354 | cw->entries = de->next; 355 | de->next = NULL; 356 | debugf(DBG_LEVEL_EXT, "dir_decache: free_dir2()"); 357 | cloudfs_free_dir_list(de); 358 | } 359 | else for (de = cw->entries; de->next; de = de->next) 360 | { 361 | if (!strcmp(de->next->full_name, path)) 362 | { 363 | tmpde = de->next; 364 | de->next = de->next->next; 365 | tmpde->next = NULL; 366 | debugf(DBG_LEVEL_EXT, "dir_decache: free_dir3()", cw->path); 367 | 
            cloudfs_free_dir_list(tmpde);
            break;
          }
        }
    }
  }
  pthread_mutex_unlock(&dcachemut);
}

//allocate a dir_entry with sane defaults (all timestamps = now,
//uid/gid/mode = 0, no md5, not yet linked into any list)
dir_entry* init_dir_entry()
{
  dir_entry* de = (dir_entry*)malloc(sizeof(dir_entry));
  de->metadata_downloaded = false;
  de->size = 0;
  de->next = NULL;
  de->md5sum = NULL;
  de->accessed_in_cache = time(NULL);
  de->last_modified = time(NULL);
  de->mtime.tv_sec = time(NULL);
  de->atime.tv_sec = time(NULL);
  de->ctime.tv_sec = time(NULL);
  de->mtime.tv_nsec = 0;
  de->atime.tv_nsec = 0;
  de->ctime.tv_nsec = 0;
  de->chmod = 0;
  de->gid = 0;
  de->uid = 0;
  return de;
}

//copy the timestamp and permission attributes from src to dst
//(names, size and content fields are deliberately left alone)
void copy_dir_entry(dir_entry* src, dir_entry* dst)
{
  dst->atime.tv_sec = src->atime.tv_sec;
  dst->atime.tv_nsec = src->atime.tv_nsec;
  dst->mtime.tv_sec = src->mtime.tv_sec;
  dst->mtime.tv_nsec = src->mtime.tv_nsec;
  dst->ctime.tv_sec = src->ctime.tv_sec;
  dst->ctime.tv_nsec = src->ctime.tv_nsec;
  dst->chmod = src->chmod;
  //todo: copy md5sum as well
}

//check for file in cache, if found size will be updated, if not found
//and this is a dir, a new dir cache entry is created
void update_dir_cache(const char* path, off_t size, int isdir, int islink)
{
  debugf(DBG_LEVEL_EXTALL, KCYN "update_dir_cache(%s)", path);
  pthread_mutex_lock(&dcachemut);
  dir_cache* cw;
  dir_entry* de;
  char dir[MAX_PATH_SIZE];
  dir_for(path, dir);
  for (cw = dcache; cw; cw = cw->next)
  {
    if (!strcmp(cw->path, dir))
    {
      //parent dir is cached: update the existing entry if present
      for (de = cw->entries; de; de = de->next)
      {
        if (!strcmp(de->full_name, path))
        {
          de->size = size;
          pthread_mutex_unlock(&dcachemut);
          debugf(DBG_LEVEL_EXTALL, "exit 0: update_dir_cache(%s)", path);
          return;
        }
      }
      //not present: create a fresh entry in the parent's listing
      de = init_dir_entry();
      de->size = size;
      de->isdir = isdir;
| de->islink = islink; 437 | de->name = strdup(&path[strlen(cw->path) + 1]); 438 | de->full_name = strdup(path); 439 | //fixed: the conditions below were mixed up dir -> link? 440 | if (islink) 441 | de->content_type = strdup("application/link"); 442 | if (isdir) 443 | de->content_type = strdup("application/directory"); 444 | else 445 | de->content_type = strdup("application/octet-stream"); 446 | de->next = cw->entries; 447 | cw->entries = de; 448 | if (isdir) 449 | new_cache(path); 450 | break; 451 | } 452 | } 453 | debugf(DBG_LEVEL_EXTALL, "exit 1: update_dir_cache(%s)", path); 454 | pthread_mutex_unlock(&dcachemut); 455 | } 456 | 457 | //returns first file entry in linked list. if not in cache will be downloaded. 458 | int caching_list_directory(const char* path, dir_entry** list) 459 | { 460 | debugf(DBG_LEVEL_EXT, "caching_list_directory(%s)", path); 461 | pthread_mutex_lock(&dcachemut); 462 | bool new_entry = false; 463 | if (!strcmp(path, "/")) 464 | path = ""; 465 | dir_cache* cw; 466 | for (cw = dcache; cw; cw = cw->next) 467 | { 468 | if (cw->was_deleted == true) 469 | { 470 | debugf(DBG_LEVEL_EXT, 471 | KMAG"caching_list_directory status: dir(%s) is empty as cached expired, reload from cloud", 472 | cw->path); 473 | if (!cloudfs_list_directory(cw->path, list)) 474 | debugf(DBG_LEVEL_EXT, 475 | KMAG"caching_list_directory status: cannot reload dir(%s)", cw->path); 476 | else 477 | { 478 | debugf(DBG_LEVEL_EXT, KMAG"caching_list_directory status: reloaded dir(%s)", 479 | cw->path); 480 | //cw->entries = *list; 481 | cw->was_deleted = false; 482 | cw->cached = time(NULL); 483 | } 484 | } 485 | if (cw->was_deleted == false) 486 | { 487 | if (!strcmp(cw->path, path)) 488 | break; 489 | } 490 | } 491 | if (!cw) 492 | { 493 | //trying to download this entry from cloud, list will point to cached or downloaded entries 494 | if (!cloudfs_list_directory(path, list)) 495 | { 496 | //download was not ok 497 | pthread_mutex_unlock(&dcachemut); 498 | 
debugf(DBG_LEVEL_EXT, 499 | "exit 0: caching_list_directory(%s) "KYEL"[CACHE-DIR-MISS]", path); 500 | return 0; 501 | } 502 | debugf(DBG_LEVEL_EXT, 503 | "caching_list_directory: new_cache(%s) "KYEL"[CACHE-CREATE]", path); 504 | cw = new_cache(path); 505 | new_entry = true; 506 | } 507 | else if (cache_timeout > 0 && (time(NULL) - cw->cached > cache_timeout)) 508 | { 509 | if (!cloudfs_list_directory(path, list)) 510 | { 511 | //mutex unlock was forgotten 512 | pthread_mutex_unlock(&dcachemut); 513 | debugf(DBG_LEVEL_EXT, "exit 1: caching_list_directory(%s)", path); 514 | return 0; 515 | } 516 | //fixme: this frees dir subentries but leaves the dir parent entry, this confuses path_info 517 | //which believes this dir has no entries 518 | if (cw->entries != NULL) 519 | { 520 | cloudfs_free_dir_list(cw->entries); 521 | cw->was_deleted = true; 522 | cw->cached = time(NULL); 523 | debugf(DBG_LEVEL_EXT, "caching_list_directory(%s) "KYEL"[CACHE-EXPIRED]", 524 | path); 525 | } 526 | else 527 | { 528 | debugf(DBG_LEVEL_EXT, 529 | "got NULL on caching_list_directory(%s) "KYEL"[CACHE-EXPIRED w NULL]", path); 530 | pthread_mutex_unlock(&dcachemut); 531 | return 0; 532 | } 533 | } 534 | else 535 | { 536 | debugf(DBG_LEVEL_EXT, "caching_list_directory(%s) "KGRN"[CACHE-DIR-HIT]", 537 | path); 538 | *list = cw->entries; 539 | } 540 | //adding new dir file list to global cache, now this dir becomes visible in cache 541 | cw->entries = *list; 542 | pthread_mutex_unlock(&dcachemut); 543 | debugf(DBG_LEVEL_EXT, "exit 2: caching_list_directory(%s)", path); 544 | return 1; 545 | } 546 | 547 | dir_entry* path_info(const char* path) 548 | { 549 | debugf(DBG_LEVEL_EXT, "path_info(%s)", path); 550 | char dir[MAX_PATH_SIZE]; 551 | dir_for(path, dir); 552 | dir_entry* tmp; 553 | if (!caching_list_directory(dir, &tmp)) 554 | { 555 | debugf(DBG_LEVEL_EXT, "exit 0: path_info(%s) "KYEL"[CACHE-DIR-MISS]", dir); 556 | return NULL; 557 | } 558 | else 559 | debugf(DBG_LEVEL_EXT, "path_info(%s) 
"KGRN"[CACHE-DIR-HIT]", dir); 560 | //iterate in file list obtained from cache or downloaded 561 | for (; tmp; tmp = tmp->next) 562 | { 563 | if (!strcmp(tmp->full_name, path)) 564 | { 565 | debugf(DBG_LEVEL_EXT, "exit 1: path_info(%s) "KGRN"[CACHE-FILE-HIT]", path); 566 | return tmp; 567 | } 568 | } 569 | //miss in case the file is not found on a cached folder 570 | debugf(DBG_LEVEL_EXT, "exit 2: path_info(%s) "KYEL"[CACHE-MISS]", path); 571 | return NULL; 572 | } 573 | 574 | 575 | //retrieve folder from local cache if exists, return null if does not exist (rather than download) 576 | int check_caching_list_directory(const char* path, dir_entry** list) 577 | { 578 | debugf(DBG_LEVEL_EXT, "check_caching_list_directory(%s)", path); 579 | pthread_mutex_lock(&dcachemut); 580 | if (!strcmp(path, "/")) 581 | path = ""; 582 | dir_cache* cw; 583 | for (cw = dcache; cw; cw = cw->next) 584 | if (!strcmp(cw->path, path)) 585 | { 586 | *list = cw->entries; 587 | pthread_mutex_unlock(&dcachemut); 588 | debugf(DBG_LEVEL_EXT, 589 | "exit 0: check_caching_list_directory(%s) "KGRN"[CACHE-DIR-HIT]", path); 590 | return 1; 591 | } 592 | pthread_mutex_unlock(&dcachemut); 593 | debugf(DBG_LEVEL_EXT, 594 | "exit 1: check_caching_list_directory(%s) "KYEL"[CACHE-DIR-MISS]", path); 595 | return 0; 596 | } 597 | 598 | dir_entry* check_parent_folder_for_file(const char* path) 599 | { 600 | char dir[MAX_PATH_SIZE]; 601 | dir_for(path, dir); 602 | dir_entry* tmp; 603 | if (!check_caching_list_directory(dir, &tmp)) 604 | return NULL; 605 | else 606 | return tmp; 607 | } 608 | 609 | //check if local path is in cache, without downloading from cloud if not in cache 610 | dir_entry* check_path_info(const char* path) 611 | { 612 | debugf(DBG_LEVEL_EXT, "check_path_info(%s)", path); 613 | char dir[MAX_PATH_SIZE]; 614 | dir_for(path, dir); 615 | dir_entry* tmp; 616 | 617 | //get parent folder cache entry 618 | if (!check_caching_list_directory(dir, &tmp)) 619 | { 620 | debugf(DBG_LEVEL_EXT, "exit 0: 
check_path_info(%s) "KYEL"[CACHE-MISS]", path); 621 | return NULL; 622 | } 623 | for (; tmp; tmp = tmp->next) 624 | { 625 | if (!strcmp(tmp->full_name, path)) 626 | { 627 | debugf(DBG_LEVEL_EXT, "exit 1: check_path_info(%s) "KGRN"[CACHE-HIT]", path); 628 | return tmp; 629 | } 630 | } 631 | if (!strcmp(path, "/")) 632 | debugf(DBG_LEVEL_EXT, 633 | "exit 2: check_path_info(%s) "KYEL"ignoring root [CACHE-MISS]", path); 634 | else 635 | debugf(DBG_LEVEL_EXT, "exit 3: check_path_info(%s) "KYEL"[CACHE-MISS]", path); 636 | return NULL; 637 | } 638 | 639 | 640 | char* get_home_dir() 641 | { 642 | char* home; 643 | if ((home = getenv("HOME")) && !access(home, R_OK)) 644 | return home; 645 | struct passwd* pwd = getpwuid(geteuid()); 646 | if ((home = pwd->pw_dir) && !access(home, R_OK)) 647 | return home; 648 | return "~"; 649 | } 650 | 651 | void cloudfs_debug(int dbg) 652 | { 653 | debug = dbg; 654 | } 655 | 656 | void debugf(int level, char* fmt, ...) 657 | { 658 | if (debug) 659 | { 660 | if (level <= option_debug_level) 661 | { 662 | #ifdef SYS_gettid 663 | pid_t thread_id = syscall(SYS_gettid); 664 | #else 665 | int thread_id = 0; 666 | #error "SYS_gettid unavailable on this system" 667 | #endif 668 | va_list args; 669 | char prefix[] = "==DBG %d [%s]:%d=="; 670 | char line[4096]; 671 | char time_str[TIME_CHARS]; 672 | get_time_now_as_str(time_str, sizeof(time_str)); 673 | sprintf(line, prefix, level, time_str, thread_id); 674 | fputs(line, stderr); 675 | va_start(args, fmt); 676 | vfprintf(stderr, fmt, args); 677 | va_end(args); 678 | fputs(KNRM, stderr); 679 | putc('\n', stderr); 680 | putc('\r', stderr); 681 | } 682 | } 683 | } 684 | -------------------------------------------------------------------------------- /commonfs.h: -------------------------------------------------------------------------------- 1 | #ifndef _COMMONFS_H 2 | #define _COMMONFS_H 3 | #define FUSE_USE_VERSION 30 4 | #include 5 | 6 | typedef enum { false, true } bool; 7 | #define MAX_PATH_SIZE 
(1024 + 256 + 3) 8 | #define THREAD_NAMELEN 16 9 | // 64 bit time + nanoseconds 10 | #define TIME_CHARS 32 11 | #define DBG_LEVEL_NORM 0 12 | #define DBG_LEVEL_EXT 1 13 | #define DBG_LEVEL_EXTALL 2 14 | #define INT_CHAR_LEN 16 15 | #define MD5_DIGEST_HEXA_STRING_LEN (2 * MD5_DIGEST_LENGTH + 1) 16 | 17 | // utimens support 18 | #define HEADER_TEXT_MTIME "X-Object-Meta-Mtime" 19 | #define HEADER_TEXT_ATIME "X-Object-Meta-Atime" 20 | #define HEADER_TEXT_CTIME "X-Object-Meta-Ctime" 21 | #define HEADER_TEXT_MTIME_DISPLAY "X-Object-Meta-Mtime-Display" 22 | #define HEADER_TEXT_ATIME_DISPLAY "X-Object-Meta-Atime-Display" 23 | #define HEADER_TEXT_CTIME_DISPLAY "X-Object-Meta-Ctime-Display" 24 | #define HEADER_TEXT_CHMOD "X-Object-Meta-Chmod" 25 | #define HEADER_TEXT_UID "X-Object-Meta-Uid" 26 | #define HEADER_TEXT_GID "X-Object-Meta-Gid" 27 | #define HEADER_TEXT_FILEPATH "X-Object-Meta-FilePath" 28 | #define TEMP_FILE_NAME_FORMAT "%s/.cloudfuse_%s" 29 | #define HUBIC_DATE_FORMAT "%Y-%m-%d %T." 30 | 31 | #define KNRM "\x1B[0m" 32 | #define KRED "\x1B[31m" 33 | #define KGRN "\x1B[32m" 34 | #define KYEL "\x1B[33m" 35 | #define KBLU "\x1B[34m" 36 | #define KMAG "\x1B[35m" 37 | #define KCYN "\x1B[36m" 38 | #define KWHT "\x1B[37m" 39 | 40 | #define min(x, y) ({ \ 41 | typeof(x) _min1 = (x); \ 42 | typeof(y) _min2 = (y); \ 43 | (void)(&_min1 == &_min2); \ 44 | _min1 < _min2 ? 
_min1 : _min2; }) 45 | 46 | //linked list with files in a directory 47 | typedef struct dir_entry 48 | { 49 | char* name; 50 | char* full_name; 51 | char* content_type; 52 | off_t size; 53 | time_t last_modified; 54 | // implement utimens 55 | struct timespec mtime; 56 | struct timespec ctime; 57 | struct timespec atime; 58 | char* md5sum; //interesting capability for rsync/scrub 59 | mode_t chmod; 60 | uid_t uid; 61 | gid_t gid; 62 | bool issegmented; 63 | time_t accessed_in_cache;//todo: cache support based on access time 64 | bool metadata_downloaded; 65 | // end change 66 | int isdir; 67 | int islink; 68 | struct dir_entry* next; 69 | } dir_entry; 70 | 71 | // linked list with cached folder names 72 | typedef struct dir_cache 73 | { 74 | char* path; 75 | dir_entry* entries; 76 | time_t cached; 77 | //added cache support based on access time 78 | time_t accessed_in_cache; 79 | bool was_deleted; 80 | //end change 81 | struct dir_cache* next, *prev; 82 | } dir_cache; 83 | 84 | typedef struct 85 | { 86 | int fd; 87 | int flags; 88 | } openfile; 89 | 90 | time_t my_timegm(struct tm* tm); 91 | time_t get_time_from_str_as_gmt(char* time_str); 92 | time_t get_time_as_local(time_t time_t_val, char time_str[], 93 | int char_buf_size); 94 | int get_time_as_string(time_t time_t_val, long nsec, char* time_str, 95 | int time_str_len); 96 | time_t get_time_now(); 97 | int get_timespec_as_str(const struct timespec* times, char* time_str, 98 | int time_str_len); 99 | char* str2md5(const char* str, int length); 100 | int file_md5(FILE* file_handle, char* md5_file_str); 101 | void debug_print_descriptor(struct fuse_file_info* info); 102 | int get_safe_cache_file_path(const char* file_path, char* file_path_safe, 103 | char* temp_dir); 104 | dir_entry* init_dir_entry(); 105 | void copy_dir_entry(dir_entry* src, dir_entry* dst); 106 | dir_cache* new_cache(const char* path); 107 | void dir_for(const char* path, char* dir); 108 | void debug_list_cache_content(); 109 | void 
update_dir_cache(const char* path, off_t size, int isdir, int islink); 110 | dir_entry* path_info(const char* path); 111 | dir_entry* check_path_info(const char* path); 112 | dir_entry* check_parent_folder_for_file(const char* path); 113 | void dir_decache(const char* path); 114 | void cloudfs_free_dir_list(dir_entry* dir_list); 115 | extern int cloudfs_list_directory(const char* path, dir_entry**); 116 | int caching_list_directory(const char* path, dir_entry** list); 117 | char* get_home_dir(); 118 | void cloudfs_debug(int dbg); 119 | void debugf(int level, char* fmt, ...); 120 | 121 | #endif 122 | -------------------------------------------------------------------------------- /config.h.in: -------------------------------------------------------------------------------- 1 | /* config.h.in. Generated from configure.in by autoheader. */ 2 | 3 | /* Define to one of `_getb67', `GETB67', `getb67' for Cray-2 and Cray-YMP 4 | systems. This function is required for `alloca.c' support on those systems. 5 | */ 6 | #undef CRAY_STACKSEG_END 7 | 8 | /* Define to 1 if using `alloca.c'. */ 9 | #undef C_ALLOCA 10 | 11 | /* Define to 1 if you have the `alarm' function. */ 12 | #undef HAVE_ALARM 13 | 14 | /* Define to 1 if you have `alloca', as a function or macro. */ 15 | #undef HAVE_ALLOCA 16 | 17 | /* Define to 1 if you have and it should be used (not on Ultrix). 18 | */ 19 | #undef HAVE_ALLOCA_H 20 | 21 | /* Define to 1 if you have the header file. */ 22 | #undef HAVE_CURL_CURL_H 23 | 24 | /* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ 25 | #undef HAVE_DOPRNT 26 | 27 | /* Define to 1 if you have the header file. */ 28 | #undef HAVE_FCNTL_H 29 | 30 | /* Define to 1 if you have the `ftruncate' function. */ 31 | #undef HAVE_FTRUNCATE 32 | 33 | /* Define to 1 if you have the header file. */ 34 | #undef HAVE_FUSE_H 35 | 36 | /* Define to 1 if you have the header file. */ 37 | #undef HAVE_INTTYPES_H 38 | 39 | /* Define to 1 if you have the header file. 
*/ 40 | #undef HAVE_LIBXML_TREE_H 41 | 42 | /* Define to 1 if your system has a GNU libc compatible `malloc' function, and 43 | to 0 otherwise. */ 44 | #undef HAVE_MALLOC 45 | 46 | /* Define to 1 if you have the `memmove' function. */ 47 | #undef HAVE_MEMMOVE 48 | 49 | /* Define to 1 if you have the header file. */ 50 | #undef HAVE_MEMORY_H 51 | 52 | /* Openssl headers were found */ 53 | #undef HAVE_OPENSSL 54 | 55 | /* Define to 1 if you have the header file. */ 56 | #undef HAVE_PTHREAD_H 57 | 58 | /* Define to 1 if you have the header file. */ 59 | #undef HAVE_STDDEF_H 60 | 61 | /* Define to 1 if you have the header file. */ 62 | #undef HAVE_STDINT_H 63 | 64 | /* Define to 1 if you have the header file. */ 65 | #undef HAVE_STDLIB_H 66 | 67 | /* Define to 1 if you have the `strcasecmp' function. */ 68 | #undef HAVE_STRCASECMP 69 | 70 | /* Define to 1 if you have the `strchr' function. */ 71 | #undef HAVE_STRCHR 72 | 73 | /* Define to 1 if you have the `strdup' function. */ 74 | #undef HAVE_STRDUP 75 | 76 | /* Define to 1 if you have the header file. */ 77 | #undef HAVE_STRINGS_H 78 | 79 | /* Define to 1 if you have the header file. */ 80 | #undef HAVE_STRING_H 81 | 82 | /* Define to 1 if you have the `strncasecmp' function. */ 83 | #undef HAVE_STRNCASECMP 84 | 85 | /* Define to 1 if you have the `strrchr' function. */ 86 | #undef HAVE_STRRCHR 87 | 88 | /* Define to 1 if you have the `strstr' function. */ 89 | #undef HAVE_STRSTR 90 | 91 | /* Define to 1 if `st_blocks' is a member of `struct stat'. */ 92 | #undef HAVE_STRUCT_STAT_ST_BLOCKS 93 | 94 | /* Define to 1 if your `struct stat' has `st_blocks'. Deprecated, use 95 | `HAVE_STRUCT_STAT_ST_BLOCKS' instead. */ 96 | #undef HAVE_ST_BLOCKS 97 | 98 | /* Define to 1 if you have the header file. */ 99 | #undef HAVE_SYS_STAT_H 100 | 101 | /* Define to 1 if you have the header file. */ 102 | #undef HAVE_SYS_TIME_H 103 | 104 | /* Define to 1 if you have the header file. 
*/ 105 | #undef HAVE_SYS_TYPES_H 106 | 107 | /* Define to 1 if you have the header file. */ 108 | #undef HAVE_UNISTD_H 109 | 110 | /* Define to 1 if you have the `vprintf' function. */ 111 | #undef HAVE_VPRINTF 112 | 113 | /* Define to the address where bug reports for this package should be sent. */ 114 | #undef PACKAGE_BUGREPORT 115 | 116 | /* Define to the full name of this package. */ 117 | #undef PACKAGE_NAME 118 | 119 | /* Define to the full name and version of this package. */ 120 | #undef PACKAGE_STRING 121 | 122 | /* Define to the one symbol short name of this package. */ 123 | #undef PACKAGE_TARNAME 124 | 125 | /* Define to the home page for this package. */ 126 | #undef PACKAGE_URL 127 | 128 | /* Define to the version of this package. */ 129 | #undef PACKAGE_VERSION 130 | 131 | /* Define as the return type of signal handlers (`int' or `void'). */ 132 | #undef RETSIGTYPE 133 | 134 | /* If using the C implementation of alloca, define if you know the 135 | direction of stack growth for your system; otherwise it will be 136 | automatically deduced at runtime. 137 | STACK_DIRECTION > 0 => grows toward higher addresses 138 | STACK_DIRECTION < 0 => grows toward lower addresses 139 | STACK_DIRECTION = 0 => direction of growth unknown */ 140 | #undef STACK_DIRECTION 141 | 142 | /* Define to 1 if you have the ANSI C header files. */ 143 | #undef STDC_HEADERS 144 | 145 | /* Define to 1 if you can safely include both and . */ 146 | #undef TIME_WITH_SYS_TIME 147 | 148 | /* Define to 1 if your declares `struct tm'. */ 149 | #undef TM_IN_SYS_TIME 150 | 151 | /* Define to empty if `const' does not conform to ANSI C. */ 152 | #undef const 153 | 154 | /* Define to `int' if doesn't define. */ 155 | #undef gid_t 156 | 157 | /* Define to rpl_malloc if the replacement function should be used. */ 158 | #undef malloc 159 | 160 | /* Define to `int' if does not define. */ 161 | #undef mode_t 162 | 163 | /* Define to `long int' if does not define. 
*/ 164 | #undef off_t 165 | 166 | /* Define to `unsigned int' if does not define. */ 167 | #undef size_t 168 | 169 | /* Define to `int' if doesn't define. */ 170 | #undef uid_t 171 | -------------------------------------------------------------------------------- /configure.ac: -------------------------------------------------------------------------------- 1 | # -*- Autoconf -*- 2 | # Process this file with autoconf to produce a configure script. 3 | 4 | AC_PREREQ(2.61) 5 | AC_INIT([hubicfuse], [3.0.0], [Pascal Obry ]) 6 | AC_SUBST(CPPFLAGS, "$CPPFLAGS -D_FILE_OFFSET_BITS=64 -I/usr/include/libxml2") 7 | AC_LANG([C]) 8 | AC_CONFIG_SRCDIR([cloudfuse.c]) 9 | AC_CONFIG_HEADER([config.h]) 10 | AC_CONFIG_FILES([Makefile]) 11 | 12 | # Checks for programs. 13 | AC_PROG_CC 14 | AC_PROG_INSTALL 15 | AC_PROG_MKDIR_P 16 | 17 | # Checks that pkg-config is installed 18 | PKG_PROG_PKG_CONFIG 19 | 20 | # Checks for libraries. 21 | PKG_CHECK_MODULES(XML, libxml-2.0, , AC_MSG_ERROR('Unable to find libxml2. Please make sure library and header files are installed.')) 22 | PKG_CHECK_MODULES(CURL, libcurl, , AC_MSG_ERROR('Unable to find libcurl. Please make sure library and header files are installed.')) 23 | PKG_CHECK_MODULES(FUSE, fuse, , AC_MSG_ERROR('Unable to find libfuse. Please make sure library and header files are installed.')) 24 | PKG_CHECK_MODULES(JSON, json-c, , 25 | PKG_CHECK_MODULES(JSON, json, , AC_MSG_ERROR('Unable to find libjson. Please make sure library and header files are installed.'))) 26 | PKG_CHECK_MODULES(OPENSSL, openssl, , []) 27 | 28 | # Checks for header files. 29 | AC_FUNC_ALLOCA 30 | AC_HEADER_STDC 31 | AC_CHECK_HEADERS([fcntl.h stdint.h stddef.h stdlib.h string.h strings.h sys/time.h unistd.h pthread.h fuse.h curl/curl.h libxml/tree.h]) 32 | AC_CHECK_HEADER([openssl/crypto.h], AC_DEFINE([HAVE_OPENSSL], [], [Openssl headers were found]), , []) 33 | AC_CHECK_HEADER([magic.h], , AC_MSG_ERROR('Unable to find libmagic headers. 
Please make sure header files are installed.')) 34 | 35 | # Checks for typedefs, structures, and compiler characteristics. 36 | AC_C_CONST 37 | AC_TYPE_UID_T 38 | AC_TYPE_MODE_T 39 | AC_TYPE_OFF_T 40 | AC_TYPE_SIZE_T 41 | AC_STRUCT_TM 42 | AC_STRUCT_ST_BLOCKS 43 | 44 | # Checks for library functions. 45 | AC_FUNC_MALLOC 46 | AC_FUNC_MKTIME 47 | AC_TYPE_SIGNAL 48 | AC_FUNC_VPRINTF 49 | AC_CHECK_FUNCS([ftruncate memmove strcasecmp strchr strdup strncasecmp strrchr strstr]) 50 | 51 | AC_OUTPUT 52 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | /hubicfuse/hubicfuse /mnt/hubic -o noauto_cache,sync_read,allow_other; 3 | echo "mounted, starting bash" 4 | exec /bin/bash 5 | -------------------------------------------------------------------------------- /docker_mount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker build -t hubicfuse . 4 | docker run -v ~/.hubicfuse:/root/.hubicfuse -v $(pwd)/hubic_mount:/mnt/hubic:shared --device /dev/fuse --cap-add SYS_ADMIN --security-opt apparmor:unconfined -it hubicfuse 5 | -------------------------------------------------------------------------------- /hubic_token: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 4 | # Version 2, December 2004 5 | 6 | # Copyright (C) 2014 - Alain BENEDETTI 7 | 8 | # Everyone is permitted to copy and distribute verbatim or modified 9 | # copies of this license document, and changing it is allowed as long 10 | # as the name is changed. 11 | 12 | # DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 13 | # TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 14 | 15 | # 0. You just DO WHAT THE FUCK YOU WANT TO. 16 | 17 | # This program is free software. 
It comes without any warranty, to 18 | # the extent permitted by applicable law. You can redistribute it 19 | # and/or modify it under the terms of the Do What The Fuck You Want 20 | # To Public License, Version 2, as published by Sam Hocevar. See 21 | # http://www.wtfpl.net/ for more details. 22 | #=========================================================== 23 | 24 | # Fonction : 25 | # ---------- 26 | # - This utility will get you the 'refresh_token' that is needed for your 27 | # programm/app/script, etc... to access your hubiC objects. 28 | # 29 | # Usage : 30 | # ------- 31 | # hubic_token [-k|--insecure] [-V|--version] 32 | # -k|--insecure: connects to the server even when certificate 33 | # verification fails. If certificate verification fails 34 | # and this option is not passed, the script will fail to 35 | # protect from possible security issues. 36 | # -V|--version : displays the version of this script and exits. 37 | # 38 | # 39 | # Tested : Ubuntu Trusty, 40 | # ------ Synology (with coreutils and curl installed as Syno wget does NOT 41 | # have HTTPS support compiled in) 42 | # 43 | # Depends on : - a shell (bash NOT required, dash or ash is enough) 44 | # ---------- - sed 45 | # - curl (better) or wget (with HTTPS support) as a fallback. 46 | # No other gnu utility is necessary to run this script! 47 | # Optionnaly, if you have dd and base64, you will get a better 48 | # random seed, but that is not mandatory at all. 49 | # 50 | # Version : 1.0.0 51 | # ------- 52 | # 53 | # Date : 2014-11-28 54 | # ----- 55 | # 56 | # Author : Alain BENEDETTI 57 | # ------ 58 | # 59 | # History : 60 | # ------- 61 | # 1.0.0 62 | # - Initial version 63 | # 64 | # Contributor : // Add your name if you contribute and redistribute // 65 | # ----------- 66 | # - Sofiane, who gave me the initial raw version of the full connection script. 
67 | # - Pascal Obry - added the URL encoding of user password 68 | 69 | # ====================================================================================== 70 | # General notes: the script is written to work on the Ubuntu's default shell (dash), 71 | # -------------- thus it contains no 'bashism' and can be run easily on plaforms like 72 | # NAS. It is also tested with Synology (with coreutils and curl installed). 73 | # 74 | # Why this script: the oAuth used by hubiC (OVH) is now quite well documented but only 75 | # ---------------- partially implemented by hubiC. 76 | # - What is implemented: declare an "app". Here you will get the client_id and 77 | # client_secret necessary to authorize your app to interact. You also declare a 78 | # redirect_uri. This is where you should normally have to implement what is 79 | # missing from hubiC to get the access_token for you app! To avoid having 80 | # to implement anything, you can use this script instead, then the redirect_uri 81 | # you give can be any fake uri (as long as it is a valid uri), like for 82 | # example http://localhost 83 | # - What is NOT implemented by Hubic: 84 | # ... well, pretty much everything else beyond declaring your app! 85 | # So the rest of the process is: 86 | # -1) deciding which permissions you grant to your 'app' 87 | # -2) get the refresh_token/access_token 88 | # For 1), you obviously need your main hubiC user/password, but once you 89 | # get that refresh_token, your app can connect to your hubiC account with 90 | # only that, plus the app id and secret... which is the purpose of oAuth 91 | # (authorize an app to access some of your objects without exposing your main 92 | # user/password). 93 | # So, this is a second layer above the id/secret of the app, because id/secret 94 | # alone is not enough to connect. 95 | # Let's suppose you gave id/secret AND refresh_token to someone. He can run the 96 | # app, and have access to what is authorized to the app. 
If you want to revoke 97 | # the authorizations of that person, you don't need to revoke the app, just run 98 | # this script again. You will then get a new refresh_token for yourself to connect 99 | # but the old refresh_token you gave to the other person won't be valid anymore. 100 | # 101 | # Legal concern: I don't work at OVH... but from their perspective this script does 102 | # -------------- not more that what you could do yourself with a browser. As you 103 | # don't need to run it often: just once to get the refresh_token, and once more 104 | # if you lose that token, there should be no harm done to OVH infrastructures! 105 | # It will even save some calls... for example, hubicfuse does all the process 106 | # everytime you connect it! So with that script, we should be able to save some 107 | # requests! 108 | # ... so, unless OVH raises any objection, this should be safe... otherwise copy 109 | # the requests, and do them with your browser! 110 | # 111 | # Security: As this is mainly for security (and also to get better performance 112 | # --------- at the initialization of your app), this script does not read secrets 113 | # from a configuration file, nor as parameters... this is also to keep the script 114 | # very simple! 115 | # The script simply prompts for those elements. 116 | # Nevertheless, if you ever want to use this script in an automated way, you can: 117 | # - prepare a file with all the answers and redirect the input to that file. 118 | # - set the appropriate environment variable prior to using that script. 119 | # The variables are named according to their names in the hubiC requests: 120 | # -- client_id (The id of your app) 121 | # -- client_secret (The secret of your app) 122 | # -- redirect_uri (The redirect_uri you declared at the app creation) 123 | # -- scope (The authorizations you grant to the app, see hubiC's documentation). 
124 | # -- user_login (Your main hubiC identification -the email you gave when you applied) 125 | # NOTE: it is called 'login' in hubiC's requests, but to avoid confusion with the 126 | # standard utility login, we call it here: user_login. 127 | # -- user_pwd (Your main hubic password) 128 | # 129 | # Embedding: This scripts writes only the version (when option -V is passed) and the 130 | # --------- final result if all went OK to standard output. All other messages go 131 | # to error output. 132 | # The exit code let you know what happened. 133 | # You can then easily embed this script, pipe it, or redirect the output to a file 134 | # for the rest of your script that needs the tokens. 135 | # -------------------------------------------------------------------------------------- 136 | 137 | 138 | 139 | # -------------------------------------------------------------------------------------- 140 | # Internal constants | 141 | # -------------------------------------------------------------------------------------- 142 | URL_AUTH='https://api.hubic.com/oauth' 143 | VERSION='1.0.0' 144 | RANDOM_STR='TVq6zsU0A_GHIS6iYtbc7uc2c5jdpwIMczyMCsABJXbd' 145 | 146 | # -------------------------------------------------------------------------------------- 147 | # Messages | 148 | # -------------------------------------------------------------------------------------- 149 | # ENGLISH 150 | PROMPT_CLIENT_ID="client_id (the app's id): " 151 | PROMPT_CLIENT_SECRET="client_secret (the app's secret): " 152 | PROMPT_REDIRECT_URI="redirect_uri (declared at app's creation): " 153 | PROMPT_USER_LOGIN='user_login (the e-mail you used to subscribe): ' 154 | PROMPT_USER_PWD="user_pwd (your hubiC's main password): " 155 | MSG_SCOPE="For the scope -what you authorize your app to do-, enter characters as suggested 156 | in parenthesis, or just hit return if you don't need the item authorized." 
157 | PROMPT_USAGE='Get account usage (r): ' 158 | PROMPT_GETALLLINKS='Get all published links in one call (r): ' 159 | PROMPT_CREDENTIALS='Get OpenStack credentials, eg. access to your files (r): ' 160 | PROMPT_ACTIVATE='Send activation email (w): ' 161 | PROMPT_LINKS='Add new/Get/Delete published link (wrd): ' 162 | 163 | ERR_BAD_ARG='Unknown argument: ' 164 | MSG_USAGE='Usage: ' 165 | MSG_OPTIONS=" [-k|--insecure] [-V|--version] 166 | -k 167 | --insecure: connects to the server even when certificate authentication fails. 168 | -V 169 | --version : displays the version of this script and exits." 170 | 171 | MSG_VERSION=", version: " 172 | 173 | ERR_CURL="Can't find: curl or wget. Please intall one, ex.: sudo apt-get install curl." 174 | ERR_HTTPS="ERROR: certificate verification failed. 175 | If you want to ignore certificate verification, use -k option." 176 | ERR_CNX='Unexpected error trying to connect to hubiC, see error code.' 177 | ERR_UNEXPECTED='Unexpected response from hubiC. Do your wget/curl have HTTPS support?' 178 | ERR_SED="Can't find: sed. Please intall it." 179 | 180 | ERR_CURL_FAILED='failed with error (see the exit code) at step' 181 | ERR_OAUTH_NOT_FOUND="Could not find 'oauth' in the server's response." 182 | ERR_OAUTH_HTTP="HTTP unexpected response code during oauth's request." 183 | ERR_REASON='The server said, error: ' 184 | ERR_REASON_DESC='Error description: ' 185 | ERR_REASON_UNKNOWN="Could not parse the error message from the server's response." 186 | ERR_CODE_NOT_FOUND="Could not find 'code' in the server's response." 187 | ERR_CODE_HTTP="HTTP unexpected response code during code's request." 188 | ERR_TOKEN_NOT_FOUND="Could not find 'refresh_token' in the server's response." 189 | ERR_TOKEN_HTTP="HTTP unexpected response code during refresh_token's request." 190 | 191 | ERR_OUT='Server full response:' 192 | 193 | MSG_SUCCESS='Success!' 
194 | MSG_HEAD_RESULT='# Here is what your app needs to connect to hubiC:' 195 | 196 | # -------------------------------------------------------------------------------------- 197 | # Intenationalization: If the message files corresponding to your language, | 198 | # -------------------- as detected through the LANG environment variable, are | 199 | # provided, they will be used instead of fallback english | 200 | # messages defined above. | 201 | # The language file must be in the same directory as the script. As LANG is of the | 202 | # form: LANG="fr_FR.UTF-8", we will search first for: script_name_fr_FR.txt, and | 203 | # if it does not exist, for script_name_fr.txt | 204 | # The first file that exists will be sourced here. If none exist, or if LANG is not | 205 | # defined in the environment variables, you get the default english messages above. | 206 | # -------------------------------------------------------------------------------------- 207 | if [ -n "${LANG}" ]; then 208 | LANG_FILE="${0}_$( printf '%.5s' "${LANG}" ).txt" 209 | if [ -f "${LANG_FILE}" ]; then 210 | . "${LANG_FILE}" 211 | else 212 | LANG_FILE="${0}_$( printf '%.2s' "${LANG}" ).txt" 213 | [ -f "${LANG_FILE}" ] && . "${LANG_FILE}" 214 | fi 215 | fi 216 | 217 | 218 | # -------------------------------------------------------------------------------------- 219 | # error utility | 220 | # $1 is the error message we want to print out (if not empty). | 221 | # $2 if present, will trigger the display of the response from the server. | 222 | # Unless wget/curl fails, the exit code will indicate at which step we failed: | 223 | # -100: illegal argument | 224 | # -101: initialization failed | 225 | # -102: first request | 226 | # -103: second request | 227 | # -104: last request | 228 | # When wget/curl fails, this function is not used, and you get instead the exit code | 229 | # of wget/curl. 
| 230 | # Our codes begin are from 101 to 104 to be able to distinguish from wget/curl error | 231 | # codes that are from 1 to 89. | 232 | # -------------------------------------------------------------------------------------- 233 | STEP=0 234 | 235 | error() 236 | { 237 | [ -n "${1}" ] && echo "${1}" >&2 238 | if [ -n "${2}" ] && [ -n "${out}" ]; then 239 | echo "${ERR_OUT}" >&2 240 | printf -- '%s' "${out}" >&2 241 | fi 242 | exit $(( ${STEP} + 100 )) 243 | } 244 | 245 | # -------------------------------------------------------------------------------------- 246 | # URL encoder | 247 | # $1 is a string to be passed as URL parameter | 248 | # the string is URL encoded and returned as result | 249 | # -------------------------------------------------------------------------------------- 250 | 251 | urlenc() 252 | { 253 | echo "$1" | sed -e 's|%|%25|g' \ 254 | -e 's|!|%21|g' \ 255 | -e 's|#|%23|g' \ 256 | -e 's|\$|%24|g' \ 257 | -e 's| |%20|g' \ 258 | -e 's|&|%26|g' \ 259 | -e "s|'|%27|g" \ 260 | -e 's|(|%28|g' \ 261 | -e 's|)|%29|g' \ 262 | -e 's|*|%2A|g' \ 263 | -e 's|+|%2B|g' \ 264 | -e 's|,|%2C|g' \ 265 | -e 's|/|%2F|g' \ 266 | -e 's|:|%3A|g' \ 267 | -e 's|;|%3B|g' \ 268 | -e 's|=|%3D|g' \ 269 | -e 's|?|%3F|g' \ 270 | -e 's|@|%40|g' \ 271 | -e 's|\[|%5B|g' \ 272 | -e 's|]|%5D|g' 273 | } 274 | 275 | # -------------------------------------------------------------------------------------- 276 | # STEP 0: Read arguments. 
| 277 | # NOTE: to make it simple, we don't accept things like -kV because anyway it | 278 | # is identical to -V | 279 | # -------------------------------------------------------------------------------------- 280 | V='' 281 | CURL_OPTS='-s' 282 | for arg in "$@"; do 283 | case "${arg}" in 284 | '-k' | '--insecure' ) 285 | CURL_OPTS='-k' 286 | ;; 287 | '-V' | '--version' ) 288 | V='-V' 289 | ;; 290 | *) 291 | echo "${ERR_BAD_ARG} '${arg}'" >&2 292 | error "${MSG_USAGE}$(printf -- '%s' "${0}" | sed 's|.*/||')${MSG_OPTIONS}" 293 | esac 294 | done 295 | if [ -n "${V}" ]; then 296 | echo "$(printf -- '%s' "${0}" | sed 's|.*/||')${MSG_VERSION}${VERSION}" 297 | exit 0 298 | fi 299 | 300 | 301 | # -------------------------------------------------------------------------------------- 302 | # STEP 1: Check the existence of programs we absolutely need (no possible fallback). | 303 | # Note: we also test if there is https support on the detected wget/curl, plus | 304 | # that a connection to our hubiC URL returns a 301 (it is expected). | 305 | # -------------------------------------------------------------------------------------- 306 | STEP=1 307 | 308 | if [ -z "$( sed --version 2>/dev/null )" ]; then 309 | error 1 "${ERR_SED}" 310 | fi 311 | 312 | if [ -z "$( curl --version 2>/dev/null )" ]; then 313 | if [ -z "$( wget --version 2>/dev/null )" ]; then 314 | error 1 "${ERR_CURL}" 315 | else 316 | CURL=wget 317 | CURL_DATA='--post-data' 318 | if [ "${CURL_OPTS}" == '-s' ]; then 319 | CURL_OPTS='-q' 320 | else 321 | CURL_OPTS='--no-check-certificate' 322 | fi 323 | out="$(wget -S -q "${CURL_OPTS}" --max-redirect 0 "${URL_AUTH}" -O /dev/null 2>&1)" 324 | ERR=$? 325 | [ $ERR -eq 5 ] && error "${ERR_HTTPS}" 326 | [ $ERR -eq 8 ] && ERR=0 327 | fi 328 | else 329 | CURL=curl 330 | CURL_DATA='--data' 331 | out="$(curl -i -s "${CURL_OPTS}" "${URL_AUTH}")" 332 | ERR=$? 
333 | [ $ERR -eq 60 ] && error "${ERR_HTTPS}" 334 | fi 335 | if [ $ERR -ne 0 ]; then 336 | echo "$ERR_CNX" >&2 337 | exit $ERR 338 | else 339 | if [ -z "$( printf '%s' "${out}" | grep 'HTTP\/1\.1 301' )" ]; then 340 | error "${ERR_UNEXPECTED}" 'y' 341 | fi 342 | fi 343 | 344 | # -------------------------------------------------------------------------------------- 345 | # curl/wget wrapper. | 346 | # For wget, we 'trap' the exit code 8 that only means we didn't get a 200. It is a | 347 | # 'normal' condition as we expect some 302, and have some documented errors with 400. | 348 | # -------------------------------------------------------------------------------------- 349 | ccurl() 350 | { 351 | if [ "${CURL}" = 'wget' ]; then 352 | out="$(wget "${CURL_OPTS}" -q -O - --max-redirect 0 -S "${@}" 2>&1 )" 353 | ERR=$? 354 | [ $ERR -eq 8 ] && return 0 355 | else 356 | out="$(curl "${CURL_OPTS}" -i -s "${@}")" 357 | ERR=$? 358 | fi 359 | 360 | if [ $ERR -ne 0 ]; then 361 | echo "${CURL} ${ERR_CURL_FAILED} ${STEP}." >&2 362 | exit $ERR 363 | fi 364 | } 365 | 366 | 367 | # -------------------------------------------------------------------------------------- 368 | # Prompt for the variables: client_id, client_secret, etc... | 369 | # NOTE: we don't prompt for account basic information access in the scope, because | 370 | # apparently, even if you don't give it in the scope, it is always authorized. | 371 | # So the minimal 'scope' variable will be: scope='account.r' | 372 | # -------------------------------------------------------------------------------------- 373 | 374 | if [ -z "${client_id}" ]; then 375 | read -p "${PROMPT_CLIENT_ID}" client_id || exit $? 376 | fi 377 | 378 | if [ -z "${client_secret}" ]; then 379 | read -p "${PROMPT_CLIENT_SECRET}" client_secret || exit $? 380 | fi 381 | 382 | if [ -z "${redirect_uri}" ]; then 383 | read -p "${PROMPT_REDIRECT_URI}" redirect_uri || exit $? 
384 | fi 385 | 386 | if [ -z "${scope}" ]; then 387 | printf '\n%s\n' "${MSG_SCOPE}" >&2 388 | 389 | scope='account.r' 390 | 391 | read -p "${PROMPT_USAGE}" usage || exit $? 392 | [ "$usage" = 'r' ] && scope="${scope},usage.r" 393 | 394 | read -p "${PROMPT_GETALLLINKS}" getAllLinks || exit $? 395 | [ "$getAllLinks" = 'r' ] && scope="${scope},getAllLinks.r" 396 | 397 | read -p "${PROMPT_CREDENTIALS}" credentials || exit $? 398 | [ "$credentials" = 'r' ] && scope="${scope},credentials.r" 399 | 400 | read -p "${PROMPT_ACTIVATE}" activate || exit $? 401 | [ "$activate" = 'w' ] && scope="${scope},activate.w" 402 | 403 | read -p "${PROMPT_LINKS}" links || exit $? 404 | l="$( printf -- '%s' "${links}" | sed 's/[^\(w\|r\|d\)]//g' )" 405 | [ -n "$l" ] && [ "${l}" = "${links}" ] && scope="${scope},links.${l}" 406 | printf '\n' >&2 407 | fi 408 | 409 | if [ -z "${user_login}" ]; then 410 | read -p "${PROMPT_USER_LOGIN}" user_login || exit $? 411 | fi 412 | 413 | 414 | 415 | 416 | 417 | # -------------------------------------------------------------------------------------- 418 | # Each step is based on the same principle: | 419 | # - Prepare and send the request. | 420 | # - extract a string from the response. | 421 | # - error handling: | 422 | # = An error can happen during the request, in which case we exit with the | 423 | # return code of wget/curl. | 424 | # = An error can happen when trying to extract the string, if we can't find the | 425 | # string we search. The error message and entire server response will be | 426 | # displayed in this case. | 427 | # = There are some "documented" errors, generally indicated by a different HTTP | 428 | # status code. Should such error happen, we will then try to extract and | 429 | # display the documented message. Again if this documented message cannot be | 430 | # extracted, or we have another HTTP status, the whole response is dumped. 
| 431 | # | 432 | # STEP2: getting oauth | 433 | # The expected response is a html page with HTTP status 200. | 434 | # From this page we extract 'oauth' which is the value here: | 435 | # ... name="oauth" value="168341">&2 455 | if [ -n "$( printf '%s' "${out}" | sed -n '/1/h;/HTTP\/1\.1 302/p;q' )" ]; then 456 | ERR="$( printf '%s' "${out}" | sed -n 's/\&.*//;/error=/s/.*error=//p' )" 457 | if [ -n "${ERR}" ]; then 458 | printf "${ERR_REASON}%s\n" "${ERR}" >&2 459 | ERR="$( printf '%s' "${out}" | sed -n 's/.*error_description=//;s/\&.*//p' )" 460 | if [ -n "${ERR}" ]; then 461 | printf "${ERR_REASON_DESC}%s\n" "${ERR}" >&2 462 | fi 463 | error '' 464 | else 465 | error "${ERR_REASON_UNKNOWN}" 'y' 466 | fi 467 | fi 468 | error '' 'y' 469 | fi 470 | 471 | 472 | # -------------------------------------------------------------------------------------- 473 | # STEP3: setting app permissions and getting 'code' | 474 | # The expected response is a redirect (302). | 475 | # We extract the 'code' from the Location header of the redirect: | 476 | # Location: http://localhost/?code=14163171312491G7k3O0O2VGbRyk8t83&scope=usage.r ... | 477 | # | 478 | # Error extraction, if instead we get a 200, it is probably a bad login/user_pwd, so | 479 | # we extract the error given by the server (in the HTML page). | 480 | # | 481 | # NOTE: the user_pwd is read here for security reason, because we don't need it before | 482 | # this step. It is done inside a subshell to reduce its presence in memory. | 483 | # Also note that dash does not have the -s option for read, hence we use the | 484 | # stty command (hopefully it exists and works!..). We also disable tracing in | 485 | # the subshell, to avoid having the password displayed if the script was runned | 486 | # with traces on. 
| 487 | # -------------------------------------------------------------------------------------- 488 | STEP=3 489 | 490 | out="$( 491 | set +x 492 | if [ -z "${user_pwd}" ]; then 493 | printf -- '%s' "${PROMPT_USER_PWD}" >&2 494 | stty -echo 2>/dev/null 495 | read user_pwd || exit $? 496 | stty echo 2>/dev/null 497 | fi 498 | printf '\n' >&2 499 | POST="$( printf '%s' "${scope}" | sed 's|,|\&|g;s|\.|=|g' )&oauth=${oauth}&action=accepted&login=$(urlenc "${user_login}")&user_pwd=$(urlenc "${user_pwd}")" 500 | 501 | ccurl "${URL_AUTH}/auth/" "${CURL_DATA}" "${POST}" 502 | 503 | printf -- '%s' "${out}" 504 | )" || exit $? 505 | 506 | if [ -n "$( printf '%s' "${out}" | sed -n '/1/h;/HTTP\/1\.1 302/p;q' )" ]; then 507 | code="$(echo "${out}" | sed -n "s/.*?code=\(.*\)\&scope.*/\1/p")" 508 | if [ -z "$code" ]; then 509 | error "${ERR_CODE_NOT_FOUND}" 'y' 510 | fi 511 | else 512 | echo "${ERR_CODE_HTTP}" >&2 513 | if [ -n "$( printf '%s' "${out}" | sed -n '/1/h;/HTTP\/1\.1 200/p;q' )" ]; then 514 | ERR="$( printf '%s' "${out}" | sed -n '/class="text-error"/!d;N;s/.*\n//;s/^[ \t]*//;p' )" 515 | if [ -n "${ERR}" ]; then 516 | printf "${ERR_REASON}%s\n" "${ERR}" >&2 517 | error '' 518 | else 519 | error "${ERR_REASON_UNKNOWN}" 'y' 520 | fi 521 | fi 522 | error '' 'y' 523 | fi 524 | 525 | 526 | # -------------------------------------------------------------------------------------- 527 | # STEP4: getting the refresh_token | 528 | # The expected response is a JSON object (HTTP/1.1 200). | 529 | # Documented errors are with 400 and 401 return codes. We don't try to extract | 530 | # error strings with wget, because wget simply exits with an error when not | 531 | # receiving a 200 (the exception is 302 with can still be caugh). So when using | 532 | # wget, either this works (200) or we display the whole response. 
| 533 | # -------------------------------------------------------------------------------------- 534 | STEP=4 535 | 536 | POST="client_id=${client_id}&client_secret=${client_secret}&code=${code}&grant_type=authorization_code" 537 | 538 | if [ "${CURL}" = 'wget' ]; then 539 | POST="${POST}&redirect_uri=${redirect_uri}" 540 | CURL_ENCODE='-q' 541 | REDIR='-q' 542 | else 543 | CURL_ENCODE='--data-urlencode' 544 | REDIR="redirect_uri=${redirect_uri}" 545 | fi 546 | ccurl "${URL_AUTH}/token/" \ 547 | "${CURL_DATA}" "${POST}" \ 548 | "${CURL_ENCODE}" "${REDIR}" 549 | 550 | if [ -n "$( printf '%s' "${out}" | sed -n '/1/h;/HTTP\/1\.1 200/p;q' )" ]; then 551 | refresh_token="$( printf '%s' "${out}" | sed -n 's/{\"refresh_token\":\"//;s/\",\"expires_in\".*//p' )" 552 | if [ -z "$refresh_token" ]; then 553 | error "${ERR_TOKEN_NOT_FOUND}" 'y' 554 | fi 555 | else 556 | echo "${ERR_TOKEN_HTTP}" >&2 557 | if [ "${CURL}" = 'curl' ] && [ -n "$( printf '%s' "${out}" | sed -n '/1/h;/HTTP\/1\.1 40\(0\|1\)/p;q' )" ]; then 558 | ERR="$( printf '%s' "${out}" | sed -n 's/"}//;/"error"/s/.*"error":"//p' )" 559 | if [ -n "${ERR}" ]; then 560 | printf "${ERR_REASON}%s\n" "${ERR}" >&2 561 | ERR="$( printf '%s' "${out}" | sed -n 's/","error".*//;/error_description/s/{"error_description":"//p' )" 562 | if [ -n "${ERR}" ]; then 563 | printf "${ERR_REASON_DESC}%s\n" "${ERR}" >&2 564 | fi 565 | error '' 566 | else 567 | error "${ERR_REASON_UNKNOWN}" 'y' 568 | fi 569 | fi 570 | error '' 'y' 571 | fi 572 | 573 | 574 | # -------------------------------------------------------------------------------------- 575 | # THE END: we display the final result if all was successful | 576 | # -------------------------------------------------------------------------------------- 577 | 578 | echo >&2 579 | echo "${MSG_SUCCESS}" >&2 580 | echo >&2 581 | echo >&2 582 | echo "${MSG_HEAD_RESULT}" 583 | echo "client_id=${client_id}" 584 | echo "client_secret=${client_secret}" 585 | echo 
"refresh_token=${refresh_token}" 586 | -------------------------------------------------------------------------------- /hubic_token_fr.txt: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------------- 2 | # Messages | 3 | # -------------------------------------------------------------------------------------- 4 | # FRENCH 5 | PROMPT_CLIENT_ID="client_id (l'id de l'app): " 6 | PROMPT_CLIENT_SECRET="client_secret (le secret de l'app): " 7 | PROMPT_REDIRECT_URI="redirect_uri (declaré à la création de l'app): " 8 | PROMPT_USER_LOGIN="user_login (l'e-mail utilisé lors de l'inscription): " 9 | PROMPT_USER_PWD="user_pwd (votre mot de passe hubiC): " 10 | MSG_SCOPE="Pour le 'scope' -ce que vous autorisez à votre app-, entrez des caractères 11 | suggérés entre parenthèses, ou entrée si vous ne voulez pas autoriser l'item." 12 | PROMPT_USAGE="Lire le quota d'usage (r): " 13 | PROMPT_GETALLLINKS="Lire tous les liens publiés en une fois (r): " 14 | PROMPT_CREDENTIALS="Obtenir les autorisations OpenStack, ie. accéder à vos fichiers (r): " 15 | PROMPT_ACTIVATE="Envoyer un e-mail d'activation (w): " 16 | PROMPT_LINKS="Créer/Lire/Supprimer un lien publié (wrd): " 17 | 18 | ERR_BAD_ARG='Argument inconnu : ' 19 | MSG_USAGE='Usage : ' 20 | MSG_OPTIONS=" [-k|--insecure] [-V|--version] 21 | -k 22 | --insecure: se connecte même si l'authentification du certificat échoue. 23 | -V 24 | --version : affiche la version de ce script et se termine." 25 | 26 | MSG_VERSION=", version : " 27 | 28 | ERR_CURL="Impossible de trouver : curl or wget. Installez-en un, ex.: sudo apt-get install curl." 29 | ERR_HTTPS="ERREUR: vérification du certificat échouée. 30 | Si vous voulez ignorer la vérification du certificat, utilisez l'option -k." 31 | ERR_CNX="Erreur inattendue en essayant de se connecter à hubiC, voir code d'error." 32 | ERR_UNEXPECTED="Response inattendue d'hubiC. 
Votre wget/curl supporte-t-il HTTPS ?" 33 | ERR_SED="Impossible de trouver: sed. Merci de l'installer." 34 | 35 | ERR_CURL_FAILED="a échoué (voir le code de sortie) à l'étape" 36 | ERR_OAUTH_NOT_FOUND="Impossible de trouver 'oauth' dans la response du serveur." 37 | ERR_OAUTH_HTTP="Status de la réponse HTTP inattendu lors de la requête 'oauth'." 38 | ERR_REASON="Le serveur a dit, erreur : " 39 | ERR_REASON_DESC="Description de l'erreur : " 40 | ERR_REASON_UNKNOWN="Impossible de récupérer le message d'erreur dans la response du serveur." 41 | ERR_CODE_NOT_FOUND="Impossible de trouver 'code' dans la response du serveur." 42 | ERR_CODE_HTTP="Status de la réponse HTTP inattendu lors de la requête 'code'." 43 | ERR_TOKEN_NOT_FOUND="Impossible de trouver 'refresh_token' dans la response du serveur." 44 | ERR_TOKEN_HTTP="Status de la réponse HTTP inattendu lors de la requête 'refresh_token'." 45 | 46 | ERR_OUT="Réponse complète du serveur:" 47 | 48 | MSG_SUCCESS='Succès!' 49 | MSG_HEAD_RESULT="# Voici de dont vote app a besoin pour se connecter à hubiC :" 50 | 51 | -------------------------------------------------------------------------------- /install-sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # install - install a program, script, or datafile 3 | 4 | scriptversion=2006-10-14.15 5 | 6 | # This originates from X11R5 (mit/util/scripts/install.sh), which was 7 | # later released in X11R6 (xc/config/util/install.sh) with the 8 | # following copyright and license. 
9 | # 10 | # Copyright (C) 1994 X Consortium 11 | # 12 | # Permission is hereby granted, free of charge, to any person obtaining a copy 13 | # of this software and associated documentation files (the "Software"), to 14 | # deal in the Software without restriction, including without limitation the 15 | # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 16 | # sell copies of the Software, and to permit persons to whom the Software is 17 | # furnished to do so, subject to the following conditions: 18 | # 19 | # The above copyright notice and this permission notice shall be included in 20 | # all copies or substantial portions of the Software. 21 | # 22 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 23 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 24 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 25 | # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 26 | # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- 27 | # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 28 | # 29 | # Except as contained in this notice, the name of the X Consortium shall not 30 | # be used in advertising or otherwise to promote the sale, use or other deal- 31 | # ings in this Software without prior written authorization from the X Consor- 32 | # tium. 33 | # 34 | # 35 | # FSF changes to this file are in the public domain. 36 | # 37 | # Calling this script install-sh is preferred over install.sh, to prevent 38 | # `make' implicit rules from creating a file called install from it 39 | # when there is no Makefile. 40 | # 41 | # This script is compatible with the BSD install script, but was written 42 | # from scratch. 43 | 44 | nl=' 45 | ' 46 | IFS=" "" $nl" 47 | 48 | # set DOITPROG to echo to test this script 49 | 50 | # Don't use :- since 4.3BSD and earlier shells don't like it. 
51 | doit="${DOITPROG-}" 52 | if test -z "$doit"; then 53 | doit_exec=exec 54 | else 55 | doit_exec=$doit 56 | fi 57 | 58 | # Put in absolute file names if you don't have them in your path; 59 | # or use environment vars. 60 | 61 | mvprog="${MVPROG-mv}" 62 | cpprog="${CPPROG-cp}" 63 | chmodprog="${CHMODPROG-chmod}" 64 | chownprog="${CHOWNPROG-chown}" 65 | chgrpprog="${CHGRPPROG-chgrp}" 66 | stripprog="${STRIPPROG-strip}" 67 | rmprog="${RMPROG-rm}" 68 | mkdirprog="${MKDIRPROG-mkdir}" 69 | 70 | posix_glob= 71 | posix_mkdir= 72 | 73 | # Desired mode of installed file. 74 | mode=0755 75 | 76 | chmodcmd=$chmodprog 77 | chowncmd= 78 | chgrpcmd= 79 | stripcmd= 80 | rmcmd="$rmprog -f" 81 | mvcmd="$mvprog" 82 | src= 83 | dst= 84 | dir_arg= 85 | dstarg= 86 | no_target_directory= 87 | 88 | usage="Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE 89 | or: $0 [OPTION]... SRCFILES... DIRECTORY 90 | or: $0 [OPTION]... -t DIRECTORY SRCFILES... 91 | or: $0 [OPTION]... -d DIRECTORIES... 92 | 93 | In the 1st form, copy SRCFILE to DSTFILE. 94 | In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. 95 | In the 4th, create DIRECTORIES. 96 | 97 | Options: 98 | -c (ignored) 99 | -d create directories instead of installing files. 100 | -g GROUP $chgrpprog installed files to GROUP. 101 | -m MODE $chmodprog installed files to MODE. 102 | -o USER $chownprog installed files to USER. 103 | -s $stripprog installed files. 104 | -t DIRECTORY install into DIRECTORY. 105 | -T report an error if DSTFILE is a directory. 106 | --help display this help and exit. 107 | --version display version info and exit. 
108 | 109 | Environment variables override the default commands: 110 | CHGRPPROG CHMODPROG CHOWNPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG 111 | " 112 | 113 | while test $# -ne 0; do 114 | case $1 in 115 | -c) shift 116 | continue;; 117 | 118 | -d) dir_arg=true 119 | shift 120 | continue;; 121 | 122 | -g) chgrpcmd="$chgrpprog $2" 123 | shift 124 | shift 125 | continue;; 126 | 127 | --help) echo "$usage"; exit $?;; 128 | 129 | -m) mode=$2 130 | shift 131 | shift 132 | case $mode in 133 | *' '* | *' '* | *' 134 | '* | *'*'* | *'?'* | *'['*) 135 | echo "$0: invalid mode: $mode" >&2 136 | exit 1;; 137 | esac 138 | continue;; 139 | 140 | -o) chowncmd="$chownprog $2" 141 | shift 142 | shift 143 | continue;; 144 | 145 | -s) stripcmd=$stripprog 146 | shift 147 | continue;; 148 | 149 | -t) dstarg=$2 150 | shift 151 | shift 152 | continue;; 153 | 154 | -T) no_target_directory=true 155 | shift 156 | continue;; 157 | 158 | --version) echo "$0 $scriptversion"; exit $?;; 159 | 160 | --) shift 161 | break;; 162 | 163 | -*) echo "$0: invalid option: $1" >&2 164 | exit 1;; 165 | 166 | *) break;; 167 | esac 168 | done 169 | 170 | if test $# -ne 0 && test -z "$dir_arg$dstarg"; then 171 | # When -d is used, all remaining arguments are directories to create. 172 | # When -t is used, the destination is already specified. 173 | # Otherwise, the last argument is the destination. Remove it from $@. 174 | for arg 175 | do 176 | if test -n "$dstarg"; then 177 | # $@ is not empty: it contains at least $arg. 178 | set fnord "$@" "$dstarg" 179 | shift # fnord 180 | fi 181 | shift # arg 182 | dstarg=$arg 183 | done 184 | fi 185 | 186 | if test $# -eq 0; then 187 | if test -z "$dir_arg"; then 188 | echo "$0: no input file specified." >&2 189 | exit 1 190 | fi 191 | # It's OK to call `install-sh -d' without argument. 192 | # This can happen when creating conditional directories. 
193 | exit 0 194 | fi 195 | 196 | if test -z "$dir_arg"; then 197 | trap '(exit $?); exit' 1 2 13 15 198 | 199 | # Set umask so as not to create temps with too-generous modes. 200 | # However, 'strip' requires both read and write access to temps. 201 | case $mode in 202 | # Optimize common cases. 203 | *644) cp_umask=133;; 204 | *755) cp_umask=22;; 205 | 206 | *[0-7]) 207 | if test -z "$stripcmd"; then 208 | u_plus_rw= 209 | else 210 | u_plus_rw='% 200' 211 | fi 212 | cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; 213 | *) 214 | if test -z "$stripcmd"; then 215 | u_plus_rw= 216 | else 217 | u_plus_rw=,u+rw 218 | fi 219 | cp_umask=$mode$u_plus_rw;; 220 | esac 221 | fi 222 | 223 | for src 224 | do 225 | # Protect names starting with `-'. 226 | case $src in 227 | -*) src=./$src ;; 228 | esac 229 | 230 | if test -n "$dir_arg"; then 231 | dst=$src 232 | dstdir=$dst 233 | test -d "$dstdir" 234 | dstdir_status=$? 235 | else 236 | 237 | # Waiting for this to be detected by the "$cpprog $src $dsttmp" command 238 | # might cause directories to be created, which would be especially bad 239 | # if $src (and thus $dsttmp) contains '*'. 240 | if test ! -f "$src" && test ! -d "$src"; then 241 | echo "$0: $src does not exist." >&2 242 | exit 1 243 | fi 244 | 245 | if test -z "$dstarg"; then 246 | echo "$0: no destination specified." >&2 247 | exit 1 248 | fi 249 | 250 | dst=$dstarg 251 | # Protect names starting with `-'. 252 | case $dst in 253 | -*) dst=./$dst ;; 254 | esac 255 | 256 | # If destination is a directory, append the input filename; won't work 257 | # if double slashes aren't ignored. 258 | if test -d "$dst"; then 259 | if test -n "$no_target_directory"; then 260 | echo "$0: $dstarg: Is a directory" >&2 261 | exit 1 262 | fi 263 | dstdir=$dst 264 | dst=$dstdir/`basename "$src"` 265 | dstdir_status=0 266 | else 267 | # Prefer dirname, but fall back on a substitute if dirname fails. 
268 | dstdir=` 269 | (dirname "$dst") 2>/dev/null || 270 | expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ 271 | X"$dst" : 'X\(//\)[^/]' \| \ 272 | X"$dst" : 'X\(//\)$' \| \ 273 | X"$dst" : 'X\(/\)' \| . 2>/dev/null || 274 | echo X"$dst" | 275 | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ 276 | s//\1/ 277 | q 278 | } 279 | /^X\(\/\/\)[^/].*/{ 280 | s//\1/ 281 | q 282 | } 283 | /^X\(\/\/\)$/{ 284 | s//\1/ 285 | q 286 | } 287 | /^X\(\/\).*/{ 288 | s//\1/ 289 | q 290 | } 291 | s/.*/./; q' 292 | ` 293 | 294 | test -d "$dstdir" 295 | dstdir_status=$? 296 | fi 297 | fi 298 | 299 | obsolete_mkdir_used=false 300 | 301 | if test $dstdir_status != 0; then 302 | case $posix_mkdir in 303 | '') 304 | # Create intermediate dirs using mode 755 as modified by the umask. 305 | # This is like FreeBSD 'install' as of 1997-10-28. 306 | umask=`umask` 307 | case $stripcmd.$umask in 308 | # Optimize common cases. 309 | *[2367][2367]) mkdir_umask=$umask;; 310 | .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; 311 | 312 | *[0-7]) 313 | mkdir_umask=`expr $umask + 22 \ 314 | - $umask % 100 % 40 + $umask % 20 \ 315 | - $umask % 10 % 4 + $umask % 2 316 | `;; 317 | *) mkdir_umask=$umask,go-w;; 318 | esac 319 | 320 | # With -d, create the new directory with the user-specified mode. 321 | # Otherwise, rely on $mkdir_umask. 322 | if test -n "$dir_arg"; then 323 | mkdir_mode=-m$mode 324 | else 325 | mkdir_mode= 326 | fi 327 | 328 | posix_mkdir=false 329 | case $umask in 330 | *[123567][0-7][0-7]) 331 | # POSIX mkdir -p sets u+wx bits regardless of umask, which 332 | # is incompatible with FreeBSD 'install' when (umask & 300) != 0. 333 | ;; 334 | *) 335 | tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ 336 | trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 337 | 338 | if (umask $mkdir_umask && 339 | exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 340 | then 341 | if test -z "$dir_arg" || { 342 | # Check for POSIX incompatibilities with -m. 
343 | # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or 344 | # other-writeable bit of parent directory when it shouldn't. 345 | # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 346 | ls_ld_tmpdir=`ls -ld "$tmpdir"` 347 | case $ls_ld_tmpdir in 348 | d????-?r-*) different_mode=700;; 349 | d????-?--*) different_mode=755;; 350 | *) false;; 351 | esac && 352 | $mkdirprog -m$different_mode -p -- "$tmpdir" && { 353 | ls_ld_tmpdir_1=`ls -ld "$tmpdir"` 354 | test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" 355 | } 356 | } 357 | then posix_mkdir=: 358 | fi 359 | rmdir "$tmpdir/d" "$tmpdir" 360 | else 361 | # Remove any dirs left behind by ancient mkdir implementations. 362 | rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null 363 | fi 364 | trap '' 0;; 365 | esac;; 366 | esac 367 | 368 | if 369 | $posix_mkdir && ( 370 | umask $mkdir_umask && 371 | $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" 372 | ) 373 | then : 374 | else 375 | 376 | # The umask is ridiculous, or mkdir does not conform to POSIX, 377 | # or it failed possibly due to a race condition. Create the 378 | # directory the slow way, step by step, checking for races as we go. 379 | 380 | case $dstdir in 381 | /*) prefix=/ ;; 382 | -*) prefix=./ ;; 383 | *) prefix= ;; 384 | esac 385 | 386 | case $posix_glob in 387 | '') 388 | if (set -f) 2>/dev/null; then 389 | posix_glob=true 390 | else 391 | posix_glob=false 392 | fi ;; 393 | esac 394 | 395 | oIFS=$IFS 396 | IFS=/ 397 | $posix_glob && set -f 398 | set fnord $dstdir 399 | shift 400 | $posix_glob && set +f 401 | IFS=$oIFS 402 | 403 | prefixes= 404 | 405 | for d 406 | do 407 | test -z "$d" && continue 408 | 409 | prefix=$prefix$d 410 | if test -d "$prefix"; then 411 | prefixes= 412 | else 413 | if $posix_mkdir; then 414 | (umask=$mkdir_umask && 415 | $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break 416 | # Don't fail if two instances are running concurrently. 
417 | test -d "$prefix" || exit 1 418 | else 419 | case $prefix in 420 | *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; 421 | *) qprefix=$prefix;; 422 | esac 423 | prefixes="$prefixes '$qprefix'" 424 | fi 425 | fi 426 | prefix=$prefix/ 427 | done 428 | 429 | if test -n "$prefixes"; then 430 | # Don't fail if two instances are running concurrently. 431 | (umask $mkdir_umask && 432 | eval "\$doit_exec \$mkdirprog $prefixes") || 433 | test -d "$dstdir" || exit 1 434 | obsolete_mkdir_used=true 435 | fi 436 | fi 437 | fi 438 | 439 | if test -n "$dir_arg"; then 440 | { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && 441 | { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && 442 | { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || 443 | test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 444 | else 445 | 446 | # Make a couple of temp file names in the proper directory. 447 | dsttmp=$dstdir/_inst.$$_ 448 | rmtmp=$dstdir/_rm.$$_ 449 | 450 | # Trap to clean up those temp files at exit. 451 | trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 452 | 453 | # Copy the file name to the temp name. 454 | (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && 455 | 456 | # and set any options; do chmod last to preserve setuid bits. 457 | # 458 | # If any of these fail, we abort the whole thing. If we want to 459 | # ignore errors from any of these, just make sure not to ignore 460 | # errors from the above "$doit $cpprog $src $dsttmp" command. 461 | # 462 | { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } \ 463 | && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } \ 464 | && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } \ 465 | && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && 466 | 467 | # Now rename the file to the real destination. 
468 | { $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null \ 469 | || { 470 | # The rename failed, perhaps because mv can't rename something else 471 | # to itself, or perhaps because mv is so ancient that it does not 472 | # support -f. 473 | 474 | # Now remove or move aside any old file at destination location. 475 | # We try this two ways since rm can't unlink itself on some 476 | # systems and the destination file might be busy for other 477 | # reasons. In this case, the final cleanup might fail but the new 478 | # file should still install successfully. 479 | { 480 | if test -f "$dst"; then 481 | $doit $rmcmd -f "$dst" 2>/dev/null \ 482 | || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null \ 483 | && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }; }\ 484 | || { 485 | echo "$0: cannot unlink or rename $dst" >&2 486 | (exit 1); exit 1 487 | } 488 | else 489 | : 490 | fi 491 | } && 492 | 493 | # Now rename the file to the real destination. 494 | $doit $mvcmd "$dsttmp" "$dst" 495 | } 496 | } || exit 1 497 | 498 | trap '' 0 499 | fi 500 | done 501 | 502 | # Local variables: 503 | # eval: (add-hook 'write-file-hooks 'time-stamp) 504 | # time-stamp-start: "scriptversion=" 505 | # time-stamp-format: "%:y-%02m-%02d.%02H" 506 | # time-stamp-end: "$" 507 | # End: 508 | --------------------------------------------------------------------------------