├── .github └── workflows │ └── go.yml ├── .gitignore ├── .travis.yml ├── LICENSE.md ├── README.md ├── TUTORIAL.md ├── chunk ├── download.go ├── manager.go ├── manager_test.go ├── stack.go ├── stack_test.go └── storage.go ├── ci ├── compile-nightly.yml ├── compile-release.yml ├── meta │ ├── notification │ └── version ├── pipeline.yml ├── scripts │ ├── compile-nightly.sh │ ├── compile-release.sh │ ├── go-build-all │ └── test.sh ├── test-nightly.yml └── test-release.yml ├── config └── config.go ├── drive ├── cache.go └── drive.go ├── logo └── banner.png ├── main.go └── mount └── mount.go /.github/workflows/go.yml: -------------------------------------------------------------------------------- 1 | name: Go 2 | 3 | on: 4 | - push 5 | - pull_request 6 | 7 | jobs: 8 | 9 | build: 10 | name: Build 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | go: 15 | - '1.15' 16 | - '1.14' 17 | - '1.13' 18 | 19 | steps: 20 | 21 | - name: Set up Go 1.x 22 | uses: actions/setup-go@v2 23 | with: 24 | go-version: ${{ matrix.go }} 25 | 26 | id: go 27 | 28 | - name: Check out code into the Go module directory 29 | uses: actions/checkout@v2 30 | 31 | - name: Get dependencies 32 | run: | 33 | mkdir -pv "$(go env GOPATH)/src/github.com/plexdrive" 34 | ln -nfsv "$PWD" "$(go env GOPATH)/src/github.com/plexdrive/plexdrive" 35 | go get -v -t -d ./... 36 | 37 | - name: Build 38 | run: go build -v . 39 | 40 | - name: Test 41 | run: go test -v ./... 
42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | plexdrive 2 | .idea 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | 3 | go: 4 | - 1.13 5 | - stable 6 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017 Plexdrive 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Plexdrive 2 | [![Build Status](https://travis-ci.org/dweidenfeld/plexdrive.svg?branch=master)](https://travis-ci.org/dweidenfeld/plexdrive) 3 | 4 | __Plexdrive__ allows you to mount your Google Drive account as read-only fuse filesystem, with direct delete option on the filesystem. 5 | 6 | The project is comparable to projects like [rclone](https://rclone.org/), 7 | [google-drive-ocamlfuse](https://github.com/astrada/google-drive-ocamlfuse) or 8 | [node-gdrive-fuse](https://github.com/thejinx0r/node-gdrive-fuse), 9 | but optimized for media streaming e.g. with plex ;) 10 | 11 | Please note that plexdrive doesn't currently support writes (adding new files or modifications), it only supports reading existing files and deletion. 12 | 13 | I tried using rclone for a long time, but got API Quota errors every day and/or multiple times per day, so I decided to try node-gdrive-fuse. The problem here was that it missed some of my media files, so as a result I started implementing my own file system library. 14 | 15 | _If you like the project, feel free to make a small [donation via PayPal](https://www.paypal.me/dowei). Otherwise support the project by implementing new functions / bugfixes yourself and create pull requests :)_ 16 | 17 | ## Installation 18 | 1. First you need to install fuse on your system 19 | 2. Then you should download the newest release from the [GitHub release page](https://github.com/plexdrive/plexdrive/releases). 20 | 3. Create your own client id and client secret (see [https://rclone.org/drive/#making-your-own-client-id](https://rclone.org/drive/#making-your-own-client-id)). 21 | 4. 
Sample command line for plexdrive 22 | ``` 23 | ./plexdrive mount -c /root/.plexdrive -o allow_other /mnt/plexdrive 24 | ``` 25 | 26 | ### Crypted mount with rclone 27 | You can use [this tutorial](TUTORIAL.md) for instruction how to mount an encrypted rclone mount. 28 | 29 | ## Usage 30 | ``` 31 | Usage of ./plexdrive mount: 32 | --cache-file string 33 | Path the the cache file (default "~/.plexdrive/cache.bolt") 34 | --chunk-check-threads int 35 | The number of threads to use for checking chunk existence (default 2) 36 | --chunk-load-ahead int 37 | The number of chunks that should be read ahead (default 3) 38 | --chunk-load-threads int 39 | The number of threads to use for downloading chunks (default 2) 40 | --chunk-size string 41 | The size of each chunk that is downloaded (units: B, K, M, G) (default "10M") 42 | -c, --config string 43 | The path to the configuration directory (default "~/.plexdrive") 44 | --drive-id string 45 | The ID of the shared drive to mount (including team drives) 46 | -o, --fuse-options string 47 | Fuse mount options (e.g. -fuse-options allow_other,...) 48 | --gid int 49 | Set the mounts GID (-1 = default permissions) (default -1) 50 | --max-chunks int 51 | The maximum number of chunks to be stored on disk (default 10) 52 | --refresh-interval duration 53 | The time to wait till checking for changes (default 1m0s) 54 | --root-node-id string 55 | The ID of the root node to mount (use this for only mount a sub directory) (default "root") 56 | --uid int 57 | Set the mounts UID (-1 = default permissions) (default -1) 58 | --umask value 59 | Override the default file permissions 60 | -v, --verbosity int 61 | Set the log level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace) 62 | --version 63 | Displays program's version information 64 | ``` 65 | 66 | ### Support 67 | Slack support is available on [our Slack channel](https://join.slack.com/t/plexdrive/shared_invite/MjM2MTMzMjY2MTc5LTE1MDQ2MDE4NDQtOTc0N2RiY2UxNw). 
68 | Feel free to ask configuration and setup questions here. 69 | 70 | ### Supported FUSE mount options 71 | * allow_other 72 | * allow_dev 73 | * allow_non_empty_mount 74 | * allow_suid 75 | * max_readahead=1234 76 | * default_permissions 77 | * excl_create 78 | * fs_name=myname 79 | * local_volume 80 | * writeback_cache 81 | * volume_name=myname 82 | * read_only 83 | 84 | 85 | ### Root-Node-ID 86 | You can use the option `root-node-id` to specify a folder id that should be mounted as 87 | the root folder. This option will not prevent plexdrive from getting the changes for your 88 | whole Google Drive structure. It will only "display" another folder as root instead of the 89 | real root folder. 90 | Don't expect any performance improvement or something else. This option is only for your 91 | personal folder structuring. 92 | 93 | #### Team Drive 94 | You can pass the ID of a Team Drive as `drive-id` to get access to a Team drive, here's how: 95 | * Open the Team Drive in your browser 96 | * Note the format of the URL: https://drive.google.com/drive/u/0/folders/ABC123qwerty987 97 | * The `drive-id` of this Team Drive is `ABC123qwerty987` 98 | * Pass it with `--drive-id=ABC123qwerty987` argument to your `plexdrive mount` command 99 | 100 | # Contribute 101 | If you want to support the project by implementing functions / fixing bugs 102 | yourself feel free to do so! 103 | 104 | 1. Fork the repository 105 | 2. Clone it to your [golang workspace](https://golang.org/doc/code.html) $GOPATH/src/github.com/username/plexdrive 106 | 3. Implement your changes 107 | 4. Test your changes (e.g. `go build && ./plexdrive -v3 /tmp/drive`) 108 | 5. Format everything with [gofmt](https://golang.org/cmd/gofmt/) ( 109 | (I recommend working with [VSCode](https://code.visualstudio.com/) and [VSCode-Go](https://github.com/lukehoban/vscode-go)) 110 | 6. 
Create a pull request 111 | -------------------------------------------------------------------------------- /TUTORIAL.md: -------------------------------------------------------------------------------- 1 | # Tutorial for creating/mounting an encrypted rclone volume 2 | 3 | ## Install the dependencies 4 | You have to install fuse on your system to run plexdrive/rclone. Please check your system on how to install fuse. 5 | Normally you can use: 6 | ``` 7 | apt-get install fuse 8 | ``` 9 | 10 | ## Mounting the unencrypted volume with plexdrive 11 | 1. Then you should download the newest release from the [GitHub release page](https://github.com/plexdrive/plexdrive/releases). 12 | 2. Create your own client id and client secret (see [https://rclone.org/drive/#making-your-own-client-id](https://rclone.org/drive/#making-your-own-client-id)). 13 | 3. Create a systemd startup script for automatic startup on boot 14 | ``` 15 | # /etc/systemd/system/plexdrive.service 16 | 17 | [Unit] 18 | Description=Plexdrive 19 | AssertPathIsDirectory=/mnt/plexdrive 20 | After=network-online.target 21 | 22 | [Service] 23 | Type=simple 24 | ExecStart=/usr/bin/plexdrive mount -v 2 /mnt/plexdrive 25 | ExecStop=/bin/fusermount -u /mnt/plexdrive 26 | Restart=on-abort 27 | 28 | [Install] 29 | WantedBy=default.target 30 | ``` 31 | 4. Refresh your daemons 32 | ``` 33 | sudo systemctl daemon-reload 34 | ``` 35 | 5. Run the application like this 36 | ``` 37 | sudo systemctl start plexdrive.service 38 | ``` 39 | 6. Activate the auto startup option 40 | ``` 41 | sudo systemctl enable plexdrive.service 42 | ``` 43 | 44 | ## Preparations in rclone 45 | 1. Download and install rclone 46 | 2. Configure a new rclone remote: 47 | ``` 48 | rclone config 49 | ``` 50 | 3. Select "new remote" 51 | ![remote image](http://i.imgur.com/nOg64dy.png) 52 | 3. Give the remote a descriptive name. We will be using the name "local-crypt" throughout the rest of this guide. 53 | 4. 
Select "5" for "Encrypt/Decrypt a remote" 54 | ![type image](http://i.imgur.com/bLtWR7P.png) 55 | 5. Now we need to specify the remote to decrypt. This needs to be the path where plexdrive is mounted: 56 | ``` 57 | /mnt/plexdrive/encrypted 58 | ``` 59 | 6. Encryption type: Select the same type of encryption that you initially chose when setting up your rclone encryption. 60 | 7. Password: Use the same password you used then setting up your rclone encryption. 61 | 8. Salt: Use the same salt you used when setting up your rclone encryption. 62 | 9. Review the details and if everything looks good select "y". 63 | 10. We should now have a working Encrypt/Decrypt remote. 64 | 11. Create a systemd startup script for automatic startup on boot 65 | ``` 66 | # /etc/systemd/system/rclone.service 67 | 68 | [Unit] 69 | Description=Google Drive (rclone) 70 | AssertPathIsDirectory=/mnt/media 71 | After=plexdrive.service 72 | 73 | [Service] 74 | Type=simple 75 | ExecStart=/usr/bin/rclone mount --allow-other local-crypt: /mnt/media 76 | ExecStop=/bin/fusermount -u /mnt/media 77 | Restart=on-abort 78 | 79 | [Install] 80 | WantedBy=default.target 81 | ``` 82 | 12. Refresh your daemons 83 | ``` 84 | sudo systemctl daemon-reload 85 | ``` 86 | 13. Run the application like this 87 | ``` 88 | sudo systemctl start rclone.service 89 | ``` 90 | 14. Activate the auto startup option 91 | ``` 92 | sudo systemctl enable rclone.service 93 | ``` 94 | -------------------------------------------------------------------------------- /chunk/download.go: -------------------------------------------------------------------------------- 1 | package chunk 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | . 
"github.com/claudetech/loggo/default" 12 | "github.com/plexdrive/plexdrive/drive" 13 | ) 14 | 15 | // Downloader handles concurrent chunk downloads 16 | type Downloader struct { 17 | Client *drive.Client 18 | queue chan *Request 19 | callbacks map[string][]DownloadCallback 20 | lock sync.Mutex 21 | } 22 | 23 | type DownloadCallback func(error, []byte) 24 | 25 | // NewDownloader creates a new download manager 26 | func NewDownloader(threads int, client *drive.Client) (*Downloader, error) { 27 | manager := Downloader{ 28 | Client: client, 29 | queue: make(chan *Request, 100), 30 | callbacks: make(map[string][]DownloadCallback, 100), 31 | } 32 | 33 | for i := 0; i < threads; i++ { 34 | go manager.thread() 35 | } 36 | 37 | return &manager, nil 38 | } 39 | 40 | // Download starts a new download request 41 | func (d *Downloader) Download(req *Request, callback DownloadCallback) { 42 | d.lock.Lock() 43 | _, exists := d.callbacks[req.id] 44 | d.callbacks[req.id] = append(d.callbacks[req.id], callback) 45 | if !exists { 46 | d.queue <- req 47 | } 48 | d.lock.Unlock() 49 | } 50 | 51 | func (d *Downloader) thread() { 52 | for { 53 | req := <-d.queue 54 | d.download(d.Client.GetNativeClient(), req) 55 | } 56 | } 57 | 58 | func (d *Downloader) download(client *http.Client, req *Request) { 59 | Log.Debugf("Starting download %v (preload: %v)", req.id, req.preload) 60 | bytes, err := downloadFromAPI(client, req, 0) 61 | 62 | d.lock.Lock() 63 | callbacks := d.callbacks[req.id] 64 | for _, callback := range callbacks { 65 | callback(err, bytes) 66 | } 67 | delete(d.callbacks, req.id) 68 | d.lock.Unlock() 69 | } 70 | 71 | func downloadFromAPI(client *http.Client, request *Request, delay int64) ([]byte, error) { 72 | // sleep if request is throttled 73 | if delay > 0 { 74 | time.Sleep(time.Duration(delay) * time.Second) 75 | } 76 | 77 | req, err := http.NewRequest("GET", request.object.DownloadURL, nil) 78 | if nil != err { 79 | Log.Debugf("%v", err) 80 | return nil, fmt.Errorf("Could 
not create request object %v (%v) from API", request.object.ObjectID, request.object.Name) 81 | } 82 | 83 | req.Header.Add("Range", fmt.Sprintf("bytes=%v-%v", request.offsetStart, request.offsetEnd-1)) 84 | 85 | Log.Tracef("Sending HTTP Request %v", req) 86 | 87 | res, err := client.Do(req) 88 | if nil != err { 89 | Log.Debugf("%v", err) 90 | return nil, fmt.Errorf("Could not request object %v (%v) from API", request.object.ObjectID, request.object.Name) 91 | } 92 | defer res.Body.Close() 93 | reader := res.Body 94 | 95 | if res.StatusCode != 206 { 96 | if res.StatusCode != 403 && res.StatusCode != 500 { 97 | Log.Debugf("Request\n----------\n%v\n----------\n", req) 98 | Log.Debugf("Response\n----------\n%v\n----------\n", res) 99 | return nil, fmt.Errorf("Wrong status code %v for %v", res.StatusCode, request.object) 100 | } 101 | 102 | // throttle requests 103 | if delay > 8 { 104 | return nil, fmt.Errorf("Maximum throttle interval has been reached") 105 | } 106 | bytes, err := ioutil.ReadAll(reader) 107 | if nil != err { 108 | Log.Debugf("%v", err) 109 | return nil, fmt.Errorf("Could not read body of error") 110 | } 111 | body := string(bytes) 112 | if strings.Contains(body, "dailyLimitExceeded") || 113 | strings.Contains(body, "userRateLimitExceeded") || 114 | strings.Contains(body, "rateLimitExceeded") || 115 | strings.Contains(body, "backendError") || 116 | strings.Contains(body, "internalError") { 117 | if 0 == delay { 118 | delay = 1 119 | } else { 120 | delay = delay * 2 121 | } 122 | return downloadFromAPI(client, request, delay) 123 | } 124 | 125 | // return an error if other error occurred 126 | Log.Debugf("%v", body) 127 | return nil, fmt.Errorf("Could not read object %v (%v) / StatusCode: %v", 128 | request.object.ObjectID, request.object.Name, res.StatusCode) 129 | } 130 | 131 | bytes, err := ioutil.ReadAll(reader) 132 | if nil != err { 133 | Log.Debugf("%v", err) 134 | return nil, fmt.Errorf("Could not read objects %v (%v) API response", 
request.object.ObjectID, request.object.Name) 135 | } 136 | 137 | return bytes, nil 138 | } 139 | -------------------------------------------------------------------------------- /chunk/manager.go: -------------------------------------------------------------------------------- 1 | package chunk 2 | 3 | import ( 4 | "fmt" 5 | 6 | . "github.com/claudetech/loggo/default" 7 | 8 | "github.com/plexdrive/plexdrive/drive" 9 | ) 10 | 11 | // Manager manages chunks on disk 12 | type Manager struct { 13 | ChunkSize int64 14 | LoadAhead int 15 | downloader *Downloader 16 | storage *Storage 17 | queue chan *QueueEntry 18 | } 19 | 20 | type QueueEntry struct { 21 | request *Request 22 | response chan Response 23 | } 24 | 25 | // Request represents a chunk request 26 | type Request struct { 27 | id string 28 | object *drive.APIObject 29 | offsetStart int64 30 | offsetEnd int64 31 | chunkOffset int64 32 | chunkOffsetEnd int64 33 | sequence int 34 | preload bool 35 | } 36 | 37 | // Response represetns a chunk response 38 | type Response struct { 39 | Sequence int 40 | Error error 41 | Bytes []byte 42 | } 43 | 44 | // NewManager creates a new chunk manager 45 | func NewManager( 46 | chunkSize int64, 47 | loadAhead, 48 | checkThreads int, 49 | loadThreads int, 50 | client *drive.Client, 51 | maxChunks int) (*Manager, error) { 52 | 53 | if chunkSize < 4096 { 54 | return nil, fmt.Errorf("Chunk size must not be < 4096") 55 | } 56 | if chunkSize%1024 != 0 { 57 | return nil, fmt.Errorf("Chunk size must be divideable by 1024") 58 | } 59 | if maxChunks < 2 || maxChunks < loadAhead { 60 | return nil, fmt.Errorf("max-chunks must be greater than 2 and bigger than the load ahead value") 61 | } 62 | 63 | downloader, err := NewDownloader(loadThreads, client) 64 | if nil != err { 65 | return nil, err 66 | } 67 | 68 | manager := Manager{ 69 | ChunkSize: chunkSize, 70 | LoadAhead: loadAhead, 71 | downloader: downloader, 72 | storage: NewStorage(chunkSize, maxChunks), 73 | queue: make(chan 
*QueueEntry, 100), 74 | } 75 | 76 | if err := manager.storage.Clear(); nil != err { 77 | return nil, err 78 | } 79 | 80 | for i := 0; i < checkThreads; i++ { 81 | go manager.thread() 82 | } 83 | 84 | return &manager, nil 85 | } 86 | 87 | // GetChunk loads one chunk and starts the preload for the next chunks 88 | func (m *Manager) GetChunk(object *drive.APIObject, offset, size int64) ([]byte, error) { 89 | maxOffset := int64(object.Size) 90 | if offset > maxOffset { 91 | return nil, fmt.Errorf("Tried to read past EOF of %v at offset %v", object.ObjectID, offset) 92 | } 93 | if offset+size > maxOffset { 94 | size = int64(object.Size) - offset 95 | } 96 | 97 | ranges := splitChunkRanges(offset, size, m.ChunkSize) 98 | responses := make(chan Response, len(ranges)) 99 | 100 | for i, r := range ranges { 101 | m.requestChunk(object, r.offset, r.size, i, responses) 102 | } 103 | 104 | data := make([]byte, size, size) 105 | for i := 0; i < cap(responses); i++ { 106 | res := <-responses 107 | if nil != res.Error { 108 | return nil, res.Error 109 | } 110 | 111 | dataOffset := ranges[res.Sequence].offset - offset 112 | 113 | if n := copy(data[dataOffset:], res.Bytes); n == 0 { 114 | return nil, fmt.Errorf("Request %v slice %v has empty response", object.ObjectID, res.Sequence) 115 | } 116 | } 117 | close(responses) 118 | 119 | return data, nil 120 | } 121 | 122 | func (m *Manager) requestChunk(object *drive.APIObject, offset, size int64, sequence int, response chan Response) { 123 | chunkOffset := offset % m.ChunkSize 124 | offsetStart := offset - chunkOffset 125 | offsetEnd := offsetStart + m.ChunkSize 126 | id := fmt.Sprintf("%v:%v", object.ObjectID, offsetStart) 127 | 128 | request := &Request{ 129 | id: id, 130 | object: object, 131 | offsetStart: offsetStart, 132 | offsetEnd: offsetEnd, 133 | chunkOffset: chunkOffset, 134 | chunkOffsetEnd: chunkOffset + size, 135 | sequence: sequence, 136 | preload: false, 137 | } 138 | 139 | m.queue <- &QueueEntry{ 140 | request: 
// byteRange describes one contiguous part of a read request.
type byteRange struct {
	offset, size int64
}

// splitChunkRanges cuts a read request into ranges that each stay inside
// a single chunk.
//
// More than one range is produced for unaligned reads (e.g. Direct-IO)
// or when the requested size exceeds the chunk size.
func splitChunkRanges(offset, size, chunkSize int64) []byteRange {
	ranges := make([]byteRange, 0, size/chunkSize+2)
	remaining := size
	for remaining > 0 {
		// bytes left in the chunk containing the current offset
		partSize := chunkSize - offset%chunkSize
		if remaining < partSize {
			partSize = remaining
		}
		ranges = append(ranges, byteRange{offset: offset, size: partSize})
		offset += partSize
		remaining -= partSize
	}
	return ranges
}
response <- Response{ 212 | Sequence: req.sequence, 213 | Bytes: adjustResponseChunk(req, bytes), 214 | } 215 | } 216 | 217 | if err := m.storage.Store(req.id, bytes); nil != err { 218 | Log.Warningf("Coult not store chunk %v", req.id) 219 | } 220 | }) 221 | } 222 | 223 | func adjustResponseChunk(req *Request, bytes []byte) []byte { 224 | bytesLen := int64(len(bytes)) 225 | sOffset := min(req.chunkOffset, bytesLen) 226 | eOffset := min(req.chunkOffsetEnd, bytesLen) 227 | return bytes[sOffset:eOffset] 228 | } 229 | 230 | func min(x, y int64) int64 { 231 | if x < y { 232 | return x 233 | } 234 | return y 235 | } 236 | -------------------------------------------------------------------------------- /chunk/manager_test.go: -------------------------------------------------------------------------------- 1 | package chunk 2 | 3 | import "testing" 4 | 5 | func TestSplitChunkRanges(t *testing.T) { 6 | testcases := []struct { 7 | offset, size, chunkSize int64 8 | result []byteRange 9 | }{ 10 | {0, 0, 4096, []byteRange{}}, 11 | {0, 4096, 4096, []byteRange{ 12 | {0, 4096}, 13 | }}, 14 | {4095, 4096, 4096, []byteRange{ 15 | {4095, 1}, 16 | {4096, 4095}, 17 | }}, 18 | {0, 8192, 4096, []byteRange{ 19 | {0, 4096}, 20 | {4096, 4096}, 21 | }}, 22 | {2048, 8192, 4096, []byteRange{ 23 | {2048, 2048}, 24 | {4096, 4096}, 25 | {8192, 2048}, 26 | }}, 27 | {2048, 8192, 4096, []byteRange{ 28 | {2048, 2048}, 29 | {4096, 4096}, 30 | {8192, 2048}, 31 | }}, 32 | {17960960, 16777216, 10485760, []byteRange{ 33 | {17960960, 3010560}, 34 | {20971520, 10485760}, 35 | {31457280, 3280896}, 36 | }}, 37 | } 38 | for i, tc := range testcases { 39 | ranges := splitChunkRanges(tc.offset, tc.size, tc.chunkSize) 40 | actualSize := len(ranges) 41 | expectedSize := len(tc.result) 42 | if actualSize != expectedSize { 43 | t.Fatalf("ByteRange %v length mismatch: %v != %v", i, actualSize, expectedSize) 44 | } 45 | for j, r := range ranges { 46 | actual := r 47 | expected := tc.result[j] 48 | if actual != 
// Stack is a thread safe LRU list of chunk ids: the front holds the
// least recently used id, the back the most recently used one.
type Stack struct {
	items   *list.List               // LRU order, front = oldest
	index   map[string]*list.Element // id -> list element, for O(1) Touch
	lock    sync.RWMutex
	maxSize int // eviction only starts once this many ids are stored
}

// NewStack creates a new stack
func NewStack(maxChunks int) *Stack {
	return &Stack{
		items:   list.New(),
		index:   make(map[string]*list.Element, maxChunks),
		maxSize: maxChunks,
	}
}

// Pop removes and returns the least recently used id, but only once the
// stack holds at least maxSize ids; otherwise it returns "".
func (s *Stack) Pop() string {
	s.lock.Lock()
	defer s.lock.Unlock()

	// below capacity nothing needs to be evicted
	if s.items.Len() < s.maxSize {
		return ""
	}

	item := s.items.Front()
	if nil == item {
		return ""
	}
	s.items.Remove(item)
	id := item.Value.(string)
	delete(s.index, id)
	return id
}

// Touch moves the specified id to the most recently used position.
// Unknown ids are ignored.
func (s *Stack) Touch(id string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	if item, exists := s.index[id]; exists {
		s.items.MoveToBack(item)
	}
}

// Push adds a new id at the most recently used position. Duplicate ids
// are ignored.
func (s *Stack) Push(id string) {
	s.lock.Lock()
	defer s.lock.Unlock()
	if _, exists := s.index[id]; exists {
		return
	}
	// PushBack returns the new element, so index it directly instead of
	// re-querying Back().
	s.index[id] = s.items.PushBack(id)
}
"testing" 4 | 5 | func TestOOB(t *testing.T) { 6 | stack := NewStack(1) 7 | 8 | stack.Push("1") 9 | stack.Touch("1") 10 | } 11 | 12 | func TestAddToStack(t *testing.T) { 13 | stack := NewStack(1) 14 | 15 | stack.Push("1") 16 | stack.Push("2") 17 | stack.Push("3") 18 | stack.Push("4") 19 | 20 | stack.Touch("1") 21 | stack.Touch("3") 22 | 23 | v := stack.Pop() 24 | if "2" != v { 25 | t.Fatalf("Expected 2 got %v", v) 26 | } 27 | 28 | v = stack.Pop() 29 | if "4" != v { 30 | t.Fatalf("Expected 4 got %v", v) 31 | } 32 | 33 | v = stack.Pop() 34 | if "1" != v { 35 | t.Fatalf("Expected 1 got %v", v) 36 | } 37 | 38 | v = stack.Pop() 39 | if "3" != v { 40 | t.Fatalf("Expected 3 got %v", v) 41 | } 42 | 43 | v = stack.Pop() 44 | if "" != v { 45 | t.Fatalf("Expected nil got %v", v) 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /chunk/storage.go: -------------------------------------------------------------------------------- 1 | package chunk 2 | 3 | import ( 4 | "errors" 5 | "sync" 6 | 7 | . 
"github.com/claudetech/loggo/default" 8 | ) 9 | 10 | // ErrTimeout is a timeout error 11 | var ErrTimeout = errors.New("timeout") 12 | 13 | // Storage is a chunk storage 14 | type Storage struct { 15 | ChunkSize int64 16 | MaxChunks int 17 | chunks map[string][]byte 18 | stack *Stack 19 | lock sync.Mutex 20 | } 21 | 22 | // Item represents a chunk in RAM 23 | type Item struct { 24 | id string 25 | bytes []byte 26 | } 27 | 28 | // NewStorage creates a new storage 29 | func NewStorage(chunkSize int64, maxChunks int) *Storage { 30 | storage := Storage{ 31 | ChunkSize: chunkSize, 32 | MaxChunks: maxChunks, 33 | chunks: make(map[string][]byte), 34 | stack: NewStack(maxChunks), 35 | } 36 | 37 | return &storage 38 | } 39 | 40 | // Clear removes all old chunks on disk (will be called on each program start) 41 | func (s *Storage) Clear() error { 42 | return nil 43 | } 44 | 45 | // Load a chunk from ram or creates it 46 | func (s *Storage) Load(id string) []byte { 47 | s.lock.Lock() 48 | if chunk, exists := s.chunks[id]; exists { 49 | s.lock.Unlock() 50 | s.stack.Touch(id) 51 | return chunk 52 | } 53 | s.lock.Unlock() 54 | return nil 55 | } 56 | 57 | // Store stores a chunk in the RAM and adds it to the disk storage queue 58 | func (s *Storage) Store(id string, bytes []byte) error { 59 | s.lock.Lock() 60 | 61 | deleteID := s.stack.Pop() 62 | if "" != deleteID { 63 | delete(s.chunks, deleteID) 64 | 65 | Log.Debugf("Deleted chunk %v", deleteID) 66 | } 67 | 68 | s.chunks[id] = bytes 69 | s.stack.Push(id) 70 | s.lock.Unlock() 71 | 72 | return nil 73 | } 74 | -------------------------------------------------------------------------------- /ci/compile-nightly.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: golang 7 | 8 | inputs: 9 | - name: plexdrive 10 | path: go/src/github.com/plexdrive/plexdrive 11 | 12 | run: 13 | path: 
go/src/github.com/plexdrive/plexdrive/ci/scripts/compile-nightly.sh 14 | 15 | outputs: 16 | - name: release 17 | - name: metadata 18 | -------------------------------------------------------------------------------- /ci/compile-release.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: golang 7 | 8 | inputs: 9 | - name: plexdrive 10 | path: go/src/github.com/plexdrive/plexdrive 11 | 12 | run: 13 | path: go/src/github.com/plexdrive/plexdrive/ci/scripts/compile-release.sh 14 | 15 | outputs: 16 | - name: release 17 | - name: metadata 18 | -------------------------------------------------------------------------------- /ci/meta/notification: -------------------------------------------------------------------------------- 1 | Version %VERSION% has been published to GitHub. 2 | Download it now: https://github.com/plexdrive/plexdrive/releases 3 | -------------------------------------------------------------------------------- /ci/meta/version: -------------------------------------------------------------------------------- 1 | 5.1.0 2 | -------------------------------------------------------------------------------- /ci/pipeline.yml: -------------------------------------------------------------------------------- 1 | resource_types: 2 | - name: slack-notification 3 | type: docker-image 4 | source: 5 | repository: cfcommunity/slack-notification-resource 6 | tag: latest 7 | 8 | resources: 9 | - name: plexdrive-develop 10 | type: git 11 | source: 12 | uri: https://github.com/plexdrive/plexdrive 13 | branch: develop 14 | 15 | - name: plexdrive-master 16 | type: git 17 | source: 18 | uri: https://github.com/plexdrive/plexdrive 19 | branch: master 20 | 21 | - name: github-nightly-release 22 | type: github-release 23 | source: 24 | owner: plexdrive 25 | repository: plexdrive 26 | access_token: {{gh-access-token}} 27 | release: false 28 | pre_release: 
true 29 | 30 | - name: github-release 31 | type: github-release 32 | source: 33 | owner: plexdrive 34 | repository: plexdrive 35 | access_token: {{gh-access-token}} 36 | release: true 37 | pre_release: false 38 | 39 | - name: slack-notification 40 | type: slack-notification 41 | source: 42 | url: https://hooks.slack.com/services/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 43 | 44 | jobs: 45 | - name: build 46 | plan: 47 | - get: plexdrive 48 | resource: plexdrive-develop 49 | trigger: true 50 | - task: test 51 | file: plexdrive/ci/test-nightly.yml 52 | - task: compile 53 | file: plexdrive/ci/compile-nightly.yml 54 | - put: github-nightly-release 55 | params: 56 | name: metadata/version 57 | tag: metadata/version 58 | body: metadata/version 59 | globs: 60 | - release/* 61 | - put: slack-notification 62 | params: 63 | text_file: metadata/notification 64 | 65 | - name: release 66 | plan: 67 | - get: plexdrive 68 | resource: plexdrive-master 69 | trigger: true 70 | - task: test 71 | file: plexdrive/ci/test-release.yml 72 | - task: compile 73 | file: plexdrive/ci/compile-release.yml 74 | - put: github-release 75 | params: 76 | name: metadata/version 77 | tag: metadata/version 78 | body: metadata/version 79 | globs: 80 | - release/* 81 | - put: slack-notification 82 | params: 83 | text_file: metadata/notification 84 | -------------------------------------------------------------------------------- /ci/scripts/compile-nightly.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | # Configuration 5 | export ORIGIN=$PWD 6 | export GOPATH=$PWD/go 7 | export PATH=$GOPATH/bin:$PATH 8 | export TS=$(date +%s) 9 | cd $GOPATH/src/github.com/plexdrive/plexdrive 10 | 11 | # Version 12 | export VERSION="$(cat ci/meta/version)-beta.$TS" 13 | echo "Got version $VERSION" 14 | 15 | sed -i.bak s/%VERSION%/$VERSION/g main.go 16 | echo $VERSION > $ORIGIN/metadata/version 17 | sed s/%VERSION%/$VERSION/g ci/meta/notification > 
$ORIGIN/metadata/notification 18 | 19 | # Build 20 | go get -v 21 | ./ci/scripts/go-build-all 22 | 23 | mv plexdrive-* $ORIGIN/release 24 | 25 | # Check 26 | cd $ORIGIN 27 | ls -lah release 28 | ls -lah metadata 29 | -------------------------------------------------------------------------------- /ci/scripts/compile-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | # Configuration 5 | export ORIGIN=$PWD 6 | export GOPATH=$PWD/go 7 | export PATH=$GOPATH/bin:$PATH 8 | cd $GOPATH/src/github.com/plexdrive/plexdrive 9 | 10 | # Version 11 | export VERSION="$(cat ci/meta/version)" 12 | echo "Got version $VERSION" 13 | 14 | sed -i.bak s/%VERSION%/$VERSION/g main.go 15 | echo $VERSION > $ORIGIN/metadata/version 16 | sed s/%VERSION%/$VERSION/g ci/meta/notification > $ORIGIN/metadata/notification 17 | 18 | # Build 19 | go get -v 20 | ./ci/scripts/go-build-all 21 | 22 | mv plexdrive-* $ORIGIN/release 23 | 24 | # Check 25 | cd $ORIGIN 26 | ls -lah release 27 | ls -lah metadata 28 | -------------------------------------------------------------------------------- /ci/scripts/go-build-all: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # GoLang cross-compile snippet for Go 1.6+ based loosely on Dave Chaney's cross-compile script: 4 | # http://dave.cheney.net/2012/09/08/an-introduction-to-cross-compilation-with-go 5 | # 6 | # To use: 7 | # 8 | # $ cd ~/path-to/my-awesome-project 9 | # $ go-build-all 10 | # 11 | # Features: 12 | # 13 | # * Cross-compiles to multiple machine types and architectures. 14 | # * Uses the current directory name as the output name... 15 | # * ...unless you supply an source file: $ go-build-all main.go 16 | # * Windows binaries are named .exe. 
17 | # * ARM v5, v6, v7 and v8 (arm64) support 18 | # 19 | # ARM Support: 20 | # 21 | # You must read https://github.com/golang/go/wiki/GoArm for the specifics of running 22 | # Linux/BSD-style kernels and what kernel modules are needed for the target platform. 23 | # While not needed for cross-compilation of this script, your users will need to ensure 24 | # the correct modules are included. 25 | # 26 | # Requirements: 27 | # 28 | # * GoLang 1.6+ (for mips and ppc), 1.5 for non-mips/ppc. 29 | # * CD to directory of the binary you are compiling. $PWD is used here. 30 | # 31 | # For 1.4 and earlier, see http://dave.cheney.net/2012/09/08/an-introduction-to-cross-compilation-with-go 32 | # 33 | 34 | # This PLATFORMS list is refreshed after every major Go release. 35 | # Though more platforms may be supported (freebsd/386), they have been removed 36 | # from the standard ports/downloads and therefore removed from this list. 37 | # 38 | PLATFORMS="darwin/amd64 darwin/386" # amd64 only as of go1.5 39 | # PLATFORMS="$PLATFORMS windows/amd64 windows/386" # arm compilation not available for Windows 40 | PLATFORMS="$PLATFORMS linux/amd64 linux/386" 41 | PLATFORMS="$PLATFORMS linux/ppc64 linux/ppc64le" 42 | # PLATFORMS="$PLATFORMS linux/mips64 linux/mips64le" # experimental in go1.6 43 | PLATFORMS="$PLATFORMS freebsd/amd64" 44 | # PLATFORMS="$PLATFORMS netbsd/amd64" # amd64 only as of go1.6 45 | # PLATFORMS="$PLATFORMS openbsd/amd64" # amd64 only as of go1.6 46 | # PLATFORMS="$PLATFORMS dragonfly/amd64" # amd64 only as of go1.5 47 | # PLATFORMS="$PLATFORMS plan9/amd64 plan9/386" # as of go1.4 48 | # PLATFORMS="$PLATFORMS solaris/amd64" # as of go1.3 49 | 50 | # ARMBUILDS lists the platforms that are currently supported.
From this list 51 | # we generate the following architectures: 52 | # 53 | # ARM64 (aka ARMv8) <- only supported on linux and darwin builds (go1.6) 54 | # ARMv7 55 | # ARMv6 56 | # ARMv5 57 | # 58 | # Some words of caution from the master: 59 | # 60 | # @dfc: you'll have to use gomobile to build for darwin/arm64 [and others] 61 | # @dfc: that target expects that you're bulding for a mobile phone 62 | # @dfc: iphone 5 and below, ARMv7, iphone 3 and below ARMv6, iphone 5s and above arm64 63 | # 64 | PLATFORMS_ARM="linux" 65 | 66 | FLAGS="-ldflags='-s -w'" 67 | 68 | ############################################################## 69 | # Shouldn't really need to modify anything below this line. # 70 | ############################################################## 71 | 72 | type setopt >/dev/null 2>&1 73 | 74 | SCRIPT_NAME=`basename "$0"` 75 | FAILURES="" 76 | SOURCE_FILE=`echo $@ | sed 's/\.go//'` 77 | CURRENT_DIRECTORY=${PWD##*/} 78 | OUTPUT=${SOURCE_FILE:-$CURRENT_DIRECTORY} # if no src file given, use current dir name 79 | 80 | for PLATFORM in $PLATFORMS; do 81 | GOOS=${PLATFORM%/*} 82 | GOARCH=${PLATFORM#*/} 83 | BIN_FILENAME="${OUTPUT}-${GOOS}-${GOARCH}" 84 | if [[ "${GOOS}" == "windows" ]]; then BIN_FILENAME="${BIN_FILENAME}.exe"; fi 85 | CMD="GOOS=${GOOS} GOARCH=${GOARCH} go build ${FLAGS} -o ${BIN_FILENAME} $@" 86 | echo "${CMD}" 87 | eval $CMD || FAILURES="${FAILURES} ${PLATFORM}" 88 | done 89 | 90 | # ARM builds 91 | if [[ $PLATFORMS_ARM == *"linux"* ]]; then 92 | CMD="GOOS=linux GOARCH=arm64 go build ${FLAGS} -o ${OUTPUT}-linux-arm64 $@" 93 | echo "${CMD}" 94 | eval $CMD || FAILURES="${FAILURES} ${PLATFORM}" 95 | fi 96 | for GOOS in $PLATFORMS_ARM; do 97 | GOARCH="arm" 98 | # build for each ARM version 99 | for GOARM in 7 6 5; do 100 | BIN_FILENAME="${OUTPUT}-${GOOS}-${GOARCH}${GOARM}" 101 | CMD="GOARM=${GOARM} GOOS=${GOOS} GOARCH=${GOARCH} go build ${FLAGS} -o ${BIN_FILENAME} $@" 102 | echo "${CMD}" 103 | eval "${CMD}" || FAILURES="${FAILURES} 
${GOOS}/${GOARCH}${GOARM}" 104 | done 105 | done 106 | 107 | # eval errors 108 | if [[ "${FAILURES}" != "" ]]; then 109 | echo "" 110 | echo "${SCRIPT_NAME} failed on: ${FAILURES}" 111 | exit 1 112 | fi 113 | -------------------------------------------------------------------------------- /ci/scripts/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | export GOPATH=$PWD/go 5 | export PATH=$GOPATH/bin:$PATH 6 | 7 | cd $GOPATH/src/github.com/plexdrive/plexdrive 8 | 9 | go get -v 10 | go test ./... -race -cover 11 | -------------------------------------------------------------------------------- /ci/test-nightly.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: golang 7 | 8 | inputs: 9 | - name: plexdrive 10 | path: go/src/github.com/plexdrive/plexdrive 11 | 12 | run: 13 | path: go/src/github.com/plexdrive/plexdrive/ci/scripts/test.sh 14 | -------------------------------------------------------------------------------- /ci/test-release.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: golang 7 | 8 | inputs: 9 | - name: plexdrive 10 | path: go/src/github.com/plexdrive/plexdrive 11 | 12 | run: 13 | path: go/src/github.com/plexdrive/plexdrive/ci/scripts/test.sh 14 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | 8 | . 
"github.com/claudetech/loggo/default" 9 | ) 10 | 11 | // Config describes the basic configuration architecture 12 | type Config struct { 13 | ClientID string 14 | ClientSecret string 15 | } 16 | 17 | // Read reads the configuration based on a filesystem path 18 | func Read(configPath string) (*Config, error) { 19 | configFile, err := ioutil.ReadFile(configPath) 20 | if nil != err { 21 | return nil, fmt.Errorf("Could not read config file in %v", configPath) 22 | } 23 | 24 | var config Config 25 | json.Unmarshal(configFile, &config) 26 | return &config, nil 27 | } 28 | 29 | // Create creates the configuration by requesting from stdin 30 | func Create(configPath string) (*Config, error) { 31 | var config Config 32 | fmt.Println("1. Please go to https://console.developers.google.com/") 33 | fmt.Println("2. Create a new project") 34 | fmt.Println("3. Go to library and activate the Google Drive API") 35 | fmt.Println("4. Go to credentials and create an OAuth client ID") 36 | fmt.Println("5. Set the application type to 'other'") 37 | fmt.Println("6. Specify some name and click create") 38 | fmt.Printf("7. Enter your generated client ID: ") 39 | 40 | if _, err := fmt.Scan(&config.ClientID); err != nil { 41 | Log.Debugf("%v", err) 42 | return nil, fmt.Errorf("Unable to read client id") 43 | } 44 | fmt.Printf("8. 
Enter your generated client secret: ") 45 | if _, err := fmt.Scan(&config.ClientSecret); err != nil { 46 | Log.Debugf("%v", err) 47 | return nil, fmt.Errorf("Unable to read client secret") 48 | } 49 | 50 | configJSON, err := json.Marshal(&config) 51 | if nil != err { 52 | Log.Debugf("%v", err) 53 | return nil, fmt.Errorf("Could not generate config.json content") 54 | } 55 | 56 | if err := ioutil.WriteFile(configPath, configJSON, 0766); nil != err { 57 | Log.Debugf("%v", err) 58 | return nil, fmt.Errorf("Could not generate config.json file") 59 | } 60 | 61 | return &config, nil 62 | } 63 | -------------------------------------------------------------------------------- /drive/cache.go: -------------------------------------------------------------------------------- 1 | package drive 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "path/filepath" 9 | 10 | "time" 11 | 12 | . "github.com/claudetech/loggo/default" 13 | "golang.org/x/oauth2" 14 | 15 | "github.com/boltdb/bolt" 16 | ) 17 | 18 | // Cache is the cache 19 | type Cache struct { 20 | db *bolt.DB 21 | tokenPath string 22 | } 23 | 24 | var ( 25 | bObjects = []byte("api_objects") 26 | bParents = []byte("idx_api_objects_py_parent") 27 | bPageToken = []byte("page_token") 28 | ) 29 | 30 | // APIObject is a Google Drive file object 31 | type APIObject struct { 32 | ObjectID string 33 | Name string 34 | IsDir bool 35 | Size uint64 36 | LastModified time.Time 37 | DownloadURL string 38 | Parents []string 39 | CanTrash bool 40 | } 41 | 42 | // PageToken is the last change id 43 | type PageToken struct { 44 | ID string 45 | Token string 46 | } 47 | 48 | // NewCache creates a new cache instance 49 | func NewCache(cacheFile, configPath string, sqlDebug bool) (*Cache, error) { 50 | Log.Debugf("Opening cache connection") 51 | 52 | db, err := bolt.Open(cacheFile, 0600, nil) 53 | if nil != err { 54 | Log.Debugf("%v", err) 55 | return nil, fmt.Errorf("Could not open cache file") 56 | } 57 | 58 | 
cache := Cache{ 59 | db: db, 60 | tokenPath: filepath.Join(configPath, "token.json"), 61 | } 62 | 63 | // Make sure the necessary buckets exist 64 | err = db.Update(func(tx *bolt.Tx) error { 65 | if _, err := tx.CreateBucketIfNotExists(bObjects); nil != err { 66 | return err 67 | } 68 | if _, err := tx.CreateBucketIfNotExists(bParents); nil != err { 69 | return err 70 | } 71 | if _, err := tx.CreateBucketIfNotExists(bPageToken); nil != err { 72 | return err 73 | } 74 | return nil 75 | }) 76 | 77 | return &cache, err 78 | } 79 | 80 | // Close closes all handles 81 | func (c *Cache) Close() error { 82 | Log.Debugf("Closing cache file") 83 | c.db.Close() 84 | return nil 85 | } 86 | 87 | // LoadToken loads a token from cache 88 | func (c *Cache) LoadToken() (*oauth2.Token, error) { 89 | Log.Debugf("Loading token from cache") 90 | 91 | tokenFile, err := ioutil.ReadFile(c.tokenPath) 92 | if nil != err { 93 | Log.Debugf("%v", err) 94 | return nil, fmt.Errorf("Could not read token file in %v", c.tokenPath) 95 | } 96 | 97 | var token oauth2.Token 98 | json.Unmarshal(tokenFile, &token) 99 | 100 | Log.Tracef("Got token from cache %v", token) 101 | 102 | return &token, nil 103 | } 104 | 105 | // StoreToken stores a token in the cache or updates the existing token element 106 | func (c *Cache) StoreToken(token *oauth2.Token) error { 107 | Log.Debugf("Storing token to cache") 108 | 109 | tokenJSON, err := json.Marshal(token) 110 | if nil != err { 111 | Log.Debugf("%v", err) 112 | return fmt.Errorf("Could not generate token.json content") 113 | } 114 | 115 | if err := ioutil.WriteFile(c.tokenPath, tokenJSON, 0644); nil != err { 116 | Log.Debugf("%v", err) 117 | return fmt.Errorf("Could not generate token.json file") 118 | } 119 | 120 | return nil 121 | } 122 | 123 | // GetObject gets an object by id 124 | func (c *Cache) GetObject(id string) (object *APIObject, err error) { 125 | Log.Tracef("Getting object %v", id) 126 | 127 | c.db.View(func(tx *bolt.Tx) error { 128 | object, err 
= boltGetObject(tx, id) 129 | return nil 130 | }) 131 | if nil != err { 132 | return nil, err 133 | } 134 | 135 | Log.Tracef("Got object from cache %v", object) 136 | return object, err 137 | } 138 | 139 | // GetObjectsByParent get all objects under parent id 140 | func (c *Cache) GetObjectsByParent(parent string) ([]*APIObject, error) { 141 | Log.Tracef("Getting children for %v", parent) 142 | 143 | objects := make([]*APIObject, 0) 144 | c.db.View(func(tx *bolt.Tx) error { 145 | cr := tx.Bucket(bParents).Cursor() 146 | 147 | // Iterate over all object ids stored under the parent in the index 148 | objectIds := make([]string, 0) 149 | prefix := []byte(parent + "/") 150 | for k, v := cr.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = cr.Next() { 151 | objectIds = append(objectIds, string(v)) 152 | } 153 | 154 | // Fetch all objects for the given ids 155 | for _, id := range objectIds { 156 | if object, err := boltGetObject(tx, id); nil == err { 157 | objects = append(objects, object) 158 | } 159 | } 160 | return nil 161 | }) 162 | 163 | Log.Tracef("Got objects from cache %v", objects) 164 | return objects, nil 165 | } 166 | 167 | // GetObjectByParentAndName finds a child element by name and its parent id 168 | func (c *Cache) GetObjectByParentAndName(parent, name string) (object *APIObject, err error) { 169 | Log.Tracef("Getting object %v in parent %v", name, parent) 170 | 171 | c.db.View(func(tx *bolt.Tx) error { 172 | // Look up object id in parent-name index 173 | b := tx.Bucket(bParents) 174 | v := b.Get([]byte(parent + "/" + name)) 175 | if nil == v { 176 | return nil 177 | } 178 | 179 | // Fetch object for given id 180 | object, err = boltGetObject(tx, string(v)) 181 | return nil 182 | }) 183 | if nil != err { 184 | return nil, err 185 | } 186 | 187 | if object == nil { 188 | return nil, fmt.Errorf("Could not find object with name %v in parent %v", name, parent) 189 | } 190 | 191 | Log.Tracef("Got object from cache %v", object) 192 | return 
object, nil 193 | } 194 | 195 | // DeleteObject deletes an object by id 196 | func (c *Cache) DeleteObject(id string) error { 197 | err := c.db.Update(func(tx *bolt.Tx) error { 198 | b := tx.Bucket(bObjects) 199 | object, _ := boltGetObject(tx, id) 200 | if nil == object { 201 | return nil 202 | } 203 | 204 | b.Delete([]byte(id)) 205 | 206 | // Remove object ids from the index 207 | b = tx.Bucket(bParents) 208 | for _, parent := range object.Parents { 209 | b.Delete([]byte(parent + "/" + object.Name)) 210 | } 211 | 212 | return nil 213 | }) 214 | if nil != err { 215 | Log.Debugf("%v", err) 216 | return fmt.Errorf("Could not delete object %v", id) 217 | } 218 | 219 | return nil 220 | } 221 | 222 | // UpdateObject updates an object 223 | func (c *Cache) UpdateObject(object *APIObject) error { 224 | err := c.db.Update(func(tx *bolt.Tx) error { 225 | return boltUpdateObject(tx, object) 226 | }) 227 | 228 | if nil != err { 229 | Log.Debugf("%v", err) 230 | return fmt.Errorf("Could not update/save object %v (%v)", object.ObjectID, object.Name) 231 | } 232 | 233 | return nil 234 | } 235 | 236 | func boltStoreObject(tx *bolt.Tx, object *APIObject) error { 237 | b := tx.Bucket(bObjects) 238 | v, err := json.Marshal(object) 239 | if nil != err { 240 | return err 241 | } 242 | return b.Put([]byte(object.ObjectID), v) 243 | } 244 | 245 | func boltGetObject(tx *bolt.Tx, id string) (*APIObject, error) { 246 | b := tx.Bucket(bObjects) 247 | v := b.Get([]byte(id)) 248 | if v == nil { 249 | return nil, fmt.Errorf("Could not find object %v in cache", id) 250 | } 251 | 252 | var object APIObject 253 | err := json.Unmarshal(v, &object) 254 | return &object, err 255 | } 256 | 257 | func boltUpdateObject(tx *bolt.Tx, object *APIObject) error { 258 | prev, _ := boltGetObject(tx, object.ObjectID) 259 | if nil != prev { 260 | // Remove object ids from the index 261 | b := tx.Bucket(bParents) 262 | for _, parent := range prev.Parents { 263 | b.Delete([]byte(parent + "/" + prev.Name)) 264 | 
} 265 | } 266 | 267 | if err := boltStoreObject(tx, object); nil != err { 268 | return err 269 | } 270 | 271 | // Store the object id by parent-name in the index 272 | b := tx.Bucket(bParents) 273 | for _, parent := range object.Parents { 274 | if err := b.Put([]byte(parent+"/"+object.Name), []byte(object.ObjectID)); nil != err { 275 | return err 276 | } 277 | } 278 | return nil 279 | } 280 | 281 | func (c *Cache) BatchUpdateObjects(objects []*APIObject) error { 282 | err := c.db.Update(func(tx *bolt.Tx) error { 283 | for _, object := range objects { 284 | if err := boltUpdateObject(tx, object); nil != err { 285 | return err 286 | } 287 | } 288 | return nil 289 | }) 290 | 291 | if nil != err { 292 | Log.Debugf("%v", err) 293 | return fmt.Errorf("Could not update/save objects: %v", err) 294 | } 295 | 296 | return nil 297 | } 298 | 299 | // StoreStartPageToken stores the page token for changes 300 | func (c *Cache) StoreStartPageToken(token string) error { 301 | Log.Debugf("Storing page token %v in cache", token) 302 | err := c.db.Update(func(tx *bolt.Tx) error { 303 | b := tx.Bucket(bPageToken) 304 | return b.Put([]byte("t"), []byte(token)) 305 | }) 306 | 307 | if nil != err { 308 | Log.Debugf("%v", err) 309 | return fmt.Errorf("Could not store token %v", token) 310 | } 311 | 312 | return nil 313 | } 314 | 315 | // GetStartPageToken gets the start page token 316 | func (c *Cache) GetStartPageToken() (string, error) { 317 | var pageToken string 318 | 319 | Log.Debugf("Getting start page token from cache") 320 | c.db.View(func(tx *bolt.Tx) error { 321 | b := tx.Bucket(bPageToken) 322 | v := b.Get([]byte("t")) 323 | pageToken = string(v) 324 | return nil 325 | }) 326 | if pageToken == "" { 327 | return "", fmt.Errorf("Could not get token from cache, token is empty") 328 | } 329 | 330 | Log.Tracef("Got start page token %v", pageToken) 331 | return pageToken, nil 332 | } 333 | -------------------------------------------------------------------------------- 
/drive/drive.go: -------------------------------------------------------------------------------- 1 | package drive 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "time" 8 | 9 | . "github.com/claudetech/loggo/default" 10 | "github.com/plexdrive/plexdrive/config" 11 | "golang.org/x/oauth2" 12 | gdrive "google.golang.org/api/drive/v3" 13 | "google.golang.org/api/googleapi" 14 | ) 15 | 16 | // Fields are the fields that should be returned by the Google Drive API 17 | var Fields string 18 | 19 | // init initializes the global configurations 20 | func init() { 21 | Fields = "id, name, mimeType, modifiedTime, size, explicitlyTrashed, parents, capabilities/canTrash" 22 | } 23 | 24 | // Client holds the Google Drive API connection(s) 25 | type Client struct { 26 | cache *Cache 27 | context context.Context 28 | token *oauth2.Token 29 | config *oauth2.Config 30 | rootNodeID string 31 | driveID string 32 | changesChecking bool 33 | } 34 | 35 | // NewClient creates a new Google Drive client 36 | func NewClient(config *config.Config, cache *Cache, refreshInterval time.Duration, rootNodeID string, driveID string) (*Client, error) { 37 | client := Client{ 38 | cache: cache, 39 | context: context.Background(), 40 | config: &oauth2.Config{ 41 | ClientID: config.ClientID, 42 | ClientSecret: config.ClientSecret, 43 | Endpoint: oauth2.Endpoint{ 44 | AuthURL: "https://accounts.google.com/o/oauth2/auth", 45 | TokenURL: "https://accounts.google.com/o/oauth2/token", 46 | }, 47 | RedirectURL: "urn:ietf:wg:oauth:2.0:oob", 48 | Scopes: []string{gdrive.DriveScope}, 49 | }, 50 | rootNodeID: rootNodeID, 51 | driveID: driveID, 52 | changesChecking: false, 53 | } 54 | 55 | if "" == client.rootNodeID { 56 | client.rootNodeID = "root" 57 | } 58 | if "" != client.driveID && client.rootNodeID == "root" { 59 | client.rootNodeID = client.driveID 60 | } 61 | 62 | if err := client.authorize(); nil != err { 63 | return nil, err 64 | } 65 | 66 | go client.startWatchChanges(refreshInterval) 67 
| 68 | return &client, nil 69 | } 70 | 71 | func (d *Client) startWatchChanges(refreshInterval time.Duration) { 72 | d.checkChanges(true) 73 | for _ = range time.Tick(refreshInterval) { 74 | d.checkChanges(false) 75 | } 76 | } 77 | 78 | func (d *Client) checkChanges(firstCheck bool) { 79 | if d.changesChecking { 80 | return 81 | } 82 | d.changesChecking = true 83 | 84 | Log.Debugf("Checking for changes") 85 | 86 | client, err := d.getClient() 87 | if nil != err { 88 | Log.Debugf("%v", err) 89 | Log.Warningf("Could not get Google Drive client to watch for changes") 90 | return 91 | } 92 | 93 | // get the last token 94 | pageToken, err := d.cache.GetStartPageToken() 95 | if nil != err { 96 | pageToken = "1" 97 | Log.Info("No last change id found, starting from beginning...") 98 | } else { 99 | Log.Debugf("Last change id found, continuing getting changes (%v)", pageToken) 100 | } 101 | 102 | if firstCheck { 103 | Log.Infof("First cache build process started...") 104 | } 105 | 106 | deletedItems := 0 107 | updatedItems := 0 108 | processedItems := 0 109 | for { 110 | query := client.Changes. 111 | List(pageToken). 112 | Fields(googleapi.Field(fmt.Sprintf("nextPageToken, newStartPageToken, changes(changeType, removed, fileId, file(%v))", Fields))). 113 | PageSize(1000). 114 | SupportsAllDrives(true). 115 | IncludeItemsFromAllDrives(true). 
116 | IncludeCorpusRemovals(true) 117 | 118 | if d.driveID != "" { 119 | query = query.TeamDriveId(d.driveID) 120 | } 121 | 122 | results, err := query.Do() 123 | if nil != err { 124 | Log.Debugf("%v", err) 125 | Log.Warningf("Could not get changes") 126 | break 127 | } 128 | 129 | objects := make([]*APIObject, 0) 130 | for _, change := range results.Changes { 131 | Log.Tracef("Change %v", change) 132 | // ignore changes for changeType drive 133 | if change.ChangeType != "file" { 134 | Log.Warningf("Ignoring change type %v", change.ChangeType) 135 | continue 136 | } 137 | 138 | if change.Removed || (nil != change.File && change.File.ExplicitlyTrashed) { 139 | if err := d.cache.DeleteObject(change.FileId); nil != err { 140 | Log.Tracef("%v", err) 141 | } 142 | deletedItems++ 143 | } else { 144 | object, err := d.mapFileToObject(change.File) 145 | if nil != err { 146 | Log.Debugf("%v", err) 147 | Log.Warningf("Could not map Google Drive file %v (%v) to object", change.File.Id, change.File.Name) 148 | } else { 149 | objects = append(objects, object) 150 | updatedItems++ 151 | } 152 | } 153 | 154 | processedItems++ 155 | } 156 | if err := d.cache.BatchUpdateObjects(objects); nil != err { 157 | Log.Warningf("%v", err) 158 | return 159 | } 160 | 161 | if processedItems > 0 { 162 | Log.Infof("Processed %v items / deleted %v items / updated %v items", 163 | processedItems, deletedItems, updatedItems) 164 | } 165 | 166 | if "" != results.NextPageToken { 167 | pageToken = results.NextPageToken 168 | d.cache.StoreStartPageToken(pageToken) 169 | } else { 170 | pageToken = results.NewStartPageToken 171 | d.cache.StoreStartPageToken(pageToken) 172 | break 173 | } 174 | } 175 | 176 | if firstCheck { 177 | Log.Infof("First cache build process finished!") 178 | } 179 | 180 | d.changesChecking = false 181 | } 182 | 183 | func (d *Client) authorize() error { 184 | Log.Debugf("Authorizing against Google Drive API") 185 | 186 | token, err := d.cache.LoadToken() 187 | if nil != err { 
188 | Log.Debugf("Token could not be found, fetching new one") 189 | 190 | t, err := getTokenFromWeb(d.config) 191 | if nil != err { 192 | return err 193 | } 194 | token = t 195 | if err := d.cache.StoreToken(token); nil != err { 196 | return err 197 | } 198 | } 199 | 200 | d.token = token 201 | return nil 202 | } 203 | 204 | // getTokenFromWeb uses Config to request a Token. 205 | // It returns the retrieved Token. 206 | func getTokenFromWeb(config *oauth2.Config) (*oauth2.Token, error) { 207 | authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline) 208 | fmt.Printf("Go to the following link in your browser %v\n", authURL) 209 | fmt.Printf("Paste the authorization code: ") 210 | 211 | var code string 212 | if _, err := fmt.Scan(&code); err != nil { 213 | return nil, fmt.Errorf("Unable to read authorization code %v", err) 214 | } 215 | 216 | tok, err := config.Exchange(oauth2.NoContext, code) 217 | if err != nil { 218 | return nil, fmt.Errorf("Unable to retrieve token from web %v", err) 219 | } 220 | return tok, err 221 | } 222 | 223 | // getClient gets a new Google Drive client 224 | func (d *Client) getClient() (*gdrive.Service, error) { 225 | return gdrive.New(d.config.Client(d.context, d.token)) 226 | } 227 | 228 | // GetNativeClient gets a native http client 229 | func (d *Client) GetNativeClient() *http.Client { 230 | return oauth2.NewClient(d.context, d.config.TokenSource(d.context, d.token)) 231 | } 232 | 233 | // GetRoot gets the root node directly from the API 234 | func (d *Client) GetRoot() (*APIObject, error) { 235 | Log.Debugf("Getting root from API") 236 | 237 | client, err := d.getClient() 238 | if nil != err { 239 | Log.Debugf("%v", err) 240 | return nil, fmt.Errorf("Could not get Google Drive client") 241 | } 242 | 243 | file, err := client.Files. 244 | Get(d.rootNodeID). 245 | Fields(googleapi.Field(Fields)). 246 | SupportsAllDrives(true). 
247 | Do() 248 | if nil != err { 249 | Log.Debugf("%v", err) 250 | return nil, fmt.Errorf("Could not get object %v from API", d.rootNodeID) 251 | } 252 | 253 | // getting file size 254 | if file.MimeType != "application/vnd.google-apps.folder" && 0 == file.Size { 255 | res, err := client.Files.Get(d.rootNodeID).SupportsAllDrives(true).Download() 256 | if nil != err { 257 | Log.Debugf("%v", err) 258 | return nil, fmt.Errorf("Could not get file size for object %v", d.rootNodeID) 259 | } 260 | file.Size = res.ContentLength 261 | } 262 | 263 | return d.mapFileToObject(file) 264 | } 265 | 266 | // GetObject gets an object by id 267 | func (d *Client) GetObject(id string) (*APIObject, error) { 268 | return d.cache.GetObject(id) 269 | } 270 | 271 | // GetObjectsByParent get all objects under parent id 272 | func (d *Client) GetObjectsByParent(parent string) ([]*APIObject, error) { 273 | return d.cache.GetObjectsByParent(parent) 274 | } 275 | 276 | // GetObjectByParentAndName finds a child element by name and its parent id 277 | func (d *Client) GetObjectByParentAndName(parent, name string) (*APIObject, error) { 278 | return d.cache.GetObjectByParentAndName(parent, name) 279 | } 280 | 281 | // Remove removes file from Google Drive 282 | func (d *Client) Remove(object *APIObject, parent string) error { 283 | client, err := d.getClient() 284 | if nil != err { 285 | Log.Debugf("%v", err) 286 | return fmt.Errorf("Could not get Google Drive client") 287 | } 288 | 289 | if err := d.cache.DeleteObject(object.ObjectID); nil != err { 290 | Log.Debugf("%v", err) 291 | return fmt.Errorf("Could not delete object %v (%v) from cache", object.ObjectID, object.Name) 292 | } 293 | 294 | go func() { 295 | if object.CanTrash { 296 | if _, err := client.Files.Update(object.ObjectID, &gdrive.File{Trashed: true}).SupportsAllDrives(true).Do(); nil != err { 297 | Log.Debugf("%v", err) 298 | Log.Warningf("Could not delete object %v (%v) from API", object.ObjectID, object.Name) 299 | 
d.cache.UpdateObject(object) 300 | } 301 | } else { 302 | if _, err := client.Files.Update(object.ObjectID, nil).RemoveParents(parent).SupportsAllDrives(true).Do(); nil != err { 303 | Log.Debugf("%v", err) 304 | Log.Warningf("Could not unsubscribe object %v (%v) from API", object.ObjectID, object.Name) 305 | d.cache.UpdateObject(object) 306 | } 307 | } 308 | }() 309 | 310 | return nil 311 | } 312 | 313 | // Mkdir creates a new directory in Google Drive 314 | func (d *Client) Mkdir(parent string, Name string) (*APIObject, error) { 315 | client, err := d.getClient() 316 | if nil != err { 317 | Log.Debugf("%v", err) 318 | return nil, fmt.Errorf("Could not get Google Drive client") 319 | } 320 | 321 | created, err := client.Files.Create(&gdrive.File{Name: Name, Parents: []string{parent}, MimeType: "application/vnd.google-apps.folder"}).SupportsAllDrives(true).Do() 322 | if nil != err { 323 | Log.Debugf("%v", err) 324 | return nil, fmt.Errorf("Could not create object(%v) from API", Name) 325 | } 326 | 327 | file, err := client.Files.Get(created.Id).Fields(googleapi.Field(Fields)).SupportsAllDrives(true).Do() 328 | if nil != err { 329 | Log.Debugf("%v", err) 330 | return nil, fmt.Errorf("Could not get object fields %v from API", created.Id) 331 | } 332 | 333 | Obj, err := d.mapFileToObject(file) 334 | if nil != err { 335 | Log.Debugf("%v", err) 336 | return nil, fmt.Errorf("Could not map file to object %v (%v)", file.Id, file.Name) 337 | } 338 | 339 | if err := d.cache.UpdateObject(Obj); nil != err { 340 | Log.Debugf("%v", err) 341 | return nil, fmt.Errorf("Could not create object %v (%v) from cache", Obj.ObjectID, Obj.Name) 342 | } 343 | 344 | return Obj, nil 345 | } 346 | 347 | // Rename renames file in Google Drive 348 | func (d *Client) Rename(object *APIObject, OldParent string, NewParent string, NewName string) error { 349 | client, err := d.getClient() 350 | if nil != err { 351 | Log.Debugf("%v", err) 352 | return fmt.Errorf("Could not get Google Drive client") 
353 | } 354 | 355 | if _, err := client.Files.Update(object.ObjectID, &gdrive.File{Name: NewName}).RemoveParents(OldParent).AddParents(NewParent).SupportsAllDrives(true).Do(); nil != err { 356 | Log.Debugf("%v", err) 357 | return fmt.Errorf("Could not rename object %v (%v) from API", object.ObjectID, object.Name) 358 | } 359 | 360 | object.Name = NewName 361 | for i, p := range object.Parents { 362 | if p == OldParent { 363 | object.Parents = append(object.Parents[:i], object.Parents[i+1:]...) 364 | break 365 | } 366 | } 367 | object.Parents = append(object.Parents, NewParent) 368 | 369 | if err := d.cache.UpdateObject(object); nil != err { 370 | Log.Debugf("%v", err) 371 | return fmt.Errorf("Could not rename object %v (%v) from cache", object.ObjectID, object.Name) 372 | } 373 | 374 | return nil 375 | } 376 | 377 | // mapFileToObject maps a Google Drive file to APIObject 378 | func (d *Client) mapFileToObject(file *gdrive.File) (*APIObject, error) { 379 | Log.Tracef("Converting Google Drive file: %v", file) 380 | 381 | lastModified, err := time.Parse(time.RFC3339, file.ModifiedTime) 382 | if nil != err { 383 | Log.Debugf("%v", err) 384 | Log.Warningf("Could not parse last modified date for object %v (%v)", file.Id, file.Name) 385 | lastModified = time.Now() 386 | } 387 | 388 | var parents []string 389 | for _, parent := range file.Parents { 390 | parents = append(parents, parent) 391 | } 392 | 393 | return &APIObject{ 394 | ObjectID: file.Id, 395 | Name: file.Name, 396 | IsDir: file.MimeType == "application/vnd.google-apps.folder", 397 | LastModified: lastModified, 398 | Size: uint64(file.Size), 399 | DownloadURL: fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%v?alt=media", file.Id), 400 | Parents: parents, 401 | CanTrash: file.Capabilities.CanTrash, 402 | }, nil 403 | } 404 | -------------------------------------------------------------------------------- /logo/banner.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/felixbuenemann/plexdrive/0330ec3ac0529e77c71fb5df93002f4bd8119e1d/logo/banner.png -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/user" 7 | "path/filepath" 8 | "strconv" 9 | 10 | "time" 11 | 12 | "strings" 13 | 14 | "syscall" 15 | 16 | "os/signal" 17 | 18 | "runtime" 19 | 20 | "github.com/claudetech/loggo" 21 | . "github.com/claudetech/loggo/default" 22 | "github.com/plexdrive/plexdrive/chunk" 23 | "github.com/plexdrive/plexdrive/config" 24 | "github.com/plexdrive/plexdrive/drive" 25 | "github.com/plexdrive/plexdrive/mount" 26 | flag "github.com/ogier/pflag" 27 | "golang.org/x/sys/unix" 28 | ) 29 | 30 | func main() { 31 | // Find users home directory 32 | usr, err := user.Current() 33 | home := "" 34 | if err != nil { 35 | // Fall back to reading $HOME - work around user.Current() not 36 | // working for cross compiled binaries on OSX or freebsd. 
37 | // https://github.com/golang/go/issues/6376 38 | home = os.Getenv("HOME") 39 | if home == "" { 40 | panic(fmt.Sprintf("Could not read users homedir and HOME is not set: %v\n", err)) 41 | } 42 | } else { 43 | home = usr.HomeDir 44 | } 45 | 46 | // parse the command line arguments 47 | argLogLevel := flag.IntP("verbosity", "v", 0, "Set the log level (0 = error, 1 = warn, 2 = info, 3 = debug, 4 = trace)") 48 | argRootNodeID := flag.String("root-node-id", "root", "The ID of the root node to mount (use this for only mount a sub directory)") 49 | argDriveID := flag.String("drive-id", "", "The ID of the shared drive to mount (including team drives)") 50 | argConfigPath := flag.StringP("config", "c", filepath.Join(home, ".plexdrive"), "The path to the configuration directory") 51 | argCacheFile := flag.String("cache-file", filepath.Join(home, ".plexdrive", "cache.bolt"), "Path the the cache file") 52 | argChunkSize := flag.String("chunk-size", "10M", "The size of each chunk that is downloaded (units: B, K, M, G)") 53 | argChunkLoadThreads := flag.Int("chunk-load-threads", max(runtime.NumCPU()/2, 1), "The number of threads to use for downloading chunks") 54 | argChunkCheckThreads := flag.Int("chunk-check-threads", max(runtime.NumCPU()/2, 1), "The number of threads to use for checking chunk existence") 55 | argChunkLoadAhead := flag.Int("chunk-load-ahead", max(runtime.NumCPU()-1, 1), "The number of chunks that should be read ahead") 56 | argMaxChunks := flag.Int("max-chunks", runtime.NumCPU()*2, "The maximum number of chunks to be stored on disk") 57 | argRefreshInterval := flag.Duration("refresh-interval", 1*time.Minute, "The time to wait till checking for changes") 58 | argMountOptions := flag.StringP("fuse-options", "o", "", "Fuse mount options (e.g. 
-fuse-options allow_other,...)") 59 | argVersion := flag.Bool("version", false, "Displays program's version information") 60 | argUID := flag.Int64("uid", -1, "Set the mounts UID (-1 = default permissions)") 61 | argGID := flag.Int64("gid", -1, "Set the mounts GID (-1 = default permissions)") 62 | argUmask := flag.Uint32("umask", 0, "Override the default file permissions") 63 | // argDownloadSpeedLimit := flag.String("speed-limit", "", "This value limits the download speed, e.g. 5M = 5MB/s per chunk (units: B, K, M, G)") 64 | flag.Parse() 65 | 66 | // display version information 67 | if *argVersion { 68 | fmt.Println("%VERSION%") 69 | return 70 | } 71 | 72 | argCommand := flag.Arg(0) 73 | 74 | if argCommand == "mount" { 75 | // check if mountpoint is specified 76 | argMountPoint := flag.Arg(1) 77 | if "" == argMountPoint { 78 | flag.Usage() 79 | fmt.Println() 80 | panic(fmt.Errorf("Mountpoint not specified")) 81 | } 82 | 83 | // calculate uid / gid 84 | uid := uint32(unix.Geteuid()) 85 | gid := uint32(unix.Getegid()) 86 | if *argUID > -1 { 87 | uid = uint32(*argUID) 88 | } 89 | if *argGID > -1 { 90 | gid = uint32(*argGID) 91 | } 92 | 93 | // parse filemode 94 | umask := os.FileMode(*argUmask) 95 | 96 | // parse the mount options 97 | var mountOptions []string 98 | if "" != *argMountOptions { 99 | mountOptions = strings.Split(*argMountOptions, ",") 100 | } 101 | 102 | // initialize the logger with the specific log level 103 | var logLevel loggo.Level 104 | switch *argLogLevel { 105 | case 0: 106 | logLevel = loggo.Error 107 | case 1: 108 | logLevel = loggo.Warning 109 | case 2: 110 | logLevel = loggo.Info 111 | case 3: 112 | logLevel = loggo.Debug 113 | case 4: 114 | logLevel = loggo.Trace 115 | default: 116 | logLevel = loggo.Warning 117 | } 118 | Log.SetLevel(logLevel) 119 | 120 | // debug all given parameters 121 | Log.Debugf("verbosity : %v", logLevel) 122 | Log.Debugf("root-node-id : %v", *argRootNodeID) 123 | Log.Debugf("drive-id : %v", *argDriveID) 124 | 
Log.Debugf("config : %v", *argConfigPath) 125 | Log.Debugf("cache-file : %v", *argCacheFile) 126 | Log.Debugf("chunk-size : %v", *argChunkSize) 127 | Log.Debugf("chunk-load-threads : %v", *argChunkLoadThreads) 128 | Log.Debugf("chunk-check-threads : %v", *argChunkCheckThreads) 129 | Log.Debugf("chunk-load-ahead : %v", *argChunkLoadAhead) 130 | Log.Debugf("max-chunks : %v", *argMaxChunks) 131 | Log.Debugf("refresh-interval : %v", *argRefreshInterval) 132 | Log.Debugf("fuse-options : %v", *argMountOptions) 133 | Log.Debugf("UID : %v", uid) 134 | Log.Debugf("GID : %v", gid) 135 | Log.Debugf("umask : %v", umask) 136 | // Log.Debugf("speed-limit : %v", *argDownloadSpeedLimit) 137 | // version missing here 138 | 139 | // create all directories 140 | if err := os.MkdirAll(*argConfigPath, 0766); nil != err { 141 | Log.Errorf("Could not create configuration directory") 142 | Log.Debugf("%v", err) 143 | os.Exit(1) 144 | } 145 | if err := os.MkdirAll(filepath.Dir(*argCacheFile), 0766); nil != err { 146 | Log.Errorf("Could not create cache file directory") 147 | Log.Debugf("%v", err) 148 | os.Exit(1) 149 | } 150 | 151 | // set the global buffer configuration 152 | chunkSize, err := parseSizeArg(*argChunkSize) 153 | if nil != err { 154 | Log.Errorf("%v", err) 155 | os.Exit(2) 156 | } 157 | 158 | // read the configuration 159 | configPath := filepath.Join(*argConfigPath, "config.json") 160 | cfg, err := config.Read(configPath) 161 | if nil != err { 162 | cfg, err = config.Create(configPath) 163 | if nil != err { 164 | Log.Errorf("Could not read configuration") 165 | Log.Debugf("%v", err) 166 | os.Exit(3) 167 | } 168 | } 169 | 170 | cache, err := drive.NewCache(*argCacheFile, *argConfigPath, *argLogLevel > 3) 171 | if nil != err { 172 | Log.Errorf("%v", err) 173 | os.Exit(4) 174 | } 175 | defer cache.Close() 176 | 177 | client, err := drive.NewClient(cfg, cache, *argRefreshInterval, *argRootNodeID, *argDriveID) 178 | if nil != err { 179 | Log.Errorf("%v", err) 180 | os.Exit(4) 
181 | } 182 | 183 | chunkManager, err := chunk.NewManager( 184 | chunkSize, 185 | *argChunkLoadAhead, 186 | *argChunkCheckThreads, 187 | *argChunkLoadThreads, 188 | client, 189 | *argMaxChunks) 190 | if nil != err { 191 | Log.Errorf("%v", err) 192 | os.Exit(4) 193 | } 194 | 195 | // check os signals like SIGINT/TERM 196 | checkOsSignals(argMountPoint) 197 | if err := mount.Mount(client, chunkManager, argMountPoint, mountOptions, uid, gid, umask); nil != err { 198 | Log.Debugf("%v", err) 199 | os.Exit(5) 200 | } 201 | } else { 202 | Log.Errorf("Command %v not found", argCommand) 203 | } 204 | } 205 | 206 | func checkOsSignals(mountpoint string) { 207 | signals := make(chan os.Signal, 1) 208 | signal.Notify(signals, syscall.SIGINT) 209 | 210 | go func() { 211 | for sig := range signals { 212 | if sig == syscall.SIGINT { 213 | if err := mount.Unmount(mountpoint, false); nil != err { 214 | Log.Warningf("%v", err) 215 | } 216 | } 217 | } 218 | }() 219 | } 220 | 221 | func max(x, y int) int { 222 | if x > y { 223 | return x 224 | } 225 | return y 226 | } 227 | 228 | func parseSizeArg(input string) (int64, error) { 229 | if "" == input { 230 | return 0, nil 231 | } 232 | 233 | suffix := input[len(input)-1] 234 | suffixLen := 1 235 | var multiplier float64 236 | switch suffix { 237 | case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': 238 | suffixLen = 0 239 | case 'b', 'B': 240 | multiplier = 1 241 | case 'k', 'K': 242 | multiplier = 1024 243 | case 'm', 'M': 244 | multiplier = 1024 * 1024 245 | case 'g', 'G': 246 | multiplier = 1024 * 1024 * 1024 247 | default: 248 | return 0, fmt.Errorf("Invalid unit %v for %v", suffix, input) 249 | } 250 | input = input[:len(input)-suffixLen] 251 | value, err := strconv.ParseFloat(input, 64) 252 | if nil != err { 253 | Log.Debugf("%v", err) 254 | return 0, fmt.Errorf("Could not parse numeric value %v", input) 255 | } 256 | if value < 0 { 257 | return 0, fmt.Errorf("Numeric value must not be negative %v", input) 258 | } 259 | 
value *= multiplier 260 | return int64(value), nil 261 | } 262 | -------------------------------------------------------------------------------- /mount/mount.go: -------------------------------------------------------------------------------- 1 | package mount 2 | 3 | import ( 4 | "os" 5 | 6 | "fmt" 7 | 8 | "strings" 9 | 10 | "strconv" 11 | 12 | "bazil.org/fuse" 13 | "bazil.org/fuse/fs" 14 | . "github.com/claudetech/loggo/default" 15 | "github.com/plexdrive/plexdrive/chunk" 16 | "github.com/plexdrive/plexdrive/drive" 17 | "golang.org/x/net/context" 18 | ) 19 | 20 | // Mount the fuse volume 21 | func Mount( 22 | client *drive.Client, 23 | chunkManager *chunk.Manager, 24 | mountpoint string, 25 | mountOptions []string, 26 | uid, gid uint32, 27 | umask os.FileMode) error { 28 | 29 | Log.Infof("Mounting path %v", mountpoint) 30 | 31 | if _, err := os.Stat(mountpoint); os.IsNotExist(err) { 32 | Log.Debugf("Mountpoint doesn't exist, creating...") 33 | if err := os.MkdirAll(mountpoint, 0644); nil != err { 34 | Log.Debugf("%v", err) 35 | return fmt.Errorf("Could not create mount directory %v", mountpoint) 36 | } 37 | } 38 | 39 | fuse.Debug = func(msg interface{}) { 40 | Log.Tracef("FUSE %v", msg) 41 | } 42 | 43 | // Set mount options 44 | options := []fuse.MountOption{ 45 | fuse.NoAppleDouble(), 46 | fuse.NoAppleXattr(), 47 | } 48 | for _, option := range mountOptions { 49 | if "allow_other" == option { 50 | options = append(options, fuse.AllowOther()) 51 | } else if "allow_root" == option { 52 | return fmt.Errorf("The allow_root mount option is no longer supported") 53 | } else if "allow_dev" == option { 54 | options = append(options, fuse.AllowDev()) 55 | } else if "allow_non_empty_mount" == option { 56 | options = append(options, fuse.AllowNonEmptyMount()) 57 | } else if "allow_suid" == option { 58 | options = append(options, fuse.AllowSUID()) 59 | } else if strings.Contains(option, "max_readahead=") { 60 | data := strings.Split(option, "=") 61 | value, err := 
strconv.ParseUint(data[1], 10, 32) 62 | if nil != err { 63 | Log.Debugf("%v", err) 64 | return fmt.Errorf("Could not parse max_readahead value") 65 | } 66 | options = append(options, fuse.MaxReadahead(uint32(value))) 67 | } else if "default_permissions" == option { 68 | options = append(options, fuse.DefaultPermissions()) 69 | } else if "excl_create" == option { 70 | options = append(options, fuse.ExclCreate()) 71 | } else if strings.Contains(option, "fs_name") { 72 | data := strings.Split(option, "=") 73 | options = append(options, fuse.FSName(data[1])) 74 | } else if "local_volume" == option { 75 | options = append(options, fuse.LocalVolume()) 76 | } else if "writeback_cache" == option { 77 | options = append(options, fuse.WritebackCache()) 78 | } else if strings.Contains(option, "volume_name") { 79 | data := strings.Split(option, "=") 80 | options = append(options, fuse.VolumeName(data[1])) 81 | } else if "read_only" == option { 82 | options = append(options, fuse.ReadOnly()) 83 | } else { 84 | Log.Warningf("Fuse option %v is not supported, yet", option) 85 | } 86 | } 87 | 88 | c, err := fuse.Mount(mountpoint, options...) 
89 | if err != nil { 90 | return err 91 | } 92 | defer c.Close() 93 | 94 | filesys := &FS{ 95 | client: client, 96 | chunkManager: chunkManager, 97 | uid: uid, 98 | gid: gid, 99 | umask: umask, 100 | } 101 | if err := fs.Serve(c, filesys); err != nil { 102 | return err 103 | } 104 | 105 | // check if the mount process has an error to report 106 | <-c.Ready 107 | if err := c.MountError; nil != err { 108 | Log.Debugf("%v", err) 109 | return fmt.Errorf("Error mounting FUSE") 110 | } 111 | 112 | return Unmount(mountpoint, true) 113 | } 114 | 115 | // Unmount unmounts the mountpoint 116 | func Unmount(mountpoint string, notify bool) error { 117 | if notify { 118 | Log.Infof("Unmounting path %v", mountpoint) 119 | } 120 | fuse.Unmount(mountpoint) 121 | return nil 122 | } 123 | 124 | // FS the fuse filesystem 125 | type FS struct { 126 | client *drive.Client 127 | chunkManager *chunk.Manager 128 | uid uint32 129 | gid uint32 130 | umask os.FileMode 131 | } 132 | 133 | // Root returns the root path 134 | func (f *FS) Root() (fs.Node, error) { 135 | object, err := f.client.GetRoot() 136 | if nil != err { 137 | Log.Warningf("%v", err) 138 | return nil, fmt.Errorf("Could not get root object") 139 | } 140 | return &Object{ 141 | client: f.client, 142 | chunkManager: f.chunkManager, 143 | object: object, 144 | uid: f.uid, 145 | gid: f.gid, 146 | umask: f.umask, 147 | }, nil 148 | } 149 | 150 | // Object represents one drive object 151 | type Object struct { 152 | client *drive.Client 153 | chunkManager *chunk.Manager 154 | object *drive.APIObject 155 | uid uint32 156 | gid uint32 157 | umask os.FileMode 158 | } 159 | 160 | // Attr returns the attributes for a directory 161 | func (o *Object) Attr(ctx context.Context, attr *fuse.Attr) error { 162 | if o.object.IsDir { 163 | if o.umask > 0 { 164 | attr.Mode = os.ModeDir | o.umask 165 | } else { 166 | attr.Mode = os.ModeDir | 0755 167 | } 168 | attr.Size = 0 169 | } else { 170 | if o.umask > 0 { 171 | attr.Mode = o.umask 172 | } 
else { 173 | attr.Mode = 0644 174 | } 175 | attr.Size = o.object.Size 176 | } 177 | 178 | attr.Uid = uint32(o.uid) 179 | attr.Gid = uint32(o.gid) 180 | 181 | attr.Mtime = o.object.LastModified 182 | attr.Crtime = o.object.LastModified 183 | attr.Ctime = o.object.LastModified 184 | 185 | attr.Blocks = (attr.Size + 511) / 512 186 | 187 | return nil 188 | } 189 | 190 | // ReadDirAll shows all files in the current directory 191 | func (o *Object) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { 192 | objects, err := o.client.GetObjectsByParent(o.object.ObjectID) 193 | if nil != err { 194 | Log.Debugf("%v", err) 195 | return nil, fuse.ENOENT 196 | } 197 | 198 | dirs := []fuse.Dirent{} 199 | for _, object := range objects { 200 | if object.IsDir { 201 | dirs = append(dirs, fuse.Dirent{ 202 | Name: object.Name, 203 | Type: fuse.DT_Dir, 204 | }) 205 | } else { 206 | dirs = append(dirs, fuse.Dirent{ 207 | Name: object.Name, 208 | Type: fuse.DT_File, 209 | }) 210 | } 211 | } 212 | return dirs, nil 213 | } 214 | 215 | // Lookup tests if a file is existent in the current directory 216 | func (o *Object) Lookup(ctx context.Context, name string) (fs.Node, error) { 217 | object, err := o.client.GetObjectByParentAndName(o.object.ObjectID, name) 218 | if nil != err { 219 | Log.Tracef("%v", err) 220 | return nil, fuse.ENOENT 221 | } 222 | 223 | return &Object{ 224 | client: o.client, 225 | chunkManager: o.chunkManager, 226 | object: object, 227 | uid: o.uid, 228 | gid: o.gid, 229 | umask: o.umask, 230 | }, nil 231 | } 232 | 233 | // Read reads some bytes or the whole file 234 | func (o *Object) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { 235 | data, err := o.chunkManager.GetChunk(o.object, req.Offset, int64(req.Size)) 236 | if nil != err { 237 | Log.Warningf("%v", err) 238 | return fuse.EIO 239 | } 240 | 241 | resp.Data = data 242 | return nil 243 | } 244 | 245 | // Remove deletes an element 246 | func (o *Object) Remove(ctx 
context.Context, req *fuse.RemoveRequest) error { 247 | obj, err := o.client.GetObjectByParentAndName(o.object.ObjectID, req.Name) 248 | if nil != err { 249 | Log.Warningf("%v", err) 250 | return fuse.EIO 251 | } 252 | 253 | err = o.client.Remove(obj, o.object.ObjectID) 254 | if nil != err { 255 | Log.Warningf("%v", err) 256 | return fuse.EIO 257 | } 258 | 259 | return nil 260 | } 261 | 262 | // Mkdir creates a new directory 263 | func (o *Object) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { 264 | newObj, err := o.client.Mkdir(o.object.ObjectID, req.Name) 265 | if nil != err { 266 | Log.Warningf("%v", err) 267 | return nil, fuse.EIO 268 | } 269 | 270 | return &Object{ 271 | client: o.client, 272 | object: newObj, 273 | uid: o.uid, 274 | gid: o.gid, 275 | umask: o.umask, 276 | }, nil 277 | } 278 | 279 | // Rename renames an element 280 | func (o *Object) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { 281 | obj, err := o.client.GetObjectByParentAndName(o.object.ObjectID, req.OldName) 282 | if nil != err { 283 | Log.Warningf("%v", err) 284 | return fuse.EIO 285 | } 286 | 287 | destDir, ok := newDir.(*Object) 288 | if !ok { 289 | Log.Warningf("%v", err) 290 | return fuse.EIO 291 | } 292 | 293 | err = o.client.Rename(obj, o.object.ObjectID, destDir.object.ObjectID, req.NewName) 294 | if nil != err { 295 | Log.Warningf("%v", err) 296 | return fuse.EIO 297 | } 298 | 299 | return nil 300 | } 301 | --------------------------------------------------------------------------------