├── .gitignore ├── .gobuilder.yml ├── LICENSE ├── README.md ├── cmd └── syncthingfuse │ ├── addresslister.go │ ├── config.go │ ├── fuse.go │ ├── gui.go │ ├── gui_test.go │ ├── locations.go │ ├── main.go │ ├── random.go │ ├── tls.go │ └── usage.go ├── gui ├── css │ └── icon-addon.css ├── index.html ├── js │ ├── app.js │ ├── core │ │ ├── core.js │ │ └── module.js │ ├── device │ │ ├── editDeviceModalDirective.js │ │ ├── editDeviceModalView.html │ │ ├── editSettingsModalDirective.js │ │ ├── editSettingsModalView.html │ │ └── module.js │ ├── folder │ │ ├── editFolderModalDirective.js │ │ ├── editFolderModalView.html │ │ └── module.js │ └── pins │ │ ├── editPinsModalDirective.js │ │ ├── editPinsModalView.html │ │ └── module.js └── vendor │ ├── angular-1.4.7 │ └── angular.min.js │ └── jquery-2.1.4.min.js ├── lib ├── autogenerated │ └── gui.files.go ├── config │ ├── config.go │ ├── converter.go │ ├── debug.go │ └── wrapper.go ├── fileblockcache │ ├── debug.go │ ├── fileblockcache.go │ └── fileblockcache_test.go ├── filetreecache │ ├── debug.go │ └── filetreecache.go └── model │ ├── debug.go │ ├── model.go │ └── model_test.go ├── old-status.md ├── pre-commit-hook └── scripts └── packassets.go /.gitignore: -------------------------------------------------------------------------------- 1 | private.md 2 | 3 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 4 | *.o 5 | *.a 6 | *.so 7 | 8 | # Folders 9 | _obj 10 | _test 11 | 12 | # Architecture specific extensions/prefixes 13 | *.[568vq] 14 | [568vq].out 15 | 16 | *.cgo1.go 17 | *.cgo2.c 18 | _cgo_defun.c 19 | _cgo_gotypes.go 20 | _cgo_export.* 21 | 22 | _testmain.go 23 | 24 | *.exe 25 | *.test 26 | *.prof 27 | 28 | syncthingfuse 29 | -------------------------------------------------------------------------------- /.gobuilder.yml: -------------------------------------------------------------------------------- 1 | build_matrix: 2 | osx/amd64: 3 | osx/386: 4 | linux/amd64: 5 | linux/386: 6 | readme_file: 
README.md 7 | 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Mozilla Public License, version 2.0 2 | 3 | 1. Definitions 4 | 5 | 1.1. "Contributor" 6 | 7 | means each individual or legal entity that creates, contributes to the 8 | creation of, or owns Covered Software. 9 | 10 | 1.2. "Contributor Version" 11 | 12 | means the combination of the Contributions of others (if any) used by a 13 | Contributor and that particular Contributor's Contribution. 14 | 15 | 1.3. "Contribution" 16 | 17 | means Covered Software of a particular Contributor. 18 | 19 | 1.4. "Covered Software" 20 | 21 | means Source Code Form to which the initial Contributor has attached the 22 | notice in Exhibit A, the Executable Form of such Source Code Form, and 23 | Modifications of such Source Code Form, in each case including portions 24 | thereof. 25 | 26 | 1.5. "Incompatible With Secondary Licenses" 27 | means 28 | 29 | a. that the initial Contributor has attached the notice described in 30 | Exhibit B to the Covered Software; or 31 | 32 | b. that the Covered Software was made available under the terms of 33 | version 1.1 or earlier of the License, but not also under the terms of 34 | a Secondary License. 35 | 36 | 1.6. "Executable Form" 37 | 38 | means any form of the work other than Source Code Form. 39 | 40 | 1.7. "Larger Work" 41 | 42 | means a work that combines Covered Software with other material, in a 43 | separate file or files, that is not Covered Software. 44 | 45 | 1.8. "License" 46 | 47 | means this document. 48 | 49 | 1.9. "Licensable" 50 | 51 | means having the right to grant, to the maximum extent possible, whether 52 | at the time of the initial grant or subsequently, any and all of the 53 | rights conveyed by this License. 54 | 55 | 1.10. "Modifications" 56 | 57 | means any of the following: 58 | 59 | a. 
any file in Source Code Form that results from an addition to, 60 | deletion from, or modification of the contents of Covered Software; or 61 | 62 | b. any new file in Source Code Form that contains any Covered Software. 63 | 64 | 1.11. "Patent Claims" of a Contributor 65 | 66 | means any patent claim(s), including without limitation, method, 67 | process, and apparatus claims, in any patent Licensable by such 68 | Contributor that would be infringed, but for the grant of the License, 69 | by the making, using, selling, offering for sale, having made, import, 70 | or transfer of either its Contributions or its Contributor Version. 71 | 72 | 1.12. "Secondary License" 73 | 74 | means either the GNU General Public License, Version 2.0, the GNU Lesser 75 | General Public License, Version 2.1, the GNU Affero General Public 76 | License, Version 3.0, or any later versions of those licenses. 77 | 78 | 1.13. "Source Code Form" 79 | 80 | means the form of the work preferred for making modifications. 81 | 82 | 1.14. "You" (or "Your") 83 | 84 | means an individual or a legal entity exercising rights under this 85 | License. For legal entities, "You" includes any entity that controls, is 86 | controlled by, or is under common control with You. For purposes of this 87 | definition, "control" means (a) the power, direct or indirect, to cause 88 | the direction or management of such entity, whether by contract or 89 | otherwise, or (b) ownership of more than fifty percent (50%) of the 90 | outstanding shares or beneficial ownership of such entity. 91 | 92 | 93 | 2. License Grants and Conditions 94 | 95 | 2.1. Grants 96 | 97 | Each Contributor hereby grants You a world-wide, royalty-free, 98 | non-exclusive license: 99 | 100 | a. 
under intellectual property rights (other than patent or trademark) 101 | Licensable by such Contributor to use, reproduce, make available, 102 | modify, display, perform, distribute, and otherwise exploit its 103 | Contributions, either on an unmodified basis, with Modifications, or 104 | as part of a Larger Work; and 105 | 106 | b. under Patent Claims of such Contributor to make, use, sell, offer for 107 | sale, have made, import, and otherwise transfer either its 108 | Contributions or its Contributor Version. 109 | 110 | 2.2. Effective Date 111 | 112 | The licenses granted in Section 2.1 with respect to any Contribution 113 | become effective for each Contribution on the date the Contributor first 114 | distributes such Contribution. 115 | 116 | 2.3. Limitations on Grant Scope 117 | 118 | The licenses granted in this Section 2 are the only rights granted under 119 | this License. No additional rights or licenses will be implied from the 120 | distribution or licensing of Covered Software under this License. 121 | Notwithstanding Section 2.1(b) above, no patent license is granted by a 122 | Contributor: 123 | 124 | a. for any code that a Contributor has removed from Covered Software; or 125 | 126 | b. for infringements caused by: (i) Your and any other third party's 127 | modifications of Covered Software, or (ii) the combination of its 128 | Contributions with other software (except as part of its Contributor 129 | Version); or 130 | 131 | c. under Patent Claims infringed by Covered Software in the absence of 132 | its Contributions. 133 | 134 | This License does not grant any rights in the trademarks, service marks, 135 | or logos of any Contributor (except as may be necessary to comply with 136 | the notice requirements in Section 3.4). 137 | 138 | 2.4. 
Subsequent Licenses 139 | 140 | No Contributor makes additional grants as a result of Your choice to 141 | distribute the Covered Software under a subsequent version of this 142 | License (see Section 10.2) or under the terms of a Secondary License (if 143 | permitted under the terms of Section 3.3). 144 | 145 | 2.5. Representation 146 | 147 | Each Contributor represents that the Contributor believes its 148 | Contributions are its original creation(s) or it has sufficient rights to 149 | grant the rights to its Contributions conveyed by this License. 150 | 151 | 2.6. Fair Use 152 | 153 | This License is not intended to limit any rights You have under 154 | applicable copyright doctrines of fair use, fair dealing, or other 155 | equivalents. 156 | 157 | 2.7. Conditions 158 | 159 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in 160 | Section 2.1. 161 | 162 | 163 | 3. Responsibilities 164 | 165 | 3.1. Distribution of Source Form 166 | 167 | All distribution of Covered Software in Source Code Form, including any 168 | Modifications that You create or to which You contribute, must be under 169 | the terms of this License. You must inform recipients that the Source 170 | Code Form of the Covered Software is governed by the terms of this 171 | License, and how they can obtain a copy of this License. You may not 172 | attempt to alter or restrict the recipients' rights in the Source Code 173 | Form. 174 | 175 | 3.2. Distribution of Executable Form 176 | 177 | If You distribute Covered Software in Executable Form then: 178 | 179 | a. such Covered Software must also be made available in Source Code Form, 180 | as described in Section 3.1, and You must inform recipients of the 181 | Executable Form how they can obtain a copy of such Source Code Form by 182 | reasonable means in a timely manner, at a charge no more than the cost 183 | of distribution to the recipient; and 184 | 185 | b. 
You may distribute such Executable Form under the terms of this 186 | License, or sublicense it under different terms, provided that the 187 | license for the Executable Form does not attempt to limit or alter the 188 | recipients' rights in the Source Code Form under this License. 189 | 190 | 3.3. Distribution of a Larger Work 191 | 192 | You may create and distribute a Larger Work under terms of Your choice, 193 | provided that You also comply with the requirements of this License for 194 | the Covered Software. If the Larger Work is a combination of Covered 195 | Software with a work governed by one or more Secondary Licenses, and the 196 | Covered Software is not Incompatible With Secondary Licenses, this 197 | License permits You to additionally distribute such Covered Software 198 | under the terms of such Secondary License(s), so that the recipient of 199 | the Larger Work may, at their option, further distribute the Covered 200 | Software under the terms of either this License or such Secondary 201 | License(s). 202 | 203 | 3.4. Notices 204 | 205 | You may not remove or alter the substance of any license notices 206 | (including copyright notices, patent notices, disclaimers of warranty, or 207 | limitations of liability) contained within the Source Code Form of the 208 | Covered Software, except that You may alter any license notices to the 209 | extent required to remedy known factual inaccuracies. 210 | 211 | 3.5. Application of Additional Terms 212 | 213 | You may choose to offer, and to charge a fee for, warranty, support, 214 | indemnity or liability obligations to one or more recipients of Covered 215 | Software. However, You may do so only on Your own behalf, and not on 216 | behalf of any Contributor. 
You must make it absolutely clear that any 217 | such warranty, support, indemnity, or liability obligation is offered by 218 | You alone, and You hereby agree to indemnify every Contributor for any 219 | liability incurred by such Contributor as a result of warranty, support, 220 | indemnity or liability terms You offer. You may include additional 221 | disclaimers of warranty and limitations of liability specific to any 222 | jurisdiction. 223 | 224 | 4. Inability to Comply Due to Statute or Regulation 225 | 226 | If it is impossible for You to comply with any of the terms of this License 227 | with respect to some or all of the Covered Software due to statute, 228 | judicial order, or regulation then You must: (a) comply with the terms of 229 | this License to the maximum extent possible; and (b) describe the 230 | limitations and the code they affect. Such description must be placed in a 231 | text file included with all distributions of the Covered Software under 232 | this License. Except to the extent prohibited by statute or regulation, 233 | such description must be sufficiently detailed for a recipient of ordinary 234 | skill to be able to understand it. 235 | 236 | 5. Termination 237 | 238 | 5.1. The rights granted under this License will terminate automatically if You 239 | fail to comply with any of its terms. However, if You become compliant, 240 | then the rights granted under this License from a particular Contributor 241 | are reinstated (a) provisionally, unless and until such Contributor 242 | explicitly and finally terminates Your grants, and (b) on an ongoing 243 | basis, if such Contributor fails to notify You of the non-compliance by 244 | some reasonable means prior to 60 days after You have come back into 245 | compliance. 
Moreover, Your grants from a particular Contributor are 246 | reinstated on an ongoing basis if such Contributor notifies You of the 247 | non-compliance by some reasonable means, this is the first time You have 248 | received notice of non-compliance with this License from such 249 | Contributor, and You become compliant prior to 30 days after Your receipt 250 | of the notice. 251 | 252 | 5.2. If You initiate litigation against any entity by asserting a patent 253 | infringement claim (excluding declaratory judgment actions, 254 | counter-claims, and cross-claims) alleging that a Contributor Version 255 | directly or indirectly infringes any patent, then the rights granted to 256 | You by any and all Contributors for the Covered Software under Section 257 | 2.1 of this License shall terminate. 258 | 259 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user 260 | license agreements (excluding distributors and resellers) which have been 261 | validly granted by You or Your distributors under this License prior to 262 | termination shall survive termination. 263 | 264 | 6. Disclaimer of Warranty 265 | 266 | Covered Software is provided under this License on an "as is" basis, 267 | without warranty of any kind, either expressed, implied, or statutory, 268 | including, without limitation, warranties that the Covered Software is free 269 | of defects, merchantable, fit for a particular purpose or non-infringing. 270 | The entire risk as to the quality and performance of the Covered Software 271 | is with You. Should any Covered Software prove defective in any respect, 272 | You (not any Contributor) assume the cost of any necessary servicing, 273 | repair, or correction. This disclaimer of warranty constitutes an essential 274 | part of this License. No use of any Covered Software is authorized under 275 | this License except under this disclaimer. 276 | 277 | 7. 
Limitation of Liability 278 | 279 | Under no circumstances and under no legal theory, whether tort (including 280 | negligence), contract, or otherwise, shall any Contributor, or anyone who 281 | distributes Covered Software as permitted above, be liable to You for any 282 | direct, indirect, special, incidental, or consequential damages of any 283 | character including, without limitation, damages for lost profits, loss of 284 | goodwill, work stoppage, computer failure or malfunction, or any and all 285 | other commercial damages or losses, even if such party shall have been 286 | informed of the possibility of such damages. This limitation of liability 287 | shall not apply to liability for death or personal injury resulting from 288 | such party's negligence to the extent applicable law prohibits such 289 | limitation. Some jurisdictions do not allow the exclusion or limitation of 290 | incidental or consequential damages, so this exclusion and limitation may 291 | not apply to You. 292 | 293 | 8. Litigation 294 | 295 | Any litigation relating to this License may be brought only in the courts 296 | of a jurisdiction where the defendant maintains its principal place of 297 | business and such litigation shall be governed by laws of that 298 | jurisdiction, without reference to its conflict-of-law provisions. Nothing 299 | in this Section shall prevent a party's ability to bring cross-claims or 300 | counter-claims. 301 | 302 | 9. Miscellaneous 303 | 304 | This License represents the complete agreement concerning the subject 305 | matter hereof. If any provision of this License is held to be 306 | unenforceable, such provision shall be reformed only to the extent 307 | necessary to make it enforceable. Any law or regulation which provides that 308 | the language of a contract shall be construed against the drafter shall not 309 | be used to construe this License against a Contributor. 310 | 311 | 312 | 10. Versions of the License 313 | 314 | 10.1. 
New Versions 315 | 316 | Mozilla Foundation is the license steward. Except as provided in Section 317 | 10.3, no one other than the license steward has the right to modify or 318 | publish new versions of this License. Each version will be given a 319 | distinguishing version number. 320 | 321 | 10.2. Effect of New Versions 322 | 323 | You may distribute the Covered Software under the terms of the version 324 | of the License under which You originally received the Covered Software, 325 | or under the terms of any subsequent version published by the license 326 | steward. 327 | 328 | 10.3. Modified Versions 329 | 330 | If you create software not governed by this License, and you want to 331 | create a new license for such software, you may create and use a 332 | modified version of this License if you rename the license and remove 333 | any references to the name of the license steward (except to note that 334 | such modified license differs from this License). 335 | 336 | 10.4. Distributing Source Code Form that is Incompatible With Secondary 337 | Licenses If You choose to distribute Source Code Form that is 338 | Incompatible With Secondary Licenses under the terms of this version of 339 | the License, the notice described in Exhibit B of this License must be 340 | attached. 341 | 342 | Exhibit A - Source Code Form License Notice 343 | 344 | This Source Code Form is subject to the 345 | terms of the Mozilla Public License, v. 346 | 2.0. If a copy of the MPL was not 347 | distributed with this file, You can 348 | obtain one at 349 | http://mozilla.org/MPL/2.0/. 350 | 351 | If it is not possible or desirable to put the notice in a particular file, 352 | then You may include the notice in a location (such as a LICENSE file in a 353 | relevant directory) where a recipient would be likely to look for such a 354 | notice. 355 | 356 | You may add additional accurate notices of copyright ownership. 
357 | 358 | Exhibit B - "Incompatible With Secondary Licenses" Notice 359 | 360 | This Source Code Form is "Incompatible 361 | With Secondary Licenses", as defined by 362 | the Mozilla Public License, v. 2.0. 363 | 364 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | SyncthingFUSE 2 | ============= 3 | 4 | SyncthingFUSE allows you to see all of the files in shared [Syncthing](https://syncthing.net) folders, but only stores a fixed amount of data locally. 5 | 6 | When you open a file, the contents are served from a local cache, if possible. If the contents are not in the cache, then SyncthingFUSE asks peers for the contents and adds them to the cache. If no peers are currently available for the file, then opening the file will fail. 7 | 8 | This is particularly useful if you have a Syncthing device with a lot of data that you want access to, but don't have room for all of on a device. For example, you may have a large collection of photos on a desktop running Syncthing at home. Your laptop's hard drive may not be large enough to hold all of the photos. Running SyncthingFUSE on the laptop, you will see all of your photos. As you view the photos on your laptop, they'll be read from the local cache or pulled from home. The local cache will not grow larger than a fixed size, though. 9 | 10 | SyncthingFUSE is available on OS X and Linux. 11 | 12 | SynthingFUSE is currently read-only. You can browse and view files but cannot write or modify them. (Supporting writes appears possible, but no one has put in the development effort, yet.) 13 | 14 | _SyncthingFUSE is currently an early release. Since it's currently read-only, it poses a low threat to damaging your computer or Syncthing folders. 
There is some risk, however, and you assume all of that yourself._ 15 | 16 | Getting Started 17 | =============== 18 | 19 | SyncthingFUSE follows many patterns of Syncthing, so you should be familiar with it before starting. Additionally, SyncthingFUSE requires at least one device running Syncthing. 20 | 21 | To get started, grab a [release](https://github.com/burkemw3/syncthingfuse/releases) for your operating system, unzip it, and run it. When you start the `syncthingfuse` binary, it will set itself up with some defaults and start. ([OSXFUSE](https://osxfuse.github.io/) may be required on OS X, if you don't have it already.) 22 | 23 | By default, a configuration UI is available in a browser at `http://127.0.0.1:8385` (If the default port is taken, check the output of the startup for the line `API listening on`). Upon visiting, you will see a UI similar (albeit uglier) to Syncthing. On the left are folders that are configured, and on the right are devices. 24 | 25 | Add devices and folders through the UI and restart SyncthingFUSE for the changes to take effect. Folders have a default cache size of 512 MiB, configurable through the UI. You'll also need to add the SyncthingFUSE device to your Syncthing devices. 26 | 27 | By default, a mount point called "SyncthingFUSE" will be created in your home directory. After SyncthingFUSE connects to other Syncthing devices, you will be able to browse folder contents through this mount point. 28 | 29 | SyncthingFUSE will appear as "Syncing (0%)" when connected in Syncthing devices. This looks strange but is expected. 
30 | 31 | Syncthing Compatibility 32 | ======================= 33 | 34 | Supports: 35 | 36 | - connecting with Syncthing instances, including: 37 | - local and global discovery 38 | - relays 39 | 40 | Does not support: 41 | 42 | - accurate reporting of status: SyncthingFUSE will always appear as 0% synced on Syncthing devices 43 | - symlink files 44 | - UPnP 45 | - introducers: additional peers will not be added automatically 46 | - responding to read requests from other peers 47 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/addresslister.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2015 The Syncthing Authors. 2 | // 3 | // This Source Code Form is subject to the terms of the Mozilla Public 4 | // License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 | // You can obtain one at http://mozilla.org/MPL/2.0/. 6 | 7 | package main 8 | 9 | import ( 10 | "net" 11 | "net/url" 12 | 13 | "github.com/syncthing/syncthing/lib/config" 14 | ) 15 | 16 | type addressLister struct { 17 | cfg *config.Wrapper 18 | } 19 | 20 | func newAddressLister(cfg *config.Wrapper) *addressLister { 21 | return &addressLister{ 22 | cfg: cfg, 23 | } 24 | } 25 | 26 | // ExternalAddresses returns a list of addresses that are our best guess for 27 | // where we are reachable from the outside. As a special case, we may return 28 | // one or more addresses with an empty IP address (0.0.0.0 or ::) and just 29 | // port number - this means that the outside address of a NAT gateway should 30 | // be substituted. 31 | func (e *addressLister) ExternalAddresses() []string { 32 | return e.addresses(false) 33 | } 34 | 35 | // AllAddresses returns a list of addresses that are our best guess for where 36 | // we are reachable from the local network. Same conditions as 37 | // ExternalAddresses, but private IPv4 addresses are included. 
38 | func (e *addressLister) AllAddresses() []string { 39 | return e.addresses(true) 40 | } 41 | 42 | func (e *addressLister) addresses(includePrivateIPV4 bool) []string { 43 | var addrs []string 44 | 45 | // Grab our listen addresses from the config. Unspecified ones are passed 46 | // on verbatim (to be interpreted by a global discovery server or local 47 | // discovery peer). Public addresses are passed on verbatim. Private 48 | // addresses are filtered. 49 | for _, addrStr := range e.cfg.Options().ListenAddresses { 50 | addrURL, err := url.Parse(addrStr) 51 | if err != nil { 52 | l.Infoln("Listen address", addrStr, "is invalid:", err) 53 | continue 54 | } 55 | addr, err := net.ResolveTCPAddr("tcp", addrURL.Host) 56 | if err != nil { 57 | l.Infoln("Listen address", addrStr, "is invalid:", err) 58 | continue 59 | } 60 | 61 | if addr.IP == nil || addr.IP.IsUnspecified() { 62 | // Address like 0.0.0.0:22000 or [::]:22000 or :22000; include as is. 63 | addrs = append(addrs, tcpAddr(addr.String())) 64 | } else if isPublicIPv4(addr.IP) || isPublicIPv6(addr.IP) { 65 | // A public address; include as is. 66 | addrs = append(addrs, tcpAddr(addr.String())) 67 | } else if includePrivateIPV4 && addr.IP.To4().IsGlobalUnicast() { 68 | // A private IPv4 address. 69 | addrs = append(addrs, tcpAddr(addr.String())) 70 | } 71 | } 72 | 73 | return addrs 74 | } 75 | 76 | func isPublicIPv4(ip net.IP) bool { 77 | ip = ip.To4() 78 | if ip == nil { 79 | // Not an IPv4 address (IPv6) 80 | return false 81 | } 82 | 83 | // IsGlobalUnicast below only checks that it's not link local or 84 | // multicast, and we want to exclude private (NAT:ed) addresses as well. 
85 | rfc1918 := []net.IPNet{ 86 | {IP: net.IP{10, 0, 0, 0}, Mask: net.IPMask{255, 0, 0, 0}}, 87 | {IP: net.IP{172, 16, 0, 0}, Mask: net.IPMask{255, 240, 0, 0}}, 88 | {IP: net.IP{192, 168, 0, 0}, Mask: net.IPMask{255, 255, 0, 0}}, 89 | } 90 | for _, n := range rfc1918 { 91 | if n.Contains(ip) { 92 | return false 93 | } 94 | } 95 | 96 | return ip.IsGlobalUnicast() 97 | } 98 | 99 | func isPublicIPv6(ip net.IP) bool { 100 | if ip.To4() != nil { 101 | // Not an IPv6 address (IPv4) 102 | // (To16() returns a v6 mapped v4 address so can't be used to check 103 | // that it's an actual v6 address) 104 | return false 105 | } 106 | 107 | return ip.IsGlobalUnicast() 108 | } 109 | 110 | func tcpAddr(host string) string { 111 | u := url.URL{ 112 | Scheme: "tcp", 113 | Host: host, 114 | } 115 | return u.String() 116 | } 117 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "os" 7 | 8 | "github.com/burkemw3/syncthingfuse/lib/config" 9 | "github.com/syncthing/syncthing/lib/osutil" 10 | ) 11 | 12 | func getConfiguration() *config.Wrapper { 13 | cfgFile := locations[locConfigFile] 14 | 15 | // Load the configuration file, if it exists. If it does not, create a template. 
16 | if info, err := os.Stat(cfgFile); err == nil { 17 | if !info.Mode().IsRegular() { 18 | l.Fatalln("Config file is not a file?") 19 | } 20 | cfg, err = config.Load(cfgFile, myID) 21 | if err != nil { 22 | l.Fatalln("Configuration:", err) 23 | } 24 | } else { 25 | l.Infoln("No config file; starting with empty defaults") 26 | myName, _ := os.Hostname() 27 | newCfg := defaultConfig(myName) 28 | cfg = config.Wrap(cfgFile, newCfg) 29 | cfg.Save() 30 | l.Infof("Edit %s to taste or use the GUI\n", cfgFile) 31 | } 32 | 33 | return cfg 34 | } 35 | 36 | func ensureDir(dir string, mode int) { 37 | fi, err := os.Stat(dir) 38 | if os.IsNotExist(err) { 39 | err := osutil.MkdirAll(dir, 0700) 40 | if err != nil { 41 | l.Fatalln(err) 42 | } 43 | } else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode { 44 | err := os.Chmod(dir, os.FileMode(mode)) 45 | // This can fail on crappy filesystems, nothing we can do about it. 46 | if err != nil { 47 | l.Warnln(err) 48 | } 49 | } 50 | } 51 | 52 | func defaultConfig(myName string) config.Configuration { 53 | newCfg := config.New(myID, myName) 54 | 55 | port, err := getFreePort("0.0.0.0", 22000) 56 | if err != nil { 57 | l.Fatalln("get free port (BEP):", err) 58 | } 59 | newCfg.Options.ListenAddress = []string{fmt.Sprintf("tcp://0.0.0.0:%d", port)} 60 | 61 | return newCfg 62 | } 63 | 64 | // getFreePort returns a free TCP port fort listening on. The ports given are 65 | // tried in succession and the first to succeed is returned. If none succeed, 66 | // a random high port is returned. 
// getFreePort returns a free TCP port for listening on, bound to host.
// The candidate ports are tried in succession and the first that can be
// bound is returned. If none succeed, a random OS-assigned high port is
// returned instead.
func getFreePort(host string, ports ...int) (int, error) {
	for _, p := range ports {
		ln, err := net.Listen("tcp", fmt.Sprintf("%s:%d", host, p))
		if err != nil {
			continue
		}
		ln.Close()
		return p, nil
	}

	// None of the candidates were available; ask the OS for any port.
	ln, err := net.Listen("tcp", host+":0")
	if err != nil {
		return 0, err
	}
	defer ln.Close()
	return ln.Addr().(*net.TCPAddr).Port, nil
}
| l.Infoln("Signal", sig, "received, shutting down.") 61 | } 62 | 63 | mainSvc.Stop() 64 | 65 | l.Infoln("Unmounting...") 66 | err = Unmount(mountpoint) 67 | if err == nil { 68 | l.Infoln("Unmounted") 69 | } else { 70 | l.Infoln("Unmount failed:", err) 71 | } 72 | } 73 | 74 | var ( 75 | debugFuse = strings.Contains(os.Getenv("STTRACE"), "fuse") || os.Getenv("STTRACE") == "all" 76 | ) 77 | 78 | type FS struct { 79 | m *model.Model 80 | } 81 | 82 | func (fs FS) Root() (fs.Node, error) { 83 | if debugFuse { 84 | l.Debugln("Root") 85 | } 86 | return STFolder{m: fs.m}, nil 87 | } 88 | 89 | type STFolder struct { 90 | m *model.Model 91 | } 92 | 93 | func (stf STFolder) Attr(ctx context.Context, a *fuse.Attr) error { 94 | if debugFuse { 95 | l.Debugln("stf Attr") 96 | } 97 | a.Mode = os.ModeDir | 0555 98 | return nil 99 | } 100 | 101 | func (stf STFolder) Lookup(ctx context.Context, folderName string) (fs.Node, error) { 102 | if debugFuse { 103 | l.Debugln("STF Lookup folder", folderName) 104 | } 105 | 106 | if stf.m.HasFolder(folderName) { 107 | return Dir{ 108 | folder: folderName, 109 | m: stf.m, 110 | }, nil 111 | } 112 | 113 | return Dir{}, fuse.ENOENT 114 | } 115 | 116 | func (stf STFolder) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { 117 | if debugFuse { 118 | l.Debugln("ReadDirAll stf") 119 | } 120 | 121 | entries := stf.m.GetFolders() 122 | result := make([]fuse.Dirent, len(entries)) 123 | for i, entry := range entries { 124 | result[i] = fuse.Dirent{ 125 | Name: entry, 126 | Type: fuse.DT_Dir, 127 | } 128 | } 129 | 130 | return result, nil 131 | } 132 | 133 | // Dir implements both Node and Handle for the root directory. 
134 | type Dir struct { 135 | path string 136 | folder string 137 | m *model.Model 138 | } 139 | 140 | func (d Dir) Attr(ctx context.Context, a *fuse.Attr) error { 141 | if debugFuse { 142 | l.Debugln("Dir Attr folder", d.folder, "path", d.path) 143 | } 144 | 145 | entry, _ := d.m.GetEntry(d.folder, d.path) 146 | 147 | // TODO assert directory? 148 | 149 | a.Mode = os.ModeDir | 0555 150 | a.Mtime = time.Unix(entry.ModifiedS, 0) 151 | return nil 152 | } 153 | 154 | func (d Dir) Lookup(ctx context.Context, name string) (fs.Node, error) { 155 | if debugFuse { 156 | l.Debugln("Dir Lookup folder", d.folder, "path", d.path, "for", name) 157 | } 158 | entry, found := d.m.GetEntry(d.folder, filepath.Join(d.path, name)) 159 | 160 | if false == found { 161 | return nil, fuse.ENOENT 162 | } 163 | 164 | var node fs.Node 165 | if entry.IsDirectory() { 166 | node = Dir{ 167 | path: entry.Name, 168 | folder: d.folder, 169 | m: d.m, 170 | } 171 | } else { 172 | node = File{ 173 | path: entry.Name, 174 | folder: d.folder, 175 | m: d.m, 176 | } 177 | } 178 | 179 | return node, nil 180 | } 181 | 182 | func (d Dir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { 183 | if debugFuse { 184 | l.Debugln("ReadDirAll", d.path) 185 | } 186 | 187 | p := path.Clean(d.path) 188 | 189 | entries := d.m.GetChildren(d.folder, p) 190 | result := make([]fuse.Dirent, len(entries)) 191 | for i, entry := range entries { 192 | eType := fuse.DT_File 193 | if entry.IsDirectory() { 194 | eType = fuse.DT_Dir 195 | } 196 | result[i] = fuse.Dirent{ 197 | Name: path.Base(entry.Name), 198 | Type: eType, 199 | } 200 | } 201 | 202 | return result, nil 203 | } 204 | 205 | // File implements both Node and Handle for the hello file. 206 | type File struct { 207 | path string 208 | folder string 209 | m *model.Model 210 | } 211 | 212 | func (f File) Attr(ctx context.Context, a *fuse.Attr) error { 213 | entry, found := f.m.GetEntry(f.folder, f.path) 214 | 215 | // TODO assert file? 
216 | 217 | if false == found { 218 | return fuse.ENOENT 219 | } 220 | 221 | a.Mode = 0444 222 | a.Mtime = time.Unix(entry.ModifiedS, 0) 223 | a.Size = uint64(entry.Size) 224 | return nil 225 | } 226 | 227 | func (f File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { 228 | data, err := f.m.GetFileData(f.folder, f.path, req.Offset, req.Size) 229 | 230 | if err != nil { 231 | return err 232 | } 233 | 234 | resp.Data = data 235 | 236 | return err 237 | } 238 | 239 | // Unmount attempts to unmount the provided FUSE mount point, forcibly 240 | // if necessary. 241 | func Unmount(point string) error { 242 | var cmd *exec.Cmd 243 | switch runtime.GOOS { 244 | case "darwin": 245 | cmd = exec.Command("/usr/sbin/diskutil", "umount", "force", point) 246 | case "linux": 247 | cmd = exec.Command("fusermount", "-u", point) 248 | default: 249 | return errors.New("unmount: unimplemented") 250 | } 251 | 252 | errc := make(chan error, 1) 253 | go func() { 254 | if err := exec.Command("umount", point).Run(); err == nil { 255 | errc <- err 256 | } 257 | // retry to unmount with the fallback cmd 258 | errc <- cmd.Run() 259 | }() 260 | select { 261 | case <-time.After(10 * time.Second): 262 | return errors.New("umount timeout") 263 | case err := <-errc: 264 | return err 265 | } 266 | } 267 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/gui.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "crypto/tls" 7 | "encoding/json" 8 | "fmt" 9 | "io/ioutil" 10 | "mime" 11 | "net" 12 | "net/http" 13 | "os" 14 | "path/filepath" 15 | "strings" 16 | "time" 17 | 18 | "github.com/burkemw3/syncthingfuse/lib/autogenerated" 19 | "github.com/burkemw3/syncthingfuse/lib/config" 20 | "github.com/burkemw3/syncthingfuse/lib/model" 21 | human "github.com/dustin/go-humanize" 22 | 
"github.com/syncthing/syncthing/lib/protocol" 23 | "github.com/syncthing/syncthing/lib/sync" 24 | "github.com/syncthing/syncthing/lib/tlsutil" 25 | ) 26 | 27 | var ( 28 | guiAssets = os.Getenv("STGUIASSETS") 29 | ) 30 | 31 | type apiSvc struct { 32 | id protocol.DeviceID 33 | cfg *config.Wrapper 34 | model *model.Model 35 | assetDir string 36 | listener net.Listener 37 | stop chan struct{} 38 | configInSync bool 39 | systemConfigMut sync.Mutex 40 | } 41 | 42 | func newAPISvc(id protocol.DeviceID, cfg *config.Wrapper, model *model.Model) (*apiSvc, error) { 43 | if guiAssets == "" { 44 | guiAssets = locations[locGUIAssets] 45 | } 46 | 47 | svc := &apiSvc{ 48 | id: id, 49 | cfg: cfg, 50 | model: model, 51 | assetDir: guiAssets, 52 | systemConfigMut: sync.NewMutex(), 53 | configInSync: true, 54 | } 55 | 56 | var err error 57 | svc.listener, err = svc.getListener() 58 | return svc, err 59 | } 60 | 61 | func (s *apiSvc) getListener() (net.Listener, error) { 62 | cert, err := tls.LoadX509KeyPair(locations[locHTTPSCertFile], locations[locHTTPSKeyFile]) 63 | if err != nil { 64 | l.Infoln("Loading HTTPS certificate:", err) 65 | l.Infoln("Creating new HTTPS certificate") 66 | 67 | // When generating the HTTPS certificate, use the system host name per 68 | // default. If that isn't available, use the "syncthing" default. 
69 | var name string 70 | name, err = os.Hostname() 71 | if err != nil { 72 | name = tlsDefaultCommonName 73 | } 74 | 75 | cert, err = tlsutil.NewCertificate(locations[locHTTPSCertFile], locations[locHTTPSKeyFile], name, tlsRSABits) 76 | } 77 | if err != nil { 78 | return nil, err 79 | } 80 | tlsCfg := &tls.Config{ 81 | Certificates: []tls.Certificate{cert}, 82 | MinVersion: tls.VersionTLS10, // No SSLv3 83 | CipherSuites: []uint16{ 84 | // No RC4 85 | tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 86 | tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 87 | tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 88 | tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 89 | tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 90 | tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 91 | tls.TLS_RSA_WITH_AES_128_CBC_SHA, 92 | tls.TLS_RSA_WITH_AES_256_CBC_SHA, 93 | tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 94 | tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, 95 | }, 96 | } 97 | 98 | rawListener, err := net.Listen("tcp", s.cfg.Raw().GUI.RawAddress) 99 | if err != nil { 100 | return nil, err 101 | } 102 | 103 | listener := &tlsutil.DowngradingListener{rawListener, tlsCfg} 104 | return listener, nil 105 | } 106 | 107 | func (s *apiSvc) getMux() *http.ServeMux { 108 | mux := http.NewServeMux() 109 | 110 | getApiMux := http.NewServeMux() 111 | getApiMux.HandleFunc("/api/system/config", s.getSystemConfig) 112 | getApiMux.HandleFunc("/api/system/config/insync", s.getSystemConfigInSync) 113 | getApiMux.HandleFunc("/api/system/connections", s.getSystemConnections) 114 | getApiMux.HandleFunc("/api/system/pins/status", s.getPinStatus) 115 | getApiMux.HandleFunc("/api/verify/deviceid", s.getDeviceID) // id 116 | getApiMux.HandleFunc("/api/db/browse", s.getDBBrowse) // folderID pathPrefix 117 | 118 | postApiMux := http.NewServeMux() 119 | postApiMux.HandleFunc("/api/system/config", s.postSystemConfig) // 120 | postApiMux.HandleFunc("/api/verify/humansize", s.postVerifyHumanSize) // 121 | 122 | apiMux := getMethodHandler(getApiMux, postApiMux) 123 | 
mux.Handle("/api/", apiMux) 124 | 125 | // Serve compiled in assets unless an asset directory was set (for development) 126 | mux.Handle("/", embeddedStatic{ 127 | assetDir: s.assetDir, 128 | assets: autogenerated.Assets(), 129 | }) 130 | 131 | return mux 132 | } 133 | 134 | func (s *apiSvc) Serve() { 135 | s.stop = make(chan struct{}) 136 | 137 | srv := http.Server{ 138 | Handler: s.getMux(), 139 | ReadTimeout: 10 * time.Second, 140 | } 141 | 142 | l.Infoln("API listening on", s.listener.Addr()) 143 | err := srv.Serve(s.listener) 144 | 145 | // The return could be due to an intentional close. Wait for the stop 146 | // signal before returning. IF there is no stop signal within a second, we 147 | // assume it was unintentional and log the error before retrying. 148 | select { 149 | case <-s.stop: 150 | case <-time.After(time.Second): 151 | l.Warnln("API:", err) 152 | } 153 | } 154 | 155 | func getMethodHandler(get, post http.Handler) http.Handler { 156 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 157 | switch r.Method { 158 | case "GET": 159 | get.ServeHTTP(w, r) 160 | case "POST": 161 | post.ServeHTTP(w, r) 162 | default: 163 | http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) 164 | } 165 | }) 166 | } 167 | 168 | func (s *apiSvc) Stop() { 169 | close(s.stop) 170 | s.listener.Close() 171 | } 172 | 173 | func (s *apiSvc) String() string { 174 | return fmt.Sprintf("apiSvc@%p", s) 175 | } 176 | 177 | func (s *apiSvc) getSystemConfig(w http.ResponseWriter, r *http.Request) { 178 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 179 | json.NewEncoder(w).Encode(s.cfg.Raw()) 180 | } 181 | 182 | func (s *apiSvc) getSystemConfigInSync(w http.ResponseWriter, r *http.Request) { 183 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 184 | json.NewEncoder(w).Encode(s.configInSync) 185 | } 186 | 187 | func (s *apiSvc) getSystemConnections(w http.ResponseWriter, r *http.Request) { 188 | 
w.Header().Set("Content-Type", "application/json; charset=utf-8") 189 | json.NewEncoder(w).Encode(s.model.GetConnections()) 190 | } 191 | 192 | func (s *apiSvc) getPinStatus(w http.ResponseWriter, r *http.Request) { 193 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 194 | json.NewEncoder(w).Encode(s.model.GetPinsStatusByFolder()) 195 | } 196 | 197 | func (s *apiSvc) getDeviceID(w http.ResponseWriter, r *http.Request) { 198 | qs := r.URL.Query() 199 | idStr := qs.Get("id") 200 | id, err := protocol.DeviceIDFromString(idStr) 201 | w.Header().Set("Content-Type", "application/json; charset=utf-8") 202 | if err == nil { 203 | json.NewEncoder(w).Encode(map[string]string{ 204 | "id": id.String(), 205 | }) 206 | } else { 207 | json.NewEncoder(w).Encode(map[string]string{ 208 | "error": err.Error(), 209 | }) 210 | } 211 | } 212 | 213 | func (s *apiSvc) getDBBrowse(w http.ResponseWriter, r *http.Request) { 214 | qs := r.URL.Query() 215 | folderID := qs.Get("folderID") 216 | pathPrefix := qs.Get("pathPrefix") 217 | 218 | paths := s.model.GetPathsMatchingPrefix(folderID, pathPrefix) 219 | 220 | json.NewEncoder(w).Encode(paths) 221 | } 222 | 223 | func (s *apiSvc) postSystemConfig(w http.ResponseWriter, r *http.Request) { 224 | s.systemConfigMut.Lock() 225 | defer s.systemConfigMut.Unlock() 226 | 227 | // deserialize 228 | var to config.Configuration 229 | err := json.NewDecoder(r.Body).Decode(&to) 230 | if err != nil { 231 | l.Warnln("decoding posted config:", err) 232 | http.Error(w, err.Error(), 500) 233 | return 234 | } 235 | 236 | // Activate and save 237 | err = s.cfg.Replace(to) 238 | s.configInSync = false 239 | if err != nil { 240 | http.Error(w, err.Error(), 400) 241 | return 242 | } 243 | s.cfg.Save() 244 | } 245 | 246 | func (s *apiSvc) postVerifyHumanSize(w http.ResponseWriter, r *http.Request) { 247 | b, err := ioutil.ReadAll(r.Body) 248 | if err != nil { 249 | http.Error(w, "Error reading body"+err.Error(), 500) 250 | return 251 | } 252 | 253 
| _, err = human.ParseBytes(string(b)) 254 | if err != nil { 255 | http.Error(w, "Cannot parse size"+err.Error(), 500) 256 | return 257 | } 258 | return 259 | } 260 | 261 | type embeddedStatic struct { 262 | assetDir string 263 | assets map[string][]byte 264 | } 265 | 266 | func (s embeddedStatic) ServeHTTP(w http.ResponseWriter, r *http.Request) { 267 | file := r.URL.Path 268 | 269 | if file[0] == '/' { 270 | file = file[1:] 271 | } 272 | 273 | if len(file) == 0 { 274 | file = "index.html" 275 | } 276 | 277 | if s.assetDir != "" { 278 | p := filepath.Join(s.assetDir, filepath.FromSlash(file)) 279 | _, err := os.Stat(p) 280 | if err == nil { 281 | http.ServeFile(w, r, p) 282 | return 283 | } 284 | } 285 | 286 | bs, ok := s.assets[file] 287 | if !ok { 288 | http.NotFound(w, r) 289 | return 290 | } 291 | 292 | if r.Header.Get("If-Modified-Since") == autogenerated.AssetsBuildDate { 293 | w.WriteHeader(http.StatusNotModified) 294 | return 295 | } 296 | 297 | mtype := s.mimeTypeForFile(file) 298 | if len(mtype) != 0 { 299 | w.Header().Set("Content-Type", mtype) 300 | } 301 | if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { 302 | w.Header().Set("Content-Encoding", "gzip") 303 | } else { 304 | // ungzip if browser not send gzip accepted header 305 | var gr *gzip.Reader 306 | gr, _ = gzip.NewReader(bytes.NewReader(bs)) 307 | bs, _ = ioutil.ReadAll(gr) 308 | gr.Close() 309 | } 310 | w.Header().Set("Content-Length", fmt.Sprintf("%d", len(bs))) 311 | w.Header().Set("Last-Modified", autogenerated.AssetsBuildDate) 312 | w.Header().Set("Cache-Control", "public") 313 | 314 | w.Write(bs) 315 | } 316 | 317 | func (s embeddedStatic) mimeTypeForFile(file string) string { 318 | // We use a built in table of the common types since the system 319 | // TypeByExtension might be unreliable. But if we don't know, we delegate 320 | // to the system. 
321 | ext := filepath.Ext(file) 322 | switch ext { 323 | case ".htm", ".html": 324 | return "text/html" 325 | case ".css": 326 | return "text/css" 327 | case ".js": 328 | return "application/javascript" 329 | case ".json": 330 | return "application/json" 331 | case ".png": 332 | return "image/png" 333 | case ".ttf": 334 | return "application/x-font-ttf" 335 | case ".woff": 336 | return "application/x-font-woff" 337 | case ".svg": 338 | return "image/svg+xml" 339 | default: 340 | return mime.TypeByExtension(ext) 341 | } 342 | } 343 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/gui_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "strings" 7 | "testing" 8 | ) 9 | 10 | func TestHumanSizeVerifications(t *testing.T) { 11 | // Arrange 12 | var api apiSvc 13 | mux := api.getMux() 14 | server := httptest.NewServer(mux) 15 | defer server.Close() 16 | 17 | // Act 18 | assertHumanSizeVerification(t, server, "512 MiB", true) 19 | assertHumanSizeVerification(t, server, "102 MiB", true) 20 | assertHumanSizeVerification(t, server, "102", true) 21 | 22 | assertHumanSizeVerification(t, server, "MiB", false) 23 | assertHumanSizeVerification(t, server, "foobar", false) 24 | assertHumanSizeVerification(t, server, "512m MB", false) 25 | } 26 | 27 | func assertHumanSizeVerification(t *testing.T, server *httptest.Server, input string, success bool) { 28 | // Act 29 | resp, err := http.Post(server.URL+"/api/verify/humansize", "application/x-www-form-urlencoded", strings.NewReader(input)) 30 | 31 | // Assert 32 | if success { 33 | if err != nil { 34 | t.Error(input, err) 35 | } 36 | if resp.StatusCode != 200 { 37 | t.Errorf(input+"Received non-200 response: %d\n", resp.StatusCode) 38 | } 39 | } else { 40 | if resp.StatusCode != 500 { 41 | t.Errorf(input+" Received non-500 response: %d\n", resp.StatusCode) 42 | } 43 | } 
44 | } 45 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/locations.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "runtime" 7 | "strings" 8 | 9 | "github.com/syncthing/syncthing/lib/osutil" 10 | ) 11 | 12 | type locationEnum string 13 | 14 | // Use strings as keys to make printout and serialization of the locations map 15 | // more meaningful. 16 | const ( 17 | locConfigFile locationEnum = "config" 18 | locCertFile = "certFile" 19 | locKeyFile = "keyFile" 20 | locHTTPSCertFile = "httpsCertFile" 21 | locHTTPSKeyFile = "httpsKeyFile" 22 | locDatabase = "database" 23 | locLogFile = "logFile" 24 | locCsrfTokens = "csrfTokens" 25 | locPanicLog = "panicLog" 26 | locAuditLog = "auditLog" 27 | locGUIAssets = "GUIAssets" 28 | locDefFolder = "defFolder" 29 | ) 30 | 31 | // Platform dependent directories 32 | var baseDirs = map[string]string{ 33 | "config": defaultConfigDir(), // Overridden by -home flag 34 | "home": homeDir(), // User's home directory, *not* -home flag 35 | } 36 | 37 | // Use the variables from baseDirs here 38 | var locations = map[locationEnum]string{ 39 | locConfigFile: "${config}/config.xml", 40 | locCertFile: "${config}/cert.pem", 41 | locKeyFile: "${config}/key.pem", 42 | locHTTPSCertFile: "${config}/https-cert.pem", 43 | locHTTPSKeyFile: "${config}/https-key.pem", 44 | locDatabase: "${config}/index-v0.11.0.db", 45 | locLogFile: "${config}/syncthing.log", // -logfile on Windows 46 | locCsrfTokens: "${config}/csrftokens.txt", 47 | locPanicLog: "${config}/panic-${timestamp}.log", 48 | locAuditLog: "${config}/audit-${timestamp}.log", 49 | locGUIAssets: "${config}/gui", 50 | locDefFolder: "${home}/Sync", 51 | } 52 | 53 | // expandLocations replaces the variables in the location map with actual 54 | // directory locations. 
55 | func expandLocations() error { 56 | for key, dir := range locations { 57 | for varName, value := range baseDirs { 58 | dir = strings.Replace(dir, "${"+varName+"}", value, -1) 59 | } 60 | var err error 61 | dir, err = osutil.ExpandTilde(dir) 62 | if err != nil { 63 | return err 64 | } 65 | locations[key] = dir 66 | } 67 | return nil 68 | } 69 | 70 | // defaultConfigDir returns the default configuration directory, as figured 71 | // out by various the environment variables present on each platform, or dies 72 | // trying. 73 | func defaultConfigDir() string { 74 | switch runtime.GOOS { 75 | case "darwin": 76 | dir, err := osutil.ExpandTilde("~/Library/Application Support/SyncthingFUSE") 77 | if err != nil { 78 | l.Fatalln(err) 79 | } 80 | return dir 81 | case "linux": 82 | if xdgCfg := os.Getenv("XDG_CONFIG_HOME"); xdgCfg != "" { 83 | return filepath.Join(xdgCfg, "syncthing") 84 | } 85 | dir, err := osutil.ExpandTilde("~/.config/syncthingfuse") 86 | if err != nil { 87 | l.Fatalln(err) 88 | } 89 | return dir 90 | 91 | default: 92 | l.Fatalln("Only OS X and Linux supported right now!") 93 | } 94 | 95 | return "nil" 96 | } 97 | 98 | // homeDir returns the user's home directory, or dies trying. 
99 | func homeDir() string { 100 | home, err := osutil.ExpandTilde("~") 101 | if err != nil { 102 | l.Fatalln(err) 103 | } 104 | return home 105 | } 106 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/tls" 5 | "flag" 6 | "fmt" 7 | "net" 8 | "os" 9 | "path" 10 | "time" 11 | 12 | "github.com/boltdb/bolt" 13 | "github.com/burkemw3/syncthingfuse/lib/config" 14 | "github.com/burkemw3/syncthingfuse/lib/model" 15 | "github.com/calmh/logger" 16 | "github.com/syncthing/syncthing/lib/connections" 17 | "github.com/syncthing/syncthing/lib/discover" 18 | "github.com/syncthing/syncthing/lib/osutil" 19 | "github.com/syncthing/syncthing/lib/protocol" 20 | "github.com/thejerf/suture" 21 | ) 22 | 23 | var ( 24 | Version = "unknown-dev" 25 | LongVersion = Version 26 | ) 27 | 28 | var ( 29 | cfg *config.Wrapper 30 | myID protocol.DeviceID 31 | confDir string 32 | stop = make(chan int) 33 | cert tls.Certificate 34 | lans []*net.IPNet 35 | m *model.Model 36 | ) 37 | 38 | const ( 39 | bepProtocolName = "bep/1.0" 40 | ) 41 | 42 | var l = logger.DefaultLogger 43 | 44 | // Command line and environment options 45 | var ( 46 | showVersion bool 47 | ) 48 | 49 | const ( 50 | usage = "syncthingfuse [options]" 51 | extraUsage = ` 52 | The default configuration directory is: 53 | 54 | %s 55 | 56 | ` 57 | ) 58 | 59 | // The discovery results are sorted by their source priority. 
60 | const ( 61 | ipv6LocalDiscoveryPriority = iota 62 | ipv4LocalDiscoveryPriority 63 | globalDiscoveryPriority 64 | ) 65 | 66 | func main() { 67 | flag.BoolVar(&showVersion, "version", false, "Show version") 68 | 69 | flag.Usage = usageFor(flag.CommandLine, usage, fmt.Sprintf(extraUsage, baseDirs["config"])) 70 | flag.Parse() 71 | 72 | if showVersion { 73 | fmt.Println(Version) 74 | return 75 | } 76 | 77 | if err := expandLocations(); err != nil { 78 | l.Fatalln(err) 79 | } 80 | 81 | // Ensure that our home directory exists. 82 | ensureDir(baseDirs["config"], 0700) 83 | 84 | // Ensure that that we have a certificate and key. 85 | tlsCfg, cert := getTlsConfig() 86 | 87 | // We reinitialize the predictable RNG with our device ID, to get a 88 | // sequence that is always the same but unique to this syncthing instance. 89 | predictableRandom.Seed(seedFromBytes(cert.Certificate[0])) 90 | 91 | myID = protocol.NewDeviceID(cert.Certificate[0]) 92 | l.SetPrefix(fmt.Sprintf("[%s] ", myID.String()[:5])) 93 | 94 | l.Infoln("Started syncthingfuse v.", LongVersion) 95 | l.Infoln("My ID:", myID) 96 | 97 | cfg := getConfiguration() 98 | 99 | if info, err := os.Stat(cfg.Raw().MountPoint); err == nil { 100 | if !info.Mode().IsDir() { 101 | l.Fatalln("Mount point (", cfg.Raw().MountPoint, ") must be a directory, but isn't") 102 | os.Exit(1) 103 | } 104 | } else { 105 | l.Infoln("Mount point (", cfg.Raw().MountPoint, ") does not exist, creating it") 106 | err = os.MkdirAll(cfg.Raw().MountPoint, 0700) 107 | if err != nil { 108 | l.Warnln("Error creating mount point", cfg.Raw().MountPoint, err) 109 | l.Warnln("Sometimes, SyncthingFUSE doesn't shut down and unmount cleanly,") 110 | l.Warnln("If you don't know of any other file systems you have mounted at") 111 | l.Warnln("the mount point, try running the command below to unmount, then") 112 | l.Warnln("start SyncthingFUSE again.") 113 | l.Warnln(" umount", cfg.Raw().MountPoint) 114 | l.Fatalln("Cannot create missing mount point") 115 | 
os.Exit(1) 116 | } 117 | } 118 | 119 | mainSvc := suture.New("main", suture.Spec{ 120 | Log: func(line string) { 121 | l.Debugln(line) 122 | }, 123 | }) 124 | mainSvc.ServeBackground() 125 | 126 | database := openDatabase(cfg) 127 | 128 | m = model.NewModel(cfg, database) 129 | 130 | lans, _ := osutil.GetLans() 131 | 132 | // Start discovery 133 | cachedDiscovery := discover.NewCachingMux() 134 | mainSvc.Add(cachedDiscovery) 135 | 136 | // Start connection management 137 | connectionsService := connections.NewService(cfg.AsStCfg(myID), myID, m, tlsCfg, cachedDiscovery, bepProtocolName, tlsDefaultCommonName, lans) 138 | mainSvc.Add(connectionsService) 139 | 140 | if cfg.Raw().Options.GlobalAnnounceEnabled { 141 | for _, srv := range cfg.Raw().Options.GlobalAnnounceServers { 142 | l.Infoln("Using discovery server", srv) 143 | gd, err := discover.NewGlobal(srv, cert, connectionsService) 144 | if err != nil { 145 | l.Warnln("Global discovery:", err) 146 | continue 147 | } 148 | 149 | // Each global discovery server gets its results cached for five 150 | // minutes, and is not asked again for a minute when it's returned 151 | // unsuccessfully. 
152 | cachedDiscovery.Add(gd, 5*time.Minute, time.Minute, globalDiscoveryPriority) 153 | } 154 | } 155 | 156 | if cfg.Raw().Options.LocalAnnounceEnabled { 157 | // v4 broadcasts 158 | bcd, err := discover.NewLocal(myID, fmt.Sprintf(":%d", cfg.Raw().Options.LocalAnnouncePort), connectionsService) 159 | if err != nil { 160 | l.Warnln("IPv4 local discovery:", err) 161 | } else { 162 | cachedDiscovery.Add(bcd, 0, 0, ipv4LocalDiscoveryPriority) 163 | } 164 | // v6 multicasts 165 | mcd, err := discover.NewLocal(myID, cfg.Raw().Options.LocalAnnounceMCAddr, connectionsService) 166 | if err != nil { 167 | l.Warnln("IPv6 local discovery:", err) 168 | } else { 169 | cachedDiscovery.Add(mcd, 0, 0, ipv6LocalDiscoveryPriority) 170 | } 171 | } 172 | 173 | if cfg.Raw().GUI.Enabled { 174 | api, err := newAPISvc(myID, cfg, m) 175 | if err != nil { 176 | l.Fatalln("Cannot start GUI:", err) 177 | } 178 | mainSvc.Add(api) 179 | } 180 | 181 | l.Infoln("Started ...") 182 | 183 | MountFuse(cfg.Raw().MountPoint, m, mainSvc) // TODO handle fight between FUSE and Syncthing Service 184 | 185 | l.Okln("Exiting") 186 | 187 | return 188 | } 189 | 190 | func openDatabase(cfg *config.Wrapper) *bolt.DB { 191 | databasePath := path.Join(path.Dir(cfg.ConfigPath()), "boltdb") 192 | database, _ := bolt.Open(databasePath, 0600, nil) // TODO check error 193 | return database 194 | } 195 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/random.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2014 The Syncthing Authors. 2 | // 3 | // This Source Code Form is subject to the terms of the Mozilla Public 4 | // License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 | // You can obtain one at http://mozilla.org/MPL/2.0/. 
6 | 7 | package main 8 | 9 | import ( 10 | "crypto/md5" 11 | cryptoRand "crypto/rand" 12 | "encoding/binary" 13 | "io" 14 | mathRand "math/rand" 15 | ) 16 | 17 | // randomCharset contains the characters that can make up a randomString(). 18 | const randomCharset = "01234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" 19 | 20 | // predictableRandom is an RNG that will always have the same sequence. It 21 | // will be seeded with the device ID during startup, so that the sequence is 22 | // predictable but varies between instances. 23 | var predictableRandom = mathRand.New(mathRand.NewSource(42)) 24 | 25 | func init() { 26 | // The default RNG should be seeded with something good. 27 | mathRand.Seed(randomInt64()) 28 | } 29 | 30 | // randomString returns a string of random characters (taken from 31 | // randomCharset) of the specified length. 32 | func randomString(l int) string { 33 | bs := make([]byte, l) 34 | for i := range bs { 35 | bs[i] = randomCharset[mathRand.Intn(len(randomCharset))] 36 | } 37 | return string(bs) 38 | } 39 | 40 | // randomInt64 returns a strongly random int64, slowly 41 | func randomInt64() int64 { 42 | var bs [8]byte 43 | _, err := io.ReadFull(cryptoRand.Reader, bs[:]) 44 | if err != nil { 45 | panic("randomness failure: " + err.Error()) 46 | } 47 | return seedFromBytes(bs[:]) 48 | } 49 | 50 | // seedFromBytes calculates a weak 64 bit hash from the given byte slice, 51 | // suitable for use a predictable random seed. 52 | func seedFromBytes(bs []byte) int64 { 53 | h := md5.New() 54 | h.Write(bs) 55 | s := h.Sum(nil) 56 | // The MD5 hash of the byte slice is 16 bytes long. We interpret it as two 57 | // uint64s and XOR them together. 
58 | return int64(binary.BigEndian.Uint64(s[0:]) ^ binary.BigEndian.Uint64(s[8:])) 59 | } 60 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/tls.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/rsa" 6 | "crypto/tls" 7 | "crypto/x509" 8 | "crypto/x509/pkix" 9 | "encoding/pem" 10 | "math/big" 11 | mr "math/rand" 12 | "os" 13 | "time" 14 | ) 15 | 16 | const ( 17 | tlsRSABits = 3072 18 | tlsDefaultCommonName = "syncthing" 19 | ) 20 | 21 | func getTlsConfig() (*tls.Config, tls.Certificate) { 22 | cert, err := tls.LoadX509KeyPair(locations[locCertFile], locations[locKeyFile]) 23 | if err != nil { 24 | cert, err = newCertificate(locations[locCertFile], locations[locKeyFile], tlsDefaultCommonName) 25 | if err != nil { 26 | l.Fatalln("load cert:", err) 27 | } 28 | } 29 | 30 | // The TLS configuration is used for both the listening socket and outgoing 31 | // connections. 
32 | tlsCfg := &tls.Config{ 33 | Certificates: []tls.Certificate{cert}, 34 | NextProtos: []string{bepProtocolName}, 35 | ClientAuth: tls.RequestClientCert, 36 | SessionTicketsDisabled: true, 37 | InsecureSkipVerify: true, 38 | MinVersion: tls.VersionTLS12, 39 | CipherSuites: []uint16{ 40 | tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 41 | tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 42 | tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 43 | tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 44 | tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 45 | tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 46 | }, 47 | } 48 | 49 | return tlsCfg, cert 50 | } 51 | 52 | func newCertificate(certFile, keyFile, name string) (tls.Certificate, error) { 53 | l.Infof("Generating RSA key and certificate for %s...", name) 54 | 55 | priv, err := rsa.GenerateKey(rand.Reader, tlsRSABits) 56 | if err != nil { 57 | l.Fatalln("generate key:", err) 58 | } 59 | 60 | notBefore := time.Now() 61 | notAfter := time.Date(2049, 12, 31, 23, 59, 59, 0, time.UTC) 62 | 63 | template := x509.Certificate{ 64 | SerialNumber: new(big.Int).SetInt64(mr.Int63()), 65 | Subject: pkix.Name{ 66 | CommonName: name, 67 | }, 68 | NotBefore: notBefore, 69 | NotAfter: notAfter, 70 | 71 | KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, 72 | ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, 73 | BasicConstraintsValid: true, 74 | } 75 | 76 | derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) 77 | if err != nil { 78 | l.Fatalln("create cert:", err) 79 | } 80 | 81 | certOut, err := os.Create(certFile) 82 | if err != nil { 83 | l.Fatalln("save cert:", err) 84 | } 85 | err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) 86 | if err != nil { 87 | l.Fatalln("save cert:", err) 88 | } 89 | err = certOut.Close() 90 | if err != nil { 91 | l.Fatalln("save cert:", err) 92 | } 93 | 94 | keyOut, err := os.OpenFile(keyFile, 
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) 95 | if err != nil { 96 | l.Fatalln("save key:", err) 97 | } 98 | err = pem.Encode(keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}) 99 | if err != nil { 100 | l.Fatalln("save key:", err) 101 | } 102 | err = keyOut.Close() 103 | if err != nil { 104 | l.Fatalln("save key:", err) 105 | } 106 | 107 | return tls.LoadX509KeyPair(certFile, keyFile) 108 | } 109 | -------------------------------------------------------------------------------- /cmd/syncthingfuse/usage.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "fmt" 7 | "io" 8 | "text/tabwriter" 9 | ) 10 | 11 | func usageFor(fs *flag.FlagSet, usage string, extra string) func() { 12 | return func() { 13 | var b bytes.Buffer 14 | b.WriteString("Usage:\n " + usage + "\n") 15 | 16 | var options [][]string 17 | fs.VisitAll(func(f *flag.Flag) { 18 | var opt = " -" + f.Name 19 | 20 | if f.DefValue != "false" { 21 | opt += "=" + fmt.Sprintf(`"%s"`, f.DefValue) 22 | } 23 | options = append(options, []string{opt, f.Usage}) 24 | }) 25 | 26 | if len(options) > 0 { 27 | b.WriteString("\nOptions:\n") 28 | optionTable(&b, options) 29 | } 30 | 31 | fmt.Println(b.String()) 32 | 33 | if len(extra) > 0 { 34 | fmt.Println(extra) 35 | } 36 | } 37 | } 38 | 39 | func optionTable(w io.Writer, rows [][]string) { 40 | tw := tabwriter.NewWriter(w, 2, 4, 2, ' ', 0) 41 | for _, row := range rows { 42 | for i, cell := range row { 43 | if i > 0 { 44 | tw.Write([]byte("\t")) 45 | } 46 | tw.Write([]byte(cell)) 47 | } 48 | tw.Write([]byte("\n")) 49 | } 50 | tw.Flush() 51 | } 52 | -------------------------------------------------------------------------------- /gui/css/icon-addon.css: -------------------------------------------------------------------------------- 1 | /* 2 | from http://bootsnipp.com/snippets/featured/support-glyph-and-fa-icon-inside-input 3 | 4 | tweaked to put 
icons on right, not left 5 | */ 6 | 7 | .icon-addon { 8 | position: relative; 9 | color: #555; 10 | display: block; 11 | } 12 | 13 | .icon-addon:after, 14 | .icon-addon:before { 15 | display: table; 16 | content: " "; 17 | } 18 | 19 | .icon-addon:after { 20 | clear: both; 21 | } 22 | 23 | .icon-addon.addon-md .glyphicon, 24 | .icon-addon .glyphicon, 25 | .icon-addon.addon-md .fa, 26 | .icon-addon .fa { 27 | position: absolute; 28 | z-index: 2; 29 | right: 10px; 30 | font-size: 14px; 31 | width: 20px; 32 | margin-left: -2.5px; 33 | text-align: center; 34 | padding: 10px 0; 35 | top: 1px 36 | } 37 | 38 | .icon-addon.addon-lg .form-control { 39 | line-height: 1.33; 40 | height: 46px; 41 | font-size: 18px; 42 | padding: 10px 16px 10px 40px; 43 | } 44 | 45 | .icon-addon.addon-sm .form-control { 46 | height: 30px; 47 | padding: 5px 10px 5px 28px; 48 | font-size: 12px; 49 | line-height: 1.5; 50 | } 51 | 52 | .icon-addon.addon-lg .fa, 53 | .icon-addon.addon-lg .glyphicon { 54 | font-size: 18px; 55 | margin-left: 0; 56 | right: 11px; 57 | top: 4px; 58 | } 59 | 60 | .icon-addon.addon-md .form-control, 61 | .icon-addon .form-control { 62 | padding-left: 30px; 63 | float: left; 64 | font-weight: normal; 65 | } 66 | 67 | .icon-addon.addon-sm .fa, 68 | .icon-addon.addon-sm .glyphicon { 69 | margin-left: 0; 70 | font-size: 12px; 71 | right: 5px; 72 | top: -1px 73 | } 74 | 75 | .icon-addon .form-control:focus + .glyphicon, 76 | .icon-addon:hover .glyphicon, 77 | .icon-addon .form-control:focus + .fa, 78 | .icon-addon:hover .fa { 79 | color: #2580db; 80 | } -------------------------------------------------------------------------------- /gui/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | SyncthingFUSE 4 | 5 | 6 | 7 | 8 | 9 |
10 |
11 |

SyncthingFUSE

12 |
13 | 14 |
15 |
16 |
17 |

Restart Needed

18 |
19 |

The configuration has been saved but not activated. You must restart SyncthingFUSE to activate the new configuration.

20 |
21 |
22 |
23 |
24 | 25 |
26 |
27 |

Folders

28 |
29 |

{{ folder.id }}

30 |
31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
Cache size{{ folder.cacheSize }}
Pinned files{{ pinnedFileStatus[folder.id] }}
Shared with{{sharesFolder(folder)}}
45 |
46 | 57 |
58 |
59 | 60 | 63 | 64 |
65 |
66 |
67 |
68 |
69 |

This Device

70 |
71 |

{{ deviceCfg.name }}

72 |
73 | 74 | 75 | 76 | 77 | 78 |
Mount Point{{ config.mountPoint }}
79 |
80 | 88 |
89 |

Devices

90 |
91 |
92 |

93 | {{ device.name }} 94 | Connected 95 | Disconnected 96 |

97 |
98 |
99 | 100 | 101 | 102 | 103 | 104 | 105 |
Address{{ connections[device.deviceID].Address }}{{ device.addresses.join(", ") }}
106 |
107 | 115 |
116 |
117 | 118 | 121 | 122 |
123 |
124 |
125 |
126 |
127 | 128 |
129 |
130 |
131 |
132 | 133 | 134 | 135 | 136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | -------------------------------------------------------------------------------- /gui/js/app.js: -------------------------------------------------------------------------------- 1 | var stfuseApp = angular.module('syncthingfuse', [ 2 | 'syncthingfuse.core', 3 | 'syncthingfuse.device', 4 | 'syncthingfuse.folder', 5 | 'syncthingfuse.pins' 6 | ]); 7 | -------------------------------------------------------------------------------- /gui/js/core/core.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.core').controller('SyncthingFuseController', function ($scope, $http) { 2 | $scope.config = { devices: [] }; 3 | $scope.connections = {}; 4 | $scope.pinnedFileStatus = {}; 5 | $scope.configInSync = true; 6 | 7 | function initController() { 8 | $scope.refresh() 9 | setInterval($scope.refresh, 10000); 10 | } 11 | 12 | $scope.refresh = function() { 13 | $http.get('/api/system/connections').then( 14 | function(response) { 15 | var newConnections = {}; 16 | response.data.forEach(function(connection) { 17 | newConnections[connection.DeviceID] = connection 18 | }); 19 | $scope.connections = newConnections; 20 | }, 21 | function() { /* TODO handle error */ }); 22 | 23 | $http.get('/api/system/pins/status').then( 24 | function(response) { 25 | $scope.pinnedFileStatus = response.data; 26 | }, 27 | function() { /* TODO handle error */ }); 28 | }; 29 | 30 | $scope.isDeviceConnected = function(deviceID) { 31 | if ($scope.connections.hasOwnProperty(deviceID)) { 32 | return true; 33 | } 34 | return false; 35 | }; 36 | 37 | $http.get('/api/system/config').success(function(data) { 38 | $scope.config = data; 39 | 40 | $scope.config.folders.sort(function(a, b) { 41 | return a.id.localeCompare(b.id); 42 | }); 43 | $scope.config.devices.sort(function(a, b) { 44 | return 
a.deviceID.localeCompare(b.deviceID); 45 | }); 46 | }); 47 | 48 | $http.get('/api/system/config/insync').then( 49 | function(response) { $scope.configInSync = response.data; }, 50 | function() { /* TODO handle error */ }); 51 | 52 | $scope.findDevice = function (deviceID) { 53 | var matches = $scope.config.devices.filter(function (n) { return n.deviceID === deviceID; }); 54 | if (matches.length !== 1) { 55 | return undefined; 56 | } 57 | return matches[0]; 58 | }; 59 | 60 | $scope.sharesFolder = function (folderCfg) { 61 | var names = []; 62 | folderCfg.devices.forEach(function (device) { 63 | if (device.deviceID != $scope.config.myID) { 64 | names.push($scope.deviceName($scope.findDevice(device.deviceID))); 65 | } 66 | }); 67 | names.sort(); 68 | return names.join(", "); 69 | }; 70 | 71 | $scope.thisDevice = function () { 72 | for (var i = 0; i < $scope.config.devices.length; i++) { 73 | var device = $scope.config.devices[i]; 74 | if (device.deviceID === $scope.config.myID) { 75 | return device; 76 | } 77 | } 78 | }; 79 | 80 | $scope.otherDevices = function() { 81 | var devices = []; 82 | 83 | for (var i=0 ; i<$scope.config.devices.length ; i++) { 84 | device = $scope.config.devices[i]; 85 | if (device.deviceID !== $scope.config.myID) { 86 | devices.push(device) 87 | } 88 | } 89 | 90 | return devices; 91 | }; 92 | 93 | $scope.deviceName = function (deviceCfg) { 94 | if (typeof deviceCfg === 'undefined') { 95 | return ""; 96 | } 97 | if (deviceCfg.name) { 98 | return deviceCfg.name; 99 | } 100 | return deviceCfg.deviceID.substr(0, 6); 101 | }; 102 | 103 | $scope.addDevice = function() { 104 | $scope.currentDevice = { 105 | deviceID: '', 106 | _addressesStr: 'dynamic', 107 | compression: 'metadata', 108 | introducer: false, 109 | selectedFolders: {} 110 | }; 111 | $scope.editingExisting = false; 112 | $scope.deviceEditor.$setPristine(); 113 | $('#editDevice').modal(); 114 | }; 115 | 116 | $scope.editDevice = function(device) { 117 | $scope.currentDevice = 
angular.copy(device); 118 | $scope.editingExisting = true; 119 | $scope.currentDevice._addressesStr = device.addresses.join(', '); 120 | 121 | $scope.currentDevice.selectedFolders = {}; 122 | for (var i=0 ; i<$scope.config.folders.length ; i++) { 123 | var folder = $scope.config.folders[i]; 124 | for (var j=0 ; j 2 | 67 | -------------------------------------------------------------------------------- /gui/js/device/editSettingsModalDirective.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.device') 2 | .directive('editSettingsModal', function () { 3 | return { 4 | restrict: 'A', 5 | templateUrl: 'js/device/editSettingsModalView.html' 6 | }; 7 | }); 8 | -------------------------------------------------------------------------------- /gui/js/device/editSettingsModalView.html: -------------------------------------------------------------------------------- 1 | 48 | 49 | -------------------------------------------------------------------------------- /gui/js/device/module.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.device', []); 2 | -------------------------------------------------------------------------------- /gui/js/folder/editFolderModalDirective.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.folder') 2 | .directive('editFolderModal', function () { 3 | return { 4 | restrict: 'A', 5 | templateUrl: 'js/folder/editFolderModalView.html' 6 | }; 7 | }); 8 | 9 | angular.module('syncthingfuse.folder') 10 | .directive('humansize', function($q, $http) { 11 | return { 12 | require: 'ngModel', 13 | link: function(scope, elm, attrs, ctrl) { 14 | ctrl.$asyncValidators.humansize = function(modelValue, viewValue) { 15 | 16 | if (ctrl.$isEmpty(modelValue)) { 17 | // consider empty model valid 18 | return $q.when(); 19 | } 20 | 21 | var def = $q.defer(); 22 | 23 | 
$http.post('/api/verify/humansize', modelValue).then( 24 | function() { 25 | def.resolve(); 26 | }, 27 | function() { 28 | def.reject(); 29 | }); 30 | 31 | return def.promise; 32 | }; 33 | } 34 | }; 35 | }); 36 | -------------------------------------------------------------------------------- /gui/js/folder/editFolderModalView.html: -------------------------------------------------------------------------------- 1 | 60 | -------------------------------------------------------------------------------- /gui/js/folder/module.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.folder', []); 2 | -------------------------------------------------------------------------------- /gui/js/pins/editPinsModalDirective.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.pins') 2 | .directive('editPinsModal', function () { 3 | return { 4 | restrict: 'A', 5 | templateUrl: 'js/pins/editPinsModalView.html' 6 | }; 7 | }); 8 | -------------------------------------------------------------------------------- /gui/js/pins/editPinsModalView.html: -------------------------------------------------------------------------------- 1 | 44 | -------------------------------------------------------------------------------- /gui/js/pins/module.js: -------------------------------------------------------------------------------- 1 | angular.module('syncthingfuse.pins', []); 2 | -------------------------------------------------------------------------------- /lib/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "encoding/xml" 5 | "io" 6 | "os/user" 7 | "path" 8 | "reflect" 9 | "strconv" 10 | "strings" 11 | 12 | human "github.com/dustin/go-humanize" 13 | "github.com/syncthing/syncthing/lib/config" 14 | "github.com/syncthing/syncthing/lib/protocol" 15 | ) 16 | 17 | const ( 18 | 
CurrentVersion = 0 19 | ) 20 | 21 | type Configuration struct { 22 | Version int `xml:"version,attr" json:"version"` 23 | MyID string `xml:"-" json:"myID"` 24 | MountPoint string `xml:"mountPoint" json:"mountPoint"` 25 | Folders []FolderConfiguration `xml:"folder" json:"folders"` 26 | Devices []config.DeviceConfiguration `xml:"device" json:"devices"` 27 | Options OptionsConfiguration `xml:"options" json:"options"` 28 | GUI GUIConfiguration `xml:"gui" json:"gui"` 29 | XMLName xml.Name `xml:"configuration" json:"-"` 30 | } 31 | 32 | type FolderConfiguration struct { 33 | ID string `xml:"id,attr" json:"id"` 34 | Devices []config.FolderDeviceConfiguration `xml:"device" json:"devices"` 35 | CacheSize string `xml:"cacheSize" json:"cacheSize" default:"512MiB"` 36 | PinnedFiles []string `xml:"pinnedFiles" json:"pinnedFiles"` 37 | } 38 | 39 | type GUIConfiguration struct { 40 | Enabled bool `xml:"enabled,attr" json:"enabled" default:"true"` 41 | RawAddress string `xml:"address" json:"address" default:"127.0.0.1:5833"` 42 | } 43 | 44 | func (f FolderConfiguration) GetCacheSizeBytes() (int32, error) { 45 | bytes, err := human.ParseBytes(f.CacheSize) 46 | return int32(bytes), err 47 | } 48 | 49 | type OptionsConfiguration struct { 50 | ListenAddress []string `xml:"listenAddress" json:"listenAddress" default:"tcp://0.0.0.0:22000"` 51 | LocalAnnounceEnabled bool `xml:"localAnnounceEnabled" json:"localAnnounceEnabled" default:"true"` 52 | LocalAnnouncePort int `xml:"localAnnouncePort" json:"localAnnouncePort" default:"21027"` 53 | LocalAnnounceMCAddr string `xml:"localAnnounceMCAddr" json:"localAnnounceMCAddr"` 54 | GlobalAnnounceEnabled bool `xml:"globalAnnounceEnabled" json:"globalAnnounceEnabled" default:"true"` 55 | GlobalAnnounceServers []string `xml:"globalAnnounceServer" json:"globalAnnounceServers" default:"default"` 56 | RelaysEnabled bool `xml:"relaysEnabled" json:"relaysEnabled" default:"true"` 57 | RelayWithoutGlobalAnnounce bool `xml:"relayWithoutGlobalAnn" 
json:"relayWithoutGlobalAnn" default:"false"` 58 | RelayServers []string `xml:"relayServer" json:"relayServers" default:"dynamic+https://relays.syncthing.net/endpoint"` 59 | RelayReconnectIntervalM int `xml:"relayReconnectIntervalM" json:"relayReconnectIntervalM" default:"10"` 60 | } 61 | 62 | func New(myID protocol.DeviceID, myName string) Configuration { 63 | var cfg Configuration 64 | cfg.Version = CurrentVersion 65 | 66 | cfg.MyID = myID.String() 67 | setDefaults(&cfg) 68 | setDefaults(&cfg.GUI) 69 | setDefaults(&cfg.Options) 70 | 71 | thisDevice, _ := protocol.DeviceIDFromString(cfg.MyID) 72 | thisDeviceCfg := config.NewDeviceConfiguration(thisDevice, myName) 73 | thisDeviceCfg.Addresses = []string{"dynamic"} 74 | cfg.Folders = []FolderConfiguration{} 75 | cfg.Devices = []config.DeviceConfiguration{thisDeviceCfg} 76 | 77 | cfg.prepare() 78 | 79 | usr, _ := user.Current() 80 | cfg.MountPoint = path.Join(usr.HomeDir, "SyncthingFUSE") 81 | 82 | return cfg 83 | } 84 | 85 | func ReadXML(r io.Reader, myID protocol.DeviceID) (Configuration, error) { 86 | var cfg Configuration 87 | 88 | cfg.MyID = myID.String() 89 | setDefaults(&cfg) 90 | setDefaults(&cfg.GUI) 91 | setDefaults(&cfg.Options) 92 | 93 | err := xml.NewDecoder(r).Decode(&cfg) 94 | 95 | cfg.prepare() 96 | 97 | return cfg, err 98 | } 99 | 100 | func (cfg *Configuration) WriteXML(w io.Writer) error { 101 | e := xml.NewEncoder(w) 102 | e.Indent("", " ") 103 | err := e.Encode(cfg) 104 | if err != nil { 105 | return err 106 | } 107 | _, err = w.Write([]byte("\n")) 108 | return err 109 | } 110 | 111 | func (cfg *Configuration) prepare() { 112 | fillNilSlices(cfg) 113 | fillNilSlices(&(cfg.Options)) 114 | 115 | if nil == cfg.Folders { 116 | cfg.Folders = make([]FolderConfiguration, 0) 117 | } 118 | } 119 | 120 | func setDefaults(data interface{}) error { 121 | s := reflect.ValueOf(data).Elem() 122 | t := s.Type() 123 | 124 | for i := 0; i < s.NumField(); i++ { 125 | f := s.Field(i) 126 | tag := t.Field(i).Tag 127 
| 128 | v := tag.Get("default") 129 | if len(v) > 0 { 130 | switch f.Interface().(type) { 131 | case string: 132 | f.SetString(v) 133 | 134 | case int: 135 | i, err := strconv.ParseInt(v, 10, 64) 136 | if err != nil { 137 | return err 138 | } 139 | f.SetInt(i) 140 | 141 | case float64: 142 | i, err := strconv.ParseFloat(v, 64) 143 | if err != nil { 144 | return err 145 | } 146 | f.SetFloat(i) 147 | 148 | case bool: 149 | f.SetBool(v == "true") 150 | 151 | case []string: 152 | // We don't do anything with string slices here. Any default 153 | // we set will be appended to by the XML decoder, so we fill 154 | // those after decoding. 155 | 156 | default: 157 | panic(f.Type()) 158 | } 159 | } 160 | } 161 | return nil 162 | } 163 | 164 | // fillNilSlices sets default value on slices that are still nil. 165 | func fillNilSlices(data interface{}) error { 166 | s := reflect.ValueOf(data).Elem() 167 | t := s.Type() 168 | 169 | for i := 0; i < s.NumField(); i++ { 170 | f := s.Field(i) 171 | tag := t.Field(i).Tag 172 | 173 | v := tag.Get("default") 174 | if len(v) > 0 { 175 | switch f.Interface().(type) { 176 | case []string: 177 | if f.IsNil() { 178 | // Treat the default as a comma separated slice 179 | vs := strings.Split(v, ",") 180 | for i := range vs { 181 | vs[i] = strings.TrimSpace(vs[i]) 182 | } 183 | 184 | rv := reflect.MakeSlice(reflect.TypeOf([]string{}), len(vs), len(vs)) 185 | for i, v := range vs { 186 | rv.Index(i).SetString(v) 187 | } 188 | f.Set(rv) 189 | } 190 | } 191 | } 192 | } 193 | return nil 194 | } 195 | -------------------------------------------------------------------------------- /lib/config/converter.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | stconfig "github.com/syncthing/syncthing/lib/config" 5 | "github.com/syncthing/syncthing/lib/protocol" 6 | ) 7 | 8 | func (w *Wrapper) AsStCfg(myID protocol.DeviceID) *stconfig.Wrapper { 9 | cfg := stconfig.New(myID) 10 | 11 | 
cfg.Folders = make([]stconfig.FolderConfiguration, len(w.Raw().Folders)) 12 | for i, fldr := range w.Raw().Folders { 13 | cfg.Folders[i].ID = fldr.ID 14 | cfg.Folders[i].Devices = make([]stconfig.FolderDeviceConfiguration, len(fldr.Devices)) 15 | copy(cfg.Folders[i].Devices, fldr.Devices) 16 | } 17 | 18 | cfg.Devices = w.Raw().Devices 19 | cfg.Options.ListenAddresses = w.Raw().Options.ListenAddress 20 | cfg.Options.LocalAnnEnabled = w.Raw().Options.LocalAnnounceEnabled 21 | cfg.Options.LocalAnnPort = w.Raw().Options.LocalAnnouncePort 22 | cfg.Options.LocalAnnMCAddr = w.Raw().Options.LocalAnnounceMCAddr 23 | cfg.Options.GlobalAnnEnabled = w.Raw().Options.GlobalAnnounceEnabled 24 | cfg.Options.GlobalAnnServers = w.Raw().Options.GlobalAnnounceServers 25 | cfg.Options.RelaysEnabled = w.Raw().Options.RelaysEnabled 26 | cfg.Options.RelayReconnectIntervalM = w.Raw().Options.RelayReconnectIntervalM 27 | 28 | return stconfig.Wrap("/shouldnotexist", cfg) 29 | } 30 | -------------------------------------------------------------------------------- /lib/config/debug.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | 7 | "github.com/calmh/logger" 8 | ) 9 | 10 | var ( 11 | debug = strings.Contains(os.Getenv("STTRACE"), "config") || os.Getenv("STTRACE") == "all" 12 | l = logger.DefaultLogger 13 | ) 14 | -------------------------------------------------------------------------------- /lib/config/wrapper.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | 6 | stconfig "github.com/syncthing/syncthing/lib/config" 7 | "github.com/syncthing/syncthing/lib/osutil" 8 | "github.com/syncthing/syncthing/lib/protocol" 9 | "github.com/syncthing/syncthing/lib/sync" 10 | ) 11 | 12 | type Wrapper struct { 13 | cfg Configuration 14 | path string 15 | mut sync.Mutex 16 | } 17 | 18 | // Wrap wraps an existing Configuration 
structure and ties it to a file on 19 | // disk. 20 | func Wrap(path string, cfg Configuration) *Wrapper { 21 | w := &Wrapper{ 22 | cfg: cfg, 23 | path: path, 24 | mut: sync.NewMutex(), 25 | } 26 | return w 27 | } 28 | 29 | // Load loads an existing file on disk and returns a new configuration 30 | // wrapper. 31 | func Load(path string, myID protocol.DeviceID) (*Wrapper, error) { 32 | fd, err := os.Open(path) 33 | if err != nil { 34 | return nil, err 35 | } 36 | defer fd.Close() 37 | 38 | cfg, err := ReadXML(fd, myID) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | return Wrap(path, cfg), nil 44 | } 45 | 46 | func (w *Wrapper) ConfigPath() string { 47 | return w.path 48 | } 49 | 50 | // Raw returns the currently wrapped Configuration object. 51 | func (w *Wrapper) Raw() Configuration { 52 | return w.cfg 53 | } 54 | 55 | func (w *Wrapper) Devices() map[protocol.DeviceID]stconfig.DeviceConfiguration { 56 | w.mut.Lock() 57 | defer w.mut.Unlock() 58 | 59 | deviceMap := make(map[protocol.DeviceID]stconfig.DeviceConfiguration, len(w.cfg.Devices)) 60 | for _, devCfg := range w.cfg.Devices { 61 | deviceMap[devCfg.DeviceID] = devCfg 62 | } 63 | 64 | return deviceMap 65 | } 66 | 67 | func (w *Wrapper) SetDevice(devCfg stconfig.DeviceConfiguration) { 68 | w.mut.Lock() 69 | defer w.mut.Unlock() 70 | 71 | replaced := false 72 | for i := range w.cfg.Devices { 73 | if w.cfg.Devices[i].DeviceID == devCfg.DeviceID { 74 | w.cfg.Devices[i] = devCfg 75 | replaced = true 76 | break 77 | } 78 | } 79 | if !replaced { 80 | w.cfg.Devices = append(w.cfg.Devices, devCfg) 81 | } 82 | } 83 | 84 | func (w *Wrapper) MyDeviceConfiguration() stconfig.DeviceConfiguration { 85 | for _, d := range w.cfg.Devices { 86 | if d.DeviceID.String() == w.cfg.MyID { 87 | return d 88 | } 89 | } 90 | return stconfig.DeviceConfiguration{} 91 | } 92 | 93 | // Folders returns a map of folders. Folder structures should not be changed, 94 | // other than for the purpose of updating via SetFolder(). 
95 | func (w *Wrapper) Folders() map[string]FolderConfiguration { 96 | w.mut.Lock() 97 | defer w.mut.Unlock() 98 | 99 | folderMap := make(map[string]FolderConfiguration, len(w.cfg.Folders)) 100 | for _, fld := range w.cfg.Folders { 101 | folderMap[fld.ID] = fld 102 | } 103 | return folderMap 104 | } 105 | 106 | func (w *Wrapper) SetFolder(fldCfg FolderConfiguration) { 107 | w.mut.Lock() 108 | defer w.mut.Unlock() 109 | 110 | replaced := false 111 | for i := range w.cfg.Folders { 112 | if w.cfg.Folders[i].ID == fldCfg.ID { 113 | w.cfg.Folders[i] = fldCfg 114 | replaced = true 115 | break 116 | } 117 | } 118 | if !replaced { 119 | w.cfg.Folders = append(w.cfg.Folders, fldCfg) 120 | } 121 | } 122 | 123 | func (w *Wrapper) Replace(to Configuration) error { 124 | w.mut.Lock() 125 | defer w.mut.Unlock() 126 | 127 | // validate 128 | for _, fldrCfg := range to.Folders { 129 | if _, err := fldrCfg.GetCacheSizeBytes(); err != nil { 130 | l.Debugln("rejected config, cannot parse cache size:", err) 131 | return err 132 | } 133 | } 134 | 135 | // set 136 | w.cfg = to 137 | 138 | return nil 139 | } 140 | 141 | // Save writes the configuration to disk 142 | func (w *Wrapper) Save() error { 143 | fd, err := osutil.CreateAtomic(w.path) 144 | if err != nil { 145 | return err 146 | } 147 | 148 | if err := w.cfg.WriteXML(fd); err != nil { 149 | fd.Close() 150 | return err 151 | } 152 | 153 | if err := fd.Close(); err != nil { 154 | return err 155 | } 156 | 157 | return nil 158 | } 159 | -------------------------------------------------------------------------------- /lib/fileblockcache/debug.go: -------------------------------------------------------------------------------- 1 | package fileblockcache 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | 7 | "github.com/calmh/logger" 8 | ) 9 | 10 | var ( 11 | debug = strings.Contains(os.Getenv("STTRACE"), "fileblockcache") || os.Getenv("STTRACE") == "all" 12 | l = logger.DefaultLogger 13 | ) 14 | 
-------------------------------------------------------------------------------- /lib/fileblockcache/fileblockcache.go: -------------------------------------------------------------------------------- 1 | package fileblockcache 2 | 3 | import ( 4 | "bytes" 5 | b64 "encoding/base64" 6 | "encoding/gob" 7 | "io/ioutil" 8 | "os" 9 | "path" 10 | 11 | "github.com/boltdb/bolt" 12 | "github.com/burkemw3/syncthingfuse/lib/config" 13 | "github.com/syncthing/syncthing/lib/protocol" 14 | ) 15 | 16 | type FileBlockCache struct { 17 | cfg *config.Wrapper 18 | db *bolt.DB 19 | folder string 20 | folderBucketKey []byte 21 | 22 | maximumBytesStored int32 23 | currentBytesStored int32 24 | mostRecentlyUsed []byte 25 | leastRecentlyUsed []byte 26 | } 27 | 28 | var ( 29 | cachedFilesBucket = []byte("cachedFiles") 30 | pinnedBlocksBucket = []byte("pinnedBlocks") 31 | ) 32 | 33 | type fileCacheEntry struct { 34 | Hash []byte 35 | Previous []byte 36 | Next []byte 37 | Size int32 38 | } 39 | 40 | func NewFileBlockCache(cfg *config.Wrapper, db *bolt.DB, fldrCfg config.FolderConfiguration) (*FileBlockCache, error) { 41 | d := &FileBlockCache{ 42 | cfg: cfg, 43 | db: db, 44 | folder: fldrCfg.ID, 45 | folderBucketKey: []byte(fldrCfg.ID), 46 | } 47 | 48 | cfgBytes, err := fldrCfg.GetCacheSizeBytes() 49 | if err != nil { 50 | l.Warnln("Cannot parse cache size (", fldrCfg.CacheSize, ") for folder", fldrCfg.ID) 51 | return nil, err 52 | } 53 | d.maximumBytesStored = cfgBytes 54 | l.Infoln("Folder", d.folder, "with cache", d.maximumBytesStored, "bytes") 55 | 56 | d.db.Update(func(tx *bolt.Tx) error { 57 | // create buckets 58 | b, err := tx.CreateBucketIfNotExists(d.folderBucketKey) 59 | if err != nil { 60 | l.Warnln("error creating bucket for folder", d.folder, err) 61 | return err 62 | } 63 | cfb, err := b.CreateBucketIfNotExists(cachedFilesBucket) 64 | if err != nil { 65 | l.Warnln("error creating cached files bucket for folder", d.folder, err) 66 | return err 67 | } 68 | pbb, err := 
b.CreateBucketIfNotExists(pinnedBlocksBucket) 69 | if err != nil { 70 | l.Warnln("error creating pinned block bucket for folder", d.folder, err) 71 | return err 72 | } 73 | 74 | // update in-memory data cache 75 | cfb.ForEach(func(k, v []byte) error { 76 | buf := bytes.NewBuffer(v) 77 | dec := gob.NewDecoder(buf) 78 | var focus fileCacheEntry 79 | dec.Decode(&focus) 80 | 81 | if focus.Previous == nil { 82 | d.mostRecentlyUsed = focus.Hash 83 | } 84 | if focus.Next == nil { 85 | d.leastRecentlyUsed = focus.Hash 86 | } 87 | 88 | _, pinned := getEntryUnsafely(pbb, focus.Hash) 89 | if false == pinned { 90 | d.currentBytesStored += focus.Size 91 | } 92 | 93 | return nil 94 | }) 95 | 96 | // evict, in case cache size has decreased 97 | d.evictForSizeUnsafe(cfb, pbb, 0) 98 | 99 | return nil 100 | }) 101 | 102 | diskCacheFolder := GetDiskCacheBasePath(d.cfg, d.folder) 103 | os.Mkdir(diskCacheFolder, 0744) 104 | 105 | return d, nil 106 | } 107 | 108 | func (d *FileBlockCache) PinExistingBlock(block protocol.BlockInfo) { 109 | if debug { 110 | blockHashString := b64.URLEncoding.EncodeToString(block.Hash) 111 | l.Debugln("Pinning existing block", blockHashString) 112 | } 113 | 114 | d.db.Update(func(tx *bolt.Tx) error { 115 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 116 | 117 | entry := fileCacheEntry{ 118 | Hash: block.Hash, 119 | Size: block.Size, 120 | } 121 | setEntryUnsafely(pbb, entry) 122 | 123 | d.currentBytesStored -= block.Size 124 | 125 | return nil 126 | }) 127 | } 128 | 129 | func (d *FileBlockCache) PinNewBlock(block protocol.BlockInfo, data []byte) { 130 | if debug { 131 | blockHashString := b64.URLEncoding.EncodeToString(block.Hash) 132 | l.Debugln("Pinning new block", blockHashString) 133 | } 134 | 135 | d.db.Update(func(tx *bolt.Tx) error { 136 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 137 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 138 | 139 | _, found := getEntryUnsafely(cfb, block.Hash) 140 
| if false == found { 141 | // save to disk 142 | diskCachePath := getDiskCachePath(d.cfg, d.folder, block.Hash) 143 | err := ioutil.WriteFile(diskCachePath, data, 0644) 144 | if err != nil { 145 | l.Warnln("Error writing file", diskCachePath, "for folder", d.folder, "for hash", block.Hash, err) 146 | return err // TODO error handle 147 | } 148 | } else { 149 | d.currentBytesStored -= block.Size 150 | } 151 | 152 | entry := fileCacheEntry{ 153 | Hash: block.Hash, 154 | Size: block.Size, 155 | } 156 | setEntryUnsafely(pbb, entry) 157 | 158 | return nil 159 | }) 160 | } 161 | 162 | func (d *FileBlockCache) HasPinnedBlock(blockHash []byte) bool { 163 | found := false 164 | 165 | d.db.View(func(tx *bolt.Tx) error { 166 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 167 | 168 | v := pbb.Get(blockHash) 169 | if v != nil { 170 | found = true 171 | } 172 | 173 | return nil 174 | }) 175 | 176 | return found 177 | } 178 | 179 | func (d *FileBlockCache) UnpinBlock(blockHash []byte) { 180 | d.db.Update(func(tx *bolt.Tx) error { 181 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 182 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 183 | 184 | entry, pinned := getEntryUnsafely(pbb, blockHash) 185 | if pinned { 186 | _, found := getEntryUnsafely(cfb, blockHash) 187 | if found { 188 | d.currentBytesStored += entry.Size 189 | d.evictForSizeUnsafe(cfb, pbb, 0) 190 | } else { 191 | // delete from disk 192 | diskCachePath := getDiskCachePath(d.cfg, d.folder, blockHash) 193 | os.Remove(diskCachePath) 194 | } 195 | } 196 | 197 | pbb.Delete(blockHash) 198 | 199 | return nil 200 | }) 201 | } 202 | 203 | func (d *FileBlockCache) HasCachedBlockData(blockHash []byte) bool { 204 | found := false 205 | 206 | d.db.View(func(tx *bolt.Tx) error { 207 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 208 | 209 | v := cfb.Get(blockHash) 210 | if v != nil { 211 | found = true 212 | } 213 | 214 | return nil 215 | }) 216 | 217 | return 
found 218 | } 219 | 220 | func (d *FileBlockCache) GetCachedBlockData(blockHash []byte) ([]byte, bool) { 221 | found := false 222 | var current, previous, next fileCacheEntry 223 | var data []byte 224 | 225 | d.db.Update(func(tx *bolt.Tx) error { 226 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 227 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 228 | 229 | /* get nodes */ 230 | // current 231 | current, found = getEntryUnsafely(cfb, blockHash) 232 | if false == found { 233 | current, found = getEntryUnsafely(pbb, blockHash) 234 | if found { 235 | if debug { 236 | blockHashString := b64.URLEncoding.EncodeToString(blockHash) 237 | l.Debugln("pinned block hit", blockHashString) 238 | } 239 | d.addAsMruUnsafe(cfb, current.Hash, current.Size) 240 | 241 | diskCachePath := getDiskCachePath(d.cfg, d.folder, blockHash) 242 | data, _ = ioutil.ReadFile(diskCachePath) // TODO check error 243 | } 244 | return nil 245 | } 246 | found = true 247 | 248 | // previous 249 | if current.Previous != nil { 250 | previous, _ = getEntryUnsafely(cfb, current.Previous) 251 | } 252 | 253 | // next 254 | if current.Next != nil { 255 | next, _ = getEntryUnsafely(cfb, current.Next) 256 | } 257 | 258 | /* manipulate LRU cache */ 259 | if false == bytes.Equal(blockHash, d.mostRecentlyUsed) { 260 | if nil == current.Previous { 261 | l.Warnln("broken LRU. 
no previous node for", b64.URLEncoding.EncodeToString(blockHash), "but not at MRU either", b64.URLEncoding.EncodeToString(d.mostRecentlyUsed)) 262 | } 263 | 264 | // remove current node 265 | previous.Next = next.Hash 266 | setEntryUnsafely(cfb, previous) 267 | 268 | if current.Next != nil { 269 | next.Previous = previous.Hash 270 | setEntryUnsafely(cfb, next) 271 | } else { 272 | d.leastRecentlyUsed = previous.Hash 273 | } 274 | 275 | // add current node at front 276 | oldMru, _ := getEntryUnsafely(cfb, d.mostRecentlyUsed) 277 | oldMru.Previous = current.Hash 278 | setEntryUnsafely(cfb, oldMru) 279 | 280 | current.Next = oldMru.Hash 281 | current.Previous = nil 282 | setEntryUnsafely(cfb, current) 283 | d.mostRecentlyUsed = current.Hash 284 | } 285 | 286 | /* get cached data */ 287 | diskCachePath := getDiskCachePath(d.cfg, d.folder, blockHash) 288 | data, _ = ioutil.ReadFile(diskCachePath) // TODO check error 289 | 290 | if debug { 291 | blockHashString := b64.URLEncoding.EncodeToString(blockHash) 292 | l.Debugln("file cache hit for block", blockHashString, "at", diskCachePath) 293 | } 294 | return nil 295 | }) 296 | 297 | if found { 298 | return data, true 299 | } 300 | 301 | if debug { 302 | blockHashString := b64.URLEncoding.EncodeToString(blockHash) 303 | l.Debugln("file cache miss for block", blockHashString) 304 | } 305 | 306 | return []byte(""), false 307 | } 308 | 309 | func (d *FileBlockCache) AddCachedFileData(block protocol.BlockInfo, data []byte) { 310 | d.db.Update(func(tx *bolt.Tx) error { 311 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 312 | pbb := tx.Bucket(d.folderBucketKey).Bucket(pinnedBlocksBucket) 313 | 314 | if debug { 315 | l.Debugln("Putting block", b64.URLEncoding.EncodeToString(block.Hash), "with", block.Size, "bytes. 
max bytes", d.maximumBytesStored) 316 | } 317 | 318 | d.evictForSizeUnsafe(cfb, pbb, block.Size) 319 | 320 | d.addAsMruUnsafe(cfb, block.Hash, block.Size) 321 | d.currentBytesStored += block.Size 322 | 323 | // write block data to disk 324 | diskCachePath := getDiskCachePath(d.cfg, d.folder, block.Hash) 325 | err := ioutil.WriteFile(diskCachePath, data, 0644) 326 | if err != nil { 327 | l.Warnln("Error writing file", diskCachePath, "for folder", d.folder, "for hash", block.Hash, err) 328 | return err // TODO error handle 329 | } 330 | 331 | return nil 332 | }) 333 | } 334 | 335 | func (d *FileBlockCache) addAsMruUnsafe(cfb *bolt.Bucket, hash []byte, size int32) { 336 | current := fileCacheEntry{ 337 | Hash: hash, 338 | Next: d.mostRecentlyUsed, 339 | Size: size, 340 | } 341 | if d.mostRecentlyUsed != nil { 342 | oldMru, _ := getEntryUnsafely(cfb, d.mostRecentlyUsed) 343 | oldMru.Previous = current.Hash 344 | setEntryUnsafely(cfb, oldMru) 345 | } 346 | setEntryUnsafely(cfb, current) 347 | d.mostRecentlyUsed = current.Hash 348 | 349 | if d.leastRecentlyUsed == nil { 350 | d.leastRecentlyUsed = current.Hash 351 | } 352 | } 353 | 354 | func (d *FileBlockCache) evictForSizeUnsafe(cfb *bolt.Bucket, pbb *bolt.Bucket, blockSize int32) { 355 | for d.currentBytesStored+blockSize > d.maximumBytesStored && d.leastRecentlyUsed != nil { 356 | // evict LRU 357 | victim, _ := getEntryUnsafely(cfb, d.leastRecentlyUsed) 358 | d.leastRecentlyUsed = victim.Previous 359 | 360 | if victim.Previous == nil { 361 | d.mostRecentlyUsed = nil 362 | } else { 363 | previous, _ := getEntryUnsafely(cfb, victim.Previous) 364 | previous.Next = nil 365 | setEntryUnsafely(cfb, previous) 366 | } 367 | 368 | // remove from db 369 | cfb.Delete(victim.Hash) 370 | 371 | // remove from disk if not pinned 372 | _, pinned := getEntryUnsafely(pbb, victim.Hash) 373 | if false == pinned { 374 | diskCachePath := getDiskCachePath(d.cfg, d.folder, victim.Hash) 375 | os.Remove(diskCachePath) 376 | } 377 | 378 | 
d.currentBytesStored -= victim.Size 379 | 380 | if debug { 381 | l.Debugln("Evicted", b64.URLEncoding.EncodeToString(victim.Hash), "for", victim.Size, "bytes. currently stored", d.currentBytesStored) 382 | } 383 | } 384 | } 385 | 386 | func GetDiskCacheBasePath(cfg *config.Wrapper, folder string) string { 387 | return path.Join(path.Dir(cfg.ConfigPath()), folder) 388 | } 389 | 390 | func getDiskCachePath(cfg *config.Wrapper, folder string, blockHash []byte) string { 391 | blockHashString := b64.URLEncoding.EncodeToString(blockHash) 392 | return path.Join(path.Dir(cfg.ConfigPath()), folder, blockHashString) 393 | } 394 | 395 | func getEntryUnsafely(bucket *bolt.Bucket, blockHash []byte) (fileCacheEntry, bool) { 396 | v := bucket.Get(blockHash) 397 | if v == nil { 398 | // not found, escape! 399 | return fileCacheEntry{}, false 400 | } 401 | buf := bytes.NewBuffer(v) 402 | dec := gob.NewDecoder(buf) 403 | var entry fileCacheEntry 404 | dec.Decode(&entry) 405 | return entry, true 406 | } 407 | 408 | func setEntryUnsafely(bucket *bolt.Bucket, entry fileCacheEntry) { 409 | var buf bytes.Buffer 410 | enc := gob.NewEncoder(&buf) 411 | enc.Encode(entry) 412 | bucket.Put(entry.Hash, buf.Bytes()) 413 | } 414 | 415 | func (d *FileBlockCache) logCacheEntries() { 416 | if debug { 417 | d.db.View(func(tx *bolt.Tx) error { 418 | cfb := tx.Bucket(d.folderBucketKey).Bucket(cachedFilesBucket) 419 | 420 | hashes := make([]string, 0) 421 | entry, found := getEntryUnsafely(cfb, d.mostRecentlyUsed) 422 | for found { 423 | hashes = append(hashes, string(entry.Hash)) 424 | entry, found = getEntryUnsafely(cfb, entry.Next) 425 | } 426 | 427 | l.Debugln("MRU to LRU", hashes) 428 | 429 | return nil 430 | }) 431 | } 432 | } 433 | -------------------------------------------------------------------------------- /lib/fileblockcache/fileblockcache_test.go: -------------------------------------------------------------------------------- 1 | package fileblockcache 2 | 3 | import ( 4 | "io/ioutil" 5 
| "os" 6 | "path" 7 | "testing" 8 | 9 | "github.com/boltdb/bolt" 10 | "github.com/burkemw3/syncthingfuse/lib/config" 11 | "github.com/syncthing/syncthing/lib/protocol" 12 | ) 13 | 14 | var ( 15 | folder = "fileblockcache_test" 16 | ) 17 | 18 | func TestGetSetGet(t *testing.T) { 19 | cfg, db, fldrCfg := setup(t, "1b") 20 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 21 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 22 | 23 | hash := []byte("teh hash") 24 | 25 | // check empty get 26 | assertUnavailable(t, fbc, hash) 27 | 28 | // add data 29 | expectedData := []byte("dead beef") 30 | block := protocol.BlockInfo{Hash: hash, Size: int32(len(expectedData))} 31 | fbc.AddCachedFileData(block, expectedData) 32 | 33 | // check full get 34 | assertAvailable(t, fbc, hash, expectedData) 35 | } 36 | 37 | func TestBlockGetsEvicted1(t *testing.T) { 38 | cfg, db, fldrCfg := setup(t, "2b") 39 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 40 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 41 | 42 | data1 := []byte("data1") 43 | block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1} 44 | fbc.AddCachedFileData(block1, data1) 45 | assertAvailable(t, fbc, block1.Hash, data1) 46 | 47 | data2 := []byte("data2") 48 | block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1} 49 | fbc.AddCachedFileData(block2, data2) 50 | assertAvailable(t, fbc, block1.Hash, data1) 51 | assertAvailable(t, fbc, block2.Hash, data2) 52 | 53 | data3 := []byte("data3") 54 | block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1} 55 | fbc.AddCachedFileData(block3, data3) 56 | 57 | assertAvailable(t, fbc, block2.Hash, data2) 58 | assertAvailable(t, fbc, block3.Hash, data3) 59 | assertUnavailable(t, fbc, block1.Hash) 60 | } 61 | 62 | func TestBlockGetsEvicted1AfterRestart(t *testing.T) { 63 | cfg, db, fldrCfg := setup(t, "2b") 64 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 65 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 66 | 67 | data1 := []byte("data1") 68 | block1 := 
protocol.BlockInfo{Hash: []byte("hash1"), Size: 1}
	fbc.AddCachedFileData(block1, data1)
	assertAvailable(t, fbc, block1.Hash, data1)

	data2 := []byte("data2")
	block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1}
	fbc.AddCachedFileData(block2, data2)
	assertAvailable(t, fbc, block1.Hash, data1)
	assertAvailable(t, fbc, block2.Hash, data2)

	// reopen the cache on the same db to simulate a restart
	fbc, _ = NewFileBlockCache(cfg, db, fldrCfg)

	data3 := []byte("data3")
	block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1}
	fbc.AddCachedFileData(block3, data3)

	assertAvailable(t, fbc, block2.Hash, data2)
	assertAvailable(t, fbc, block3.Hash, data3)
	assertUnavailable(t, fbc, block1.Hash)
}

// TestBlockGetsEvicted2 checks that the oldest block is evicted when the
// third insert overflows a two-byte cache.
func TestBlockGetsEvicted2(t *testing.T) {
	cfg, db, fldrCfg := setup(t, "2b")
	defer os.RemoveAll(path.Dir(cfg.ConfigPath()))
	fbc, _ := NewFileBlockCache(cfg, db, fldrCfg)

	data1 := []byte("data1")
	block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1}
	fbc.AddCachedFileData(block1, data1)

	data2 := []byte("data2")
	block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1}
	fbc.AddCachedFileData(block2, data2)

	assertAvailable(t, fbc, block1.Hash, data1)
	assertAvailable(t, fbc, block2.Hash, data2)

	data3 := []byte("data3")
	block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1}
	fbc.AddCachedFileData(block3, data3)

	assertUnavailable(t, fbc, block1.Hash)
	assertAvailable(t, fbc, block2.Hash, data2)
	assertAvailable(t, fbc, block3.Hash, data3)
}

// TestEvictMultipleBlocks checks that a single large insert can evict more
// than one existing block to make room.
func TestEvictMultipleBlocks(t *testing.T) {
	cfg, db, fldrCfg := setup(t, "2b")
	defer os.RemoveAll(path.Dir(cfg.ConfigPath()))
	fbc, _ := NewFileBlockCache(cfg, db, fldrCfg)

	data1 := []byte("data1")
	block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1}
	fbc.AddCachedFileData(block1, data1)

	data2 := []byte("data2")
	block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1}
	fbc.AddCachedFileData(block2, data2)

	assertAvailable(t, fbc, block1.Hash, data1)
	assertAvailable(t, fbc, block2.Hash, data2)

	// a size-2 block forces both existing size-1 blocks out
	data3 := []byte("data3")
	block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 2}
	fbc.AddCachedFileData(block3, data3)

	assertUnavailable(t, fbc, block1.Hash)
	assertUnavailable(t, fbc, block2.Hash)
	assertAvailable(t, fbc, block3.Hash, data3)
}

// TestTrivialPin checks the pin/unpin round trip for a single block.
func TestTrivialPin(t *testing.T) {
	cfg, db, fldrCfg := setup(t, "2b")
	defer os.RemoveAll(path.Dir(cfg.ConfigPath()))
	fbc, _ := NewFileBlockCache(cfg, db, fldrCfg)

	data1 := []byte("data1")
	block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1}
	assertPin(t, fbc, block1.Hash, false)
	fbc.PinNewBlock(block1, data1)
	assertPin(t, fbc, block1.Hash, true)

	assertAvailable(t, fbc, block1.Hash, data1)

	fbc.UnpinBlock(block1.Hash)

	assertPin(t, fbc, block1.Hash, false)
}

// TestPinStays checks that a pinned block survives cache pressure that would
// otherwise evict it.
func TestPinStays(t *testing.T) {
	cfg, db, fldrCfg := setup(t, "2b")
	defer os.RemoveAll(path.Dir(cfg.ConfigPath()))
	fbc, _ := NewFileBlockCache(cfg, db, fldrCfg)

	data1 := []byte("data1")
	block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1}
	fbc.PinNewBlock(block1, data1)

	data2 := []byte("data2")
	block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1}
	fbc.AddCachedFileData(block2, data2)

	data3 := []byte("data3")
	block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1}
	fbc.AddCachedFileData(block3, data3)

	assertAvailable(t, fbc, block1.Hash, data1)
	assertAvailable(t, fbc, block2.Hash, data2)
	assertAvailable(t, fbc, block3.Hash, data3)
}

// TestPinExistingStays checks that pinning an already-cached block protects
// it from later eviction.
func TestPinExistingStays(t *testing.T) {
	cfg, db, fldrCfg := setup(t, "2b")
181 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 182 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 183 | 184 | data1 := []byte("data1") 185 | block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1} 186 | fbc.AddCachedFileData(block1, data1) 187 | 188 | data2 := []byte("data2") 189 | block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1} 190 | fbc.AddCachedFileData(block2, data2) 191 | 192 | fbc.PinExistingBlock(block1) 193 | 194 | data3 := []byte("data3") 195 | block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1} 196 | fbc.AddCachedFileData(block3, data3) 197 | 198 | assertAvailable(t, fbc, block1.Hash, data1) 199 | assertAvailable(t, fbc, block2.Hash, data2) 200 | assertAvailable(t, fbc, block3.Hash, data3) 201 | } 202 | 203 | func TestPinNewBlockDespiteExistingStays(t *testing.T) { 204 | cfg, db, fldrCfg := setup(t, "2b") 205 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 206 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 207 | 208 | data1 := []byte("data1") 209 | block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1} 210 | fbc.AddCachedFileData(block1, data1) 211 | 212 | data2 := []byte("data2") 213 | block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1} 214 | fbc.AddCachedFileData(block2, data2) 215 | 216 | fbc.PinNewBlock(block1, data1) 217 | 218 | data3 := []byte("data3") 219 | block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1} 220 | fbc.AddCachedFileData(block3, data3) 221 | 222 | assertAvailable(t, fbc, block1.Hash, data1) 223 | assertAvailable(t, fbc, block2.Hash, data2) 224 | assertAvailable(t, fbc, block3.Hash, data3) 225 | } 226 | 227 | func TestPinStaysAfterUnpin(t *testing.T) { 228 | cfg, db, fldrCfg := setup(t, "2b") 229 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 230 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 231 | 232 | data1 := []byte("data1") 233 | block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1} 234 | fbc.PinNewBlock(block1, data1) 235 | 236 | data2 := []byte("data2") 237 | 
block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1} 238 | fbc.AddCachedFileData(block2, data2) 239 | 240 | data3 := []byte("data3") 241 | block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1} 242 | fbc.AddCachedFileData(block3, data3) 243 | 244 | assertAvailable(t, fbc, block1.Hash, data1) 245 | 246 | fbc.UnpinBlock(block1.Hash) 247 | } 248 | 249 | func TestPinLeavesAfterUnpin(t *testing.T) { 250 | cfg, db, fldrCfg := setup(t, "2b") 251 | defer os.RemoveAll(path.Dir(cfg.ConfigPath())) 252 | fbc, _ := NewFileBlockCache(cfg, db, fldrCfg) 253 | 254 | data1 := []byte("data1") 255 | block1 := protocol.BlockInfo{Hash: []byte("hash1"), Size: 1} 256 | fbc.PinNewBlock(block1, data1) 257 | 258 | data2 := []byte("data2") 259 | block2 := protocol.BlockInfo{Hash: []byte("hash2"), Size: 1} 260 | fbc.AddCachedFileData(block2, data2) 261 | 262 | data3 := []byte("data3") 263 | block3 := protocol.BlockInfo{Hash: []byte("hash3"), Size: 1} 264 | fbc.AddCachedFileData(block3, data3) 265 | 266 | assertAvailable(t, fbc, block1.Hash, data1) 267 | assertAvailable(t, fbc, block2.Hash, data2) 268 | assertAvailable(t, fbc, block3.Hash, data3) 269 | 270 | fbc.UnpinBlock(block1.Hash) 271 | 272 | assertUnavailable(t, fbc, block1.Hash) 273 | } 274 | 275 | func assertAvailable(t *testing.T, fbc *FileBlockCache, hash []byte, expectedData []byte) { 276 | actualData, found := fbc.GetCachedBlockData(hash) 277 | if false == found { 278 | t.Error("entry should exist") 279 | } 280 | if len(actualData) != len(expectedData) { 281 | t.Error("actual data", len(actualData), "and expected data", len(expectedData), "sizes differ") 282 | } 283 | for i := 0; i < len(expectedData); i++ { 284 | if actualData[i] != expectedData[i] { 285 | t.Error("actual data mismatches expected data at index", i) 286 | } 287 | } 288 | } 289 | 290 | func assertUnavailable(t *testing.T, fbc *FileBlockCache, hash []byte) { 291 | _, found := fbc.GetCachedBlockData(hash) 292 | if found { 293 | t.Error("entry should not 
exist, but does. hash", string(hash)) 294 | } 295 | } 296 | 297 | func assertPin(t *testing.T, fbc *FileBlockCache, hash []byte, expected bool) { 298 | actual := fbc.HasPinnedBlock(hash) 299 | 300 | if expected != actual { 301 | t.Error("Pin", actual, "not expected", expected) 302 | } 303 | } 304 | 305 | func setup(t *testing.T, cacheSize string) (*config.Wrapper, *bolt.DB, config.FolderConfiguration) { 306 | dir, _ := ioutil.TempDir("", "stf-mt") 307 | configFile, _ := ioutil.TempFile(dir, "config") 308 | deviceID, _ := protocol.DeviceIDFromString("FFR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR") 309 | realCfg := config.New(deviceID, "local") 310 | cfg := config.Wrap(configFile.Name(), realCfg) 311 | 312 | databasePath := path.Join(path.Dir(cfg.ConfigPath()), "boltdb") 313 | database, _ := bolt.Open(databasePath, 0600, nil) 314 | 315 | folderCfg := config.FolderConfiguration{ 316 | ID: folder, 317 | CacheSize: cacheSize, 318 | } 319 | cfg.SetFolder(folderCfg) 320 | 321 | return cfg, database, folderCfg 322 | } 323 | -------------------------------------------------------------------------------- /lib/filetreecache/debug.go: -------------------------------------------------------------------------------- 1 | package filetreecache 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | 7 | "github.com/calmh/logger" 8 | ) 9 | 10 | var ( 11 | debug = strings.Contains(os.Getenv("STTRACE"), "filetreecache") || os.Getenv("STTRACE") == "all" 12 | l = logger.DefaultLogger 13 | ) 14 | -------------------------------------------------------------------------------- /lib/filetreecache/filetreecache.go: -------------------------------------------------------------------------------- 1 | package filetreecache 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | "fmt" 7 | "path" 8 | "strings" 9 | 10 | "github.com/boltdb/bolt" 11 | "github.com/burkemw3/syncthingfuse/lib/config" 12 | "github.com/syncthing/syncthing/lib/protocol" 13 | ) 14 | 15 | type FileTreeCache struct { 16 
| fldrCfg config.FolderConfiguration 17 | db *bolt.DB 18 | folder string 19 | folderBucketKey []byte 20 | } 21 | 22 | var ( 23 | entriesBucket = []byte("entries") 24 | entryDevicesBucket = []byte("entryDevices") // devices that have the current version 25 | childLookupBucket = []byte("childLookup") 26 | ) 27 | 28 | func NewFileTreeCache(fldrCfg config.FolderConfiguration, db *bolt.DB, folder string) *FileTreeCache { 29 | d := &FileTreeCache{ 30 | fldrCfg: fldrCfg, 31 | db: db, 32 | folder: folder, 33 | folderBucketKey: []byte(folder), 34 | } 35 | 36 | d.db.Update(func(tx *bolt.Tx) error { 37 | b, err := tx.CreateBucketIfNotExists(d.folderBucketKey) 38 | if err != nil { 39 | return fmt.Errorf("create bucket: %s", err) 40 | } 41 | 42 | _, err = b.CreateBucketIfNotExists([]byte(entriesBucket)) 43 | if err != nil { 44 | return fmt.Errorf("create bucket: %s", err) 45 | } 46 | 47 | _, err = b.CreateBucketIfNotExists([]byte(entryDevicesBucket)) 48 | if err != nil { 49 | return fmt.Errorf("create bucket: %s", err) 50 | } 51 | 52 | _, err = b.CreateBucketIfNotExists([]byte(childLookupBucket)) 53 | if err != nil { 54 | return fmt.Errorf("create bucket: %s", err) 55 | } 56 | 57 | return nil 58 | }) 59 | 60 | d.cleanupForUnsharedDevices() 61 | 62 | return d 63 | } 64 | 65 | func (d *FileTreeCache) cleanupForUnsharedDevices() { 66 | configuredDevices := make(map[string]bool) 67 | for _, device := range d.fldrCfg.Devices { 68 | configuredDevices[device.DeviceID.String()] = true 69 | } 70 | 71 | victims := make([]string, 0) 72 | 73 | d.db.Update(func(tx *bolt.Tx) error { 74 | edb := tx.Bucket(d.folderBucketKey).Bucket(entryDevicesBucket) 75 | edb.ForEach(func(key []byte, v []byte) error { 76 | var devices map[string]bool 77 | rbuf := bytes.NewBuffer(v) 78 | dec := gob.NewDecoder(rbuf) 79 | dec.Decode(&devices) 80 | 81 | changed := false 82 | for k, _ := range devices { 83 | if _, ok := configuredDevices[k]; !ok { 84 | delete(devices, k) 85 | changed = true 86 | } 87 | } 88 | 89 | 
if len(devices) == 0 {
				// no configured device has this entry any more
				victims = append(victims, string(key))
			} else if changed {
				var wbuf bytes.Buffer
				enc := gob.NewEncoder(&wbuf)
				enc.Encode(devices)
				edb.Put(key, wbuf.Bytes())
			}

			return nil
		})
		return nil
	})

	for _, victim := range victims {
		d.RemoveEntry(victim)
	}
}

// decodeNameSet gob-decodes a stored set of names; a nil value yields an
// empty, ready-to-use set.
func decodeNameSet(raw []byte) map[string]bool {
	set := make(map[string]bool)
	if raw != nil {
		gob.NewDecoder(bytes.NewBuffer(raw)).Decode(&set)
	}
	return set
}

// encodeNameSet gob-encodes a set of names for storage in bolt.
func encodeNameSet(set map[string]bool) []byte {
	var buf bytes.Buffer
	gob.NewEncoder(&buf).Encode(set)
	return buf.Bytes()
}

// AddEntry stores entry in the tree cache, records that peer holds the
// current version, and indexes the entry under its parent directory.
func (d *FileTreeCache) AddEntry(entry protocol.FileInfo, peer protocol.DeviceID) {
	d.db.Update(func(tx *bolt.Tx) error {
		root := tx.Bucket(d.folderBucketKey)
		name := []byte(entry.Name)

		/* save entry */
		var entryBuf bytes.Buffer
		gob.NewEncoder(&entryBuf).Encode(entry)
		root.Bucket(entriesBucket).Put(name, entryBuf.Bytes()) // TODO handle error?

		/* add peer */
		edb := root.Bucket(entryDevicesBucket)
		devices := decodeNameSet(edb.Get(name))
		devices[peer.String()] = true
		edb.Put(name, encodeNameSet(devices))

		/* add child lookup */
		dir := path.Dir(entry.Name)
		clb := root.Bucket(childLookupBucket)
		raw := clb.Get([]byte(dir))
		if debug {
			l.Debugln("Adding child", entry.Name, "for dir", dir)
		}

		children := decodeNameSet(raw)
		children[entry.Name] = true
		clb.Put([]byte(dir), encodeNameSet(children))

		return nil
	})
}

// GetEntry returns the cached FileInfo for filepath and whether it exists.
func (d *FileTreeCache) GetEntry(filepath string) (protocol.FileInfo, bool) {
	var entry protocol.FileInfo
	found := false

	d.db.View(func(tx *bolt.Tx) error {
		raw := tx.Bucket(d.folderBucketKey).Bucket(entriesBucket).Get([]byte(filepath))
		if raw == nil {
			return nil
		}
		found = true
		gob.NewDecoder(bytes.NewBuffer(raw)).Decode(&entry)
		return nil
	})

	return entry, found
}

// GetEntryDevices returns the devices known to have the current version of
// filepath, and whether the entry is known at all. The returned slice is
// empty (never nil) when the entry is unknown.
func (d *FileTreeCache) GetEntryDevices(filepath string) ([]protocol.DeviceID, bool) {
	devices := make([]protocol.DeviceID, 0)
	found := false

	d.db.View(func(tx *bolt.Tx) error {
		edb := tx.Bucket(d.folderBucketKey).Bucket(entryDevicesBucket)
		raw := edb.Get([]byte(filepath))
		if raw == nil {
			return nil
		}

		found = true
		var deviceMap map[string]bool
		gob.NewDecoder(bytes.NewBuffer(raw)).Decode(&deviceMap)

		for k := range deviceMap {
			id, _ := protocol.DeviceIDFromString(k)
			devices = append(devices, id)
		}

		return nil
	})

	return devices, found
}

// RemoveEntry deletes filepath, and recursively its children, from the
// tree cache.
func (d *FileTreeCache) RemoveEntry(filepath string) {
	entries := d.GetChildren(filepath)
	for _, childPath := range entries {
		d.RemoveEntry(childPath)
	}

	d.db.Update(func(tx *bolt.Tx) error {
		// remove from entries
		eb := tx.Bucket(d.folderBucketKey).Bucket(entriesBucket)
		eb.Delete([]byte(filepath)) // TODO handle error?
223 | 224 | // remove devices 225 | db := tx.Bucket(d.folderBucketKey).Bucket(entryDevicesBucket) 226 | db.Delete([]byte(filepath)) 227 | 228 | // remove from children lookup 229 | dir := path.Dir(filepath) 230 | clb := tx.Bucket(d.folderBucketKey).Bucket(childLookupBucket) 231 | v := clb.Get([]byte(dir)) 232 | if v != nil { 233 | var children map[string]bool 234 | cbuf := bytes.NewBuffer(v) 235 | dec := gob.NewDecoder(cbuf) 236 | dec.Decode(&children) 237 | 238 | delete(children, filepath) 239 | 240 | var wbuf bytes.Buffer 241 | enc := gob.NewEncoder(&wbuf) 242 | enc.Encode(children) 243 | clb.Put([]byte(dir), wbuf.Bytes()) 244 | } else { 245 | l.Warnln("missing expected parent entry for", filepath) 246 | } 247 | 248 | return nil 249 | }) 250 | } 251 | 252 | func (d *FileTreeCache) GetChildren(path string) []string { 253 | var children []string 254 | d.db.View(func(tx *bolt.Tx) error { 255 | clb := tx.Bucket(d.folderBucketKey).Bucket(childLookupBucket) 256 | v := clb.Get([]byte(path)) 257 | 258 | if v != nil { 259 | var childrenMap map[string]bool 260 | cbuf := bytes.NewBuffer(v) 261 | dec := gob.NewDecoder(cbuf) 262 | dec.Decode(&childrenMap) 263 | 264 | children = make([]string, len(childrenMap)) 265 | i := 0 266 | for k, _ := range childrenMap { 267 | children[i] = k 268 | i += 1 269 | } 270 | } 271 | return nil 272 | }) 273 | 274 | if debug { 275 | l.Debugln("Found", len(children), "children for path", path) 276 | } 277 | 278 | return children 279 | } 280 | 281 | func (d *FileTreeCache) GetPathsMatchingPrefix(pathPrefix string) []string { 282 | result := make([]string, 0) 283 | 284 | prefixBase := path.Base(pathPrefix) 285 | prefixDir := path.Dir(pathPrefix) 286 | 287 | d.db.View(func(tx *bolt.Tx) error { 288 | edb := tx.Bucket(d.folderBucketKey).Bucket(entryDevicesBucket) 289 | edb.ForEach(func(key []byte, v []byte) error { 290 | if len(result) > 13 { 291 | return nil 292 | } 293 | 294 | candidatePath := string(key) 295 | candidateDir := 
path.Dir(candidatePath) 296 | if candidateDir == prefixDir { 297 | candidateBase := path.Base(candidatePath) 298 | if strings.HasPrefix(candidateBase, prefixBase) { 299 | result = append(result, candidatePath) 300 | } 301 | } 302 | 303 | return nil 304 | }) 305 | return nil 306 | }) 307 | 308 | return result 309 | } 310 | -------------------------------------------------------------------------------- /lib/model/debug.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | 7 | "github.com/calmh/logger" 8 | ) 9 | 10 | var ( 11 | debug = strings.Contains(os.Getenv("STTRACE"), "model") || os.Getenv("STTRACE") == "all" 12 | l = logger.DefaultLogger 13 | ) 14 | -------------------------------------------------------------------------------- /lib/model/model.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "bytes" 5 | "container/list" 6 | "crypto/sha256" 7 | b64 "encoding/base64" 8 | "errors" 9 | "fmt" 10 | "math/rand" 11 | "net" 12 | "os" 13 | "sort" 14 | "sync" 15 | "time" 16 | 17 | "github.com/boltdb/bolt" 18 | "github.com/burkemw3/syncthingfuse/lib/config" 19 | "github.com/burkemw3/syncthingfuse/lib/fileblockcache" 20 | "github.com/burkemw3/syncthingfuse/lib/filetreecache" 21 | "github.com/cznic/mathutil" 22 | human "github.com/dustin/go-humanize" 23 | "github.com/syncthing/syncthing/lib/connections" 24 | "github.com/syncthing/syncthing/lib/protocol" 25 | stsync "github.com/syncthing/syncthing/lib/sync" 26 | ) 27 | 28 | type Model struct { 29 | cfg *config.Wrapper 30 | db *bolt.DB 31 | pinnedFiles map[string][]string // read-only after initialization 32 | 33 | blockCaches map[string]*fileblockcache.FileBlockCache 34 | treeCaches map[string]*filetreecache.FileTreeCache 35 | folderDevices map[string][]protocol.DeviceID 36 | pulls map[string]map[string]*blockPullStatus 37 | fmut stsync.RWMutex // protects 
file information. must not be acquired after pmut 38 | 39 | pinnedList list.List 40 | lmut *sync.Cond // protects pull list. must not be acquired before fmut, nor after pmut 41 | 42 | protoConn map[protocol.DeviceID]connections.Connection 43 | pmut stsync.RWMutex // protects protoConn. must not be acquired before fmut 44 | } 45 | 46 | func NewModel(cfg *config.Wrapper, db *bolt.DB) *Model { 47 | var lmutex sync.Mutex 48 | m := &Model{ 49 | cfg: cfg, 50 | db: db, 51 | pinnedFiles: make(map[string][]string), 52 | 53 | blockCaches: make(map[string]*fileblockcache.FileBlockCache), 54 | treeCaches: make(map[string]*filetreecache.FileTreeCache), 55 | folderDevices: make(map[string][]protocol.DeviceID), 56 | pulls: make(map[string]map[string]*blockPullStatus), 57 | fmut: stsync.NewRWMutex(), 58 | 59 | lmut: sync.NewCond(&lmutex), 60 | 61 | protoConn: make(map[protocol.DeviceID]connections.Connection), 62 | pmut: stsync.NewRWMutex(), 63 | } 64 | 65 | for _, folderCfg := range m.cfg.Folders() { 66 | folder := folderCfg.ID 67 | 68 | fbc, err := fileblockcache.NewFileBlockCache(m.cfg, db, folderCfg) 69 | if err != nil { 70 | l.Warnln("Skipping folder", folder, "because fileblockcache init failed:", err) 71 | continue 72 | } 73 | m.blockCaches[folder] = fbc 74 | m.treeCaches[folder] = filetreecache.NewFileTreeCache(folderCfg, db, folder) 75 | 76 | m.folderDevices[folder] = make([]protocol.DeviceID, len(folderCfg.Devices)) 77 | for i, device := range folderCfg.Devices { 78 | m.folderDevices[folder][i] = device.DeviceID 79 | } 80 | 81 | m.pulls[folder] = make(map[string]*blockPullStatus) 82 | 83 | m.pinnedFiles[folder] = make([]string, len(folderCfg.PinnedFiles)) 84 | copy(m.pinnedFiles[folder], folderCfg.PinnedFiles) 85 | sort.Strings(m.pinnedFiles[folder]) 86 | m.unpinUnnecessaryBlocks(folder) 87 | } 88 | 89 | m.removeUnconfiguredFolders() 90 | 91 | for i := 0; i < 4; i++ { 92 | go m.backgroundPinnerRoutine() 93 | } 94 | 95 | return m 96 | } 97 | 98 | var ( 99 | 
errDeviceUnknown = errors.New("unknown device") 100 | ) 101 | 102 | func (m *Model) unpinUnnecessaryBlocks(folder string) { 103 | candidates := list.New() 104 | first, _ := m.treeCaches[folder].GetEntry("") 105 | candidates.PushBack(first) 106 | 107 | for candidates.Len() > 0 { 108 | el := candidates.Front() 109 | candidates.Remove(el) 110 | entry, _ := el.Value.(protocol.FileInfo) 111 | 112 | if false == m.isFilePinned(folder, entry.Name) { 113 | for _, block := range entry.Blocks { 114 | m.blockCaches[folder].UnpinBlock(block.Hash) 115 | } 116 | } 117 | 118 | if entry.IsDirectory() { 119 | children := m.treeCaches[folder].GetChildren(entry.Name) 120 | for _, child := range children { 121 | candidates.PushBack(child) 122 | } 123 | } 124 | } 125 | } 126 | 127 | func (m *Model) removeUnconfiguredFolders() { 128 | m.db.Update(func(tx *bolt.Tx) error { 129 | deletedFolders := make([]string, 0) 130 | 131 | tx.ForEach(func(name []byte, b *bolt.Bucket) error { 132 | folderName := string(name) 133 | if _, ok := m.blockCaches[folderName]; ok { 134 | return nil 135 | } 136 | 137 | // folder no longer in configuration, clean it out! 138 | 139 | if debug { 140 | l.Debugln("cleaning up deleted folder", folderName) 141 | } 142 | 143 | diskCacheFolder := fileblockcache.GetDiskCacheBasePath(m.cfg, folderName) 144 | err := os.RemoveAll(diskCacheFolder) 145 | if err != nil { 146 | l.Warnln("Cannot cleanup deleted folder", folderName, err) 147 | } 148 | 149 | deletedFolders = append(deletedFolders, folderName) 150 | 151 | return nil 152 | }) 153 | 154 | for _, deletedFolder := range deletedFolders { 155 | err := tx.DeleteBucket([]byte(deletedFolder)) 156 | if err != nil { 157 | l.Warnln("Cannot cleanup deleted folder's bucket", deletedFolder, err) 158 | } 159 | } 160 | 161 | return nil 162 | }) 163 | } 164 | 165 | // GetHello is called when we are about to connect to some remote device. 
166 | func (m *Model) GetHello(protocol.DeviceID) protocol.HelloIntf { 167 | return &protocol.Hello{ 168 | DeviceName: m.cfg.MyDeviceConfiguration().Name, 169 | ClientName: "SyncthingFUSE", 170 | ClientVersion: "0.2.0", 171 | } 172 | } 173 | 174 | // OnHello is called when an device connects to us. 175 | // This allows us to extract some information from the Hello message 176 | // and add it to a list of known devices ahead of any checks. 177 | func (m *Model) OnHello(remoteID protocol.DeviceID, addr net.Addr, hello protocol.HelloResult) error { 178 | if _, ok := m.cfg.Devices()[remoteID]; ok { 179 | // The device exists 180 | return nil 181 | } 182 | 183 | return errDeviceUnknown 184 | } 185 | 186 | func (m *Model) AddConnection(conn connections.Connection, hello protocol.HelloResult) { 187 | deviceID := conn.ID() 188 | 189 | m.fmut.RLock() 190 | defer m.fmut.RUnlock() 191 | m.pmut.Lock() 192 | defer m.pmut.Unlock() 193 | 194 | if _, ok := m.protoConn[deviceID]; ok { 195 | // TODO syncthing replaces connections, possibly for relays, so we should to that 196 | panic("add existing device") 197 | } 198 | m.protoConn[deviceID] = conn 199 | 200 | device, ok := m.cfg.Devices()[deviceID] 201 | if ok && device.Name == "" { 202 | device.Name = hello.DeviceName 203 | m.cfg.SetDevice(device) 204 | m.cfg.Save() 205 | } 206 | 207 | conn.Start() 208 | 209 | // TODO how do we know the device is in our config and we should send cluster config? 
210 | 211 | /* build and send cluster config */ 212 | cm := protocol.ClusterConfig{} 213 | 214 | for folderName, devices := range m.folderDevices { 215 | found := false 216 | for _, device := range devices { 217 | if device == deviceID { 218 | found = true 219 | break 220 | } 221 | } 222 | if false == found { 223 | continue 224 | } 225 | 226 | cr := protocol.Folder{ 227 | ID: folderName, 228 | } 229 | for _, device := range devices { 230 | deviceCfg := m.cfg.Devices()[device] 231 | cn := protocol.Device{ 232 | ID: device, 233 | Name: deviceCfg.Name, 234 | Addresses: deviceCfg.Addresses, 235 | Compression: deviceCfg.Compression, 236 | CertName: deviceCfg.CertName, 237 | } 238 | cr.Devices = append(cr.Devices, cn) 239 | } 240 | 241 | cm.Folders = append(cm.Folders, cr) 242 | } 243 | 244 | conn.ClusterConfig(cm) 245 | } 246 | 247 | func (m *Model) ConnectedTo(deviceID protocol.DeviceID) bool { 248 | m.pmut.RLock() 249 | _, ok := m.protoConn[deviceID] 250 | m.pmut.RUnlock() 251 | return ok 252 | } 253 | 254 | func (m *Model) IsPaused(deviceID protocol.DeviceID) bool { 255 | return false 256 | } 257 | 258 | func (m *Model) GetFolders() []string { 259 | m.fmut.RLock() 260 | folders := make([]string, 0, len(m.treeCaches)) 261 | for k := range m.treeCaches { 262 | folders = append(folders, k) 263 | } 264 | m.fmut.RUnlock() 265 | return folders 266 | } 267 | 268 | func (m *Model) HasFolder(folder string) bool { 269 | result := false 270 | m.fmut.RLock() 271 | if _, ok := m.treeCaches[folder]; ok { 272 | result = true 273 | } 274 | m.fmut.RUnlock() 275 | return result 276 | } 277 | 278 | func (m *Model) GetPathsMatchingPrefix(folderID string, pathPrefix string) []string { 279 | m.fmut.RLock() 280 | defer m.fmut.RUnlock() 281 | 282 | if ftc, ok := m.treeCaches[folderID]; ok { 283 | return ftc.GetPathsMatchingPrefix(pathPrefix) 284 | } 285 | 286 | l.Debugln("no tree cache for", folderID) 287 | 288 | return make([]string, 0) 289 | } 290 | 291 | func (m *Model) GetEntry(folder 
string, path string) (protocol.FileInfo, bool) { 292 | m.fmut.RLock() 293 | defer m.fmut.RUnlock() 294 | 295 | return m.treeCaches[folder].GetEntry(path) 296 | } 297 | 298 | func (m *Model) GetFileData(folder string, filepath string, readStart int64, readSize int) ([]byte, error) { 299 | start := time.Now() 300 | 301 | m.fmut.Lock() 302 | if debug { 303 | flet := time.Now() 304 | dur := flet.Sub(start).Seconds() 305 | l.Debugln("Read for", folder, filepath, readStart, readSize, "Lock took", dur) 306 | } 307 | m.pmut.RLock() 308 | 309 | entry, found := m.treeCaches[folder].GetEntry(filepath) 310 | if false == found { 311 | l.Warnln("File not found", folder, filepath) 312 | return []byte(""), protocol.ErrNoSuchFile 313 | } 314 | 315 | data := make([]byte, readSize) 316 | readEnd := readStart + int64(readSize) 317 | pendingBlocks := make([]pendingBlockRead, 0) 318 | fbc := m.blockCaches[folder] 319 | 320 | m.pmut.RLock() 321 | defer m.pmut.RUnlock() 322 | 323 | // create workers for pulling 324 | for i, block := range entry.Blocks { 325 | blockStart := int64(i * protocol.BlockSize) 326 | blockEnd := blockStart + int64(block.Size) 327 | 328 | if blockEnd > readStart { 329 | if blockStart < readEnd { 330 | // need this block 331 | blockData, found := fbc.GetCachedBlockData(block.Hash) 332 | if found { 333 | copyBlockData(blockData, readStart, blockStart, readEnd, blockEnd, data) 334 | } else { 335 | // pull block 336 | pendingBlock := pendingBlockRead{ 337 | readStart: readStart, 338 | blockStart: blockStart, 339 | readEnd: readEnd, 340 | blockEnd: blockEnd, 341 | blockPullStatus: m.getOrCreatePullStatus("Fetch", folder, filepath, block, blockStart, assigned), 342 | } 343 | pendingBlocks = append(pendingBlocks, pendingBlock) 344 | } 345 | } else if blockStart < readEnd+protocol.BlockSize { 346 | if false == fbc.HasCachedBlockData(block.Hash) && false == fbc.HasPinnedBlock(block.Hash) { 347 | // prefetch this block 348 | m.getOrCreatePullStatus("Prefetch", folder, 
filepath, block, blockStart, assigned) 349 | } 350 | } 351 | } 352 | } 353 | 354 | m.fmut.Unlock() 355 | m.pmut.RUnlock() 356 | 357 | // wait for needed blocks 358 | for _, pendingBlock := range pendingBlocks { 359 | pendingBlock.blockPullStatus.cv.L.Lock() 360 | for done != pendingBlock.blockPullStatus.state { 361 | pendingBlock.blockPullStatus.cv.Wait() 362 | } 363 | pendingBlock.blockPullStatus.cv.L.Unlock() 364 | pendingBlock.blockPullStatus.mutex.RLock() 365 | if pendingBlock.blockPullStatus.error != nil { 366 | return []byte(""), pendingBlock.blockPullStatus.error 367 | } 368 | copyBlockData(pendingBlock.blockPullStatus.data, pendingBlock.readStart, pendingBlock.blockStart, pendingBlock.readEnd, pendingBlock.blockEnd, data) 369 | pendingBlock.blockPullStatus.mutex.RUnlock() 370 | } 371 | 372 | if debug { 373 | end := time.Now() 374 | fullDur := end.Sub(start).Seconds() 375 | l.Debugln("Read for", folder, filepath, readStart, readSize, "completed", fullDur) 376 | } 377 | 378 | return data, nil 379 | } 380 | 381 | func copyBlockData(blockData []byte, readStart int64, blockStart int64, readEnd int64, blockEnd int64, data []byte) { 382 | for j := mathutil.MaxInt64(readStart, blockStart); j < readEnd && j < blockEnd; j++ { 383 | outputItr := j - readStart 384 | inputItr := j - blockStart 385 | 386 | data[outputItr] = blockData[inputItr] 387 | } 388 | } 389 | 390 | type pendingBlockRead struct { 391 | readStart int64 392 | blockStart int64 393 | readEnd int64 394 | blockEnd int64 395 | blockPullStatus *blockPullStatus 396 | } 397 | 398 | type blockPullState int 399 | 400 | const ( 401 | queued blockPullState = iota 402 | assigned 403 | done 404 | ) 405 | 406 | type blockPullStatus struct { 407 | comment string 408 | folder string 409 | file string 410 | block protocol.BlockInfo 411 | offset int64 412 | state blockPullState 413 | data []byte 414 | error error 415 | mutex *sync.RWMutex 416 | cv *sync.Cond // protects this data structure. 
cannot be acquired before any global locks (e.g. fmut) 417 | } 418 | 419 | // requires fmut write lock and pmut read lock (or better) before entry 420 | func (m *Model) getOrCreatePullStatus(comment string, folder string, file string, block protocol.BlockInfo, offset int64, state blockPullState) *blockPullStatus { 421 | hash := b64.URLEncoding.EncodeToString(block.Hash) 422 | 423 | pullStatus, ok := m.pulls[folder][hash] 424 | if ok { 425 | return pullStatus 426 | } 427 | 428 | var mutex sync.RWMutex 429 | pullStatus = &blockPullStatus{ 430 | comment: comment, 431 | folder: folder, 432 | file: file, 433 | block: block, 434 | offset: offset, 435 | state: state, 436 | mutex: &mutex, 437 | cv: sync.NewCond(&mutex), 438 | } 439 | 440 | m.pulls[folder][hash] = pullStatus 441 | 442 | if assigned == state { 443 | go m.pullBlock(pullStatus, true) 444 | } 445 | 446 | return pullStatus 447 | } 448 | 449 | func (m *Model) backgroundPinnerRoutine() { 450 | var status *blockPullStatus 451 | 452 | for { 453 | m.lmut.L.Lock() 454 | for 0 == m.pinnedList.Len() { 455 | m.lmut.Wait() 456 | } 457 | el := m.pinnedList.Front() 458 | m.pinnedList.Remove(el) 459 | status, _ = el.Value.(*blockPullStatus) 460 | m.lmut.L.Unlock() 461 | 462 | m.fmut.Lock() 463 | status.mutex.RLock() 464 | if m.isBlockStillNeeded(status) { 465 | if m.blockCaches[status.folder].HasCachedBlockData(status.block.Hash) { 466 | m.blockCaches[status.folder].PinExistingBlock(status.block) 467 | } else { 468 | m.fmut.Unlock() 469 | status.mutex.RUnlock() 470 | 471 | m.pullBlock(status, false) 472 | 473 | m.fmut.Lock() 474 | status.mutex.RLock() 475 | if m.isBlockStillNeeded(status) { 476 | m.blockCaches[status.folder].PinNewBlock(status.block, status.data) 477 | } 478 | } 479 | } 480 | m.fmut.Unlock() 481 | status.mutex.RUnlock() 482 | } 483 | } 484 | 485 | // requires read locks or better on fmut and status.cv.L 486 | func (m *Model) isBlockStillNeeded(status *blockPullStatus) bool { 487 | entry, found := 
m.treeCaches[status.folder].GetEntry(status.file) 488 | if false == found { 489 | return false 490 | } 491 | 492 | for i, block := range entry.Blocks { 493 | blockStart := int64(i * protocol.BlockSize) 494 | if blockStart == status.offset && bytes.Equal(block.Hash, status.block.Hash) { 495 | return true 496 | } 497 | } 498 | 499 | return false 500 | } 501 | 502 | func (m *Model) pullBlock(status *blockPullStatus, addToCache bool) { 503 | m.fmut.RLock() 504 | m.pmut.RLock() 505 | status.cv.L.Lock() 506 | 507 | requestError := errors.New("can't get block from any devices") 508 | 509 | if done != status.state { 510 | devices, _ := m.treeCaches[status.folder].GetEntryDevices(status.file) 511 | conns := make([]connections.Connection, 0) 512 | for _, deviceIndex := range rand.Perm(len(devices)) { 513 | deviceWithFile := devices[deviceIndex] 514 | if conn, ok := m.protoConn[deviceWithFile]; ok { 515 | conns = append(conns, conn) 516 | } 517 | } 518 | m.fmut.RUnlock() 519 | m.pmut.RUnlock() 520 | 521 | if debug { 522 | l.Debugln(status.comment, "block at offset", status.offset, "size", status.block.Size, "for", status.folder, status.file) 523 | } 524 | 525 | var requestedData []byte 526 | 527 | for _, conn := range conns { 528 | if debug { 529 | l.Debugln("Trying to fetch block at offset", status.offset, "for", status.folder, status.file, "from device", conn.ID().String()[:5]) 530 | } 531 | 532 | requestedData, requestError = conn.Request(status.folder, status.file, status.offset, int(status.block.Size), status.block.Hash, false) 533 | if requestError == nil { 534 | // check hash 535 | actualHash := sha256.Sum256(requestedData) 536 | if bytes.Equal(actualHash[:], status.block.Hash) { 537 | break 538 | } else { 539 | requestError = errors.New(fmt.Sprint("Hash mismatch expected", status.block.Hash, "received", actualHash)) 540 | } 541 | } 542 | } 543 | 544 | status.state = done 545 | status.error = requestError 546 | status.data = requestedData 547 | 548 | 
status.cv.Broadcast() 549 | } else { 550 | m.fmut.RUnlock() 551 | m.pmut.RUnlock() 552 | } 553 | 554 | status.cv.L.Unlock() 555 | 556 | m.fmut.Lock() 557 | status.mutex.RLock() 558 | hash := b64.URLEncoding.EncodeToString(status.block.Hash) 559 | if requestError == nil && addToCache { 560 | m.blockCaches[status.folder].AddCachedFileData(status.block, status.data) 561 | } 562 | delete(m.pulls[status.folder], hash) 563 | m.fmut.Unlock() 564 | status.mutex.RUnlock() 565 | } 566 | 567 | func (m *Model) GetChildren(folder string, path string) []protocol.FileInfo { 568 | m.fmut.RLock() 569 | 570 | // TODO assert is directory? 571 | 572 | entries := m.treeCaches[folder].GetChildren(path) 573 | result := make([]protocol.FileInfo, len(entries)) 574 | for i, childPath := range entries { 575 | result[i], _ = m.treeCaches[folder].GetEntry(childPath) 576 | } 577 | 578 | m.fmut.RUnlock() 579 | 580 | return result 581 | } 582 | 583 | // required fmut read (or better) lock before entry 584 | func (m *Model) isFolderSharedWithDevice(folder string, deviceID protocol.DeviceID) bool { 585 | for _, device := range m.folderDevices[folder] { 586 | if device.Equals(deviceID) { 587 | return true 588 | } 589 | } 590 | return false 591 | } 592 | 593 | func (m *Model) isFilePinned(folder string, filename string) bool { 594 | pins := m.pinnedFiles[folder] 595 | i := sort.SearchStrings(pins, filename) 596 | 597 | return i < len(pins) && pins[i] == filename 598 | } 599 | 600 | // An index was received from the peer device 601 | func (m *Model) Index(deviceID protocol.DeviceID, folder string, files []protocol.FileInfo) { 602 | if debug { 603 | l.Debugln("model: receiving index from device", deviceID.String()[:5], "for folder", folder) 604 | } 605 | 606 | m.fmut.Lock() 607 | defer m.fmut.Unlock() 608 | m.lmut.L.Lock() 609 | defer m.lmut.L.Unlock() 610 | 611 | if false == m.isFolderSharedWithDevice(folder, deviceID) { 612 | if debug { 613 | l.Debugln("model:", deviceID.String()[:5], "not shared 
with folder", folder, "so ignoring") 614 | } 615 | return 616 | } 617 | 618 | m.updateIndex(deviceID, folder, files) 619 | } 620 | 621 | // An index update was received from the peer device 622 | func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, files []protocol.FileInfo) { 623 | if debug { 624 | l.Debugln("model: receiving index update from device", deviceID.String()[:5], "for folder", folder) 625 | } 626 | 627 | m.fmut.Lock() 628 | defer m.fmut.Unlock() 629 | m.lmut.L.Lock() 630 | defer m.lmut.L.Unlock() 631 | 632 | if false == m.isFolderSharedWithDevice(folder, deviceID) { 633 | if debug { 634 | l.Debugln("model:", deviceID.String()[:5], "not shared with folder", folder, "so ignoring") 635 | } 636 | return 637 | } 638 | 639 | m.updateIndex(deviceID, folder, files) 640 | } 641 | 642 | // requires write locks on fmut and lmut before entry 643 | func (m *Model) updateIndex(deviceID protocol.DeviceID, folder string, files []protocol.FileInfo) { 644 | treeCache, ok := m.treeCaches[folder] 645 | if !ok { 646 | if debug { 647 | l.Debugln("folder", folder, "from", deviceID.String()[:5], "tree not configured, skipping") 648 | } 649 | return 650 | } 651 | fbc, ok := m.blockCaches[folder] 652 | if !ok { 653 | if debug { 654 | l.Debugln("folder", folder, "from", deviceID.String()[:5], "block not configured, skipping") 655 | } 656 | return 657 | } 658 | 659 | for _, file := range files { 660 | entry, existsInLocalModel := treeCache.GetEntry(file.Name) 661 | 662 | var globalToLocal protocol.Ordering 663 | if existsInLocalModel { 664 | globalToLocal = file.Version.Compare(entry.Version) 665 | } 666 | 667 | if debug { 668 | l.Debugln("updating entry for", file.Name, "from", deviceID.String()[:5], existsInLocalModel, globalToLocal) 669 | } 670 | 671 | // remove if necessary 672 | if existsInLocalModel && (globalToLocal == protocol.Greater || (file.Version.Concurrent(entry.Version) && file.WinsConflict(entry))) { 673 | if debug { 674 | l.Debugln("remove entry 
for", file.Name, "from", deviceID.String()[:5]) 675 | } 676 | 677 | treeCache.RemoveEntry(file.Name) 678 | 679 | if m.isFilePinned(folder, file.Name) { 680 | for _, block := range entry.Blocks { 681 | fbc.UnpinBlock(block.Hash) 682 | } 683 | } 684 | } 685 | 686 | // add if necessary 687 | if !existsInLocalModel || (globalToLocal == protocol.Greater || (file.Version.Concurrent(entry.Version) && file.WinsConflict(entry))) || (globalToLocal == protocol.Equal) { 688 | if file.IsDeleted() { 689 | if debug { 690 | l.Debugln("peer", deviceID.String()[:5], "has deleted file, doing nothing", file.Name) 691 | } 692 | continue 693 | } 694 | if file.IsInvalid() { 695 | if debug { 696 | l.Debugln("peer", deviceID.String()[:5], "has invalid file, doing nothing", file.Name) 697 | } 698 | continue 699 | } 700 | if file.IsSymlink() { 701 | if debug { 702 | l.Debugln("peer", deviceID.String()[:5], "has symlink, doing nothing", file.Name) 703 | } 704 | continue 705 | } 706 | 707 | if debug && file.IsDirectory() { 708 | l.Debugln("add directory", file.Name, "from", deviceID.String()[:5]) 709 | } else if debug { 710 | l.Debugln("add file", file.Name, "from", deviceID.String()[:5]) 711 | } 712 | 713 | treeCache.AddEntry(file, deviceID) 714 | 715 | // trigger pull on unsatisfied blocks for pinned files 716 | if m.isFilePinned(folder, file.Name) { 717 | for i, block := range file.Blocks { 718 | if false == fbc.HasPinnedBlock(block.Hash) { 719 | blockStart := int64(i * protocol.BlockSize) 720 | status := m.getOrCreatePullStatus("Pin fetch", folder, file.Name, block, blockStart, queued) 721 | m.pinnedList.PushBack(status) 722 | } 723 | } 724 | } 725 | } 726 | } 727 | 728 | m.lmut.Broadcast() 729 | } 730 | 731 | // A request was made by the peer device 732 | func (m *Model) Request(deviceID protocol.DeviceID, folder string, name string, offset int64, hash []byte, fromTemporary bool, buf []byte) error { 733 | return protocol.ErrNoSuchFile 734 | } 735 | 736 | // A cluster configuration message 
was received 737 | func (m *Model) ClusterConfig(deviceID protocol.DeviceID, config protocol.ClusterConfig) { 738 | if debug { 739 | l.Debugln("model: receiving cluster config from device", deviceID.String()[:5]) 740 | } 741 | } 742 | 743 | func (m *Model) DownloadProgress(device protocol.DeviceID, folder string, updates []protocol.FileDownloadProgressUpdate) { 744 | // no-op for us! 745 | } 746 | 747 | // The peer device closed the connection 748 | func (m *Model) Closed(conn protocol.Connection, err error) { 749 | deviceID := conn.ID() 750 | 751 | m.pmut.Lock() 752 | delete(m.protoConn, deviceID) 753 | m.pmut.Unlock() 754 | } 755 | 756 | func (m *Model) GetPinsStatusByFolder() map[string]string { 757 | result := make(map[string]string) 758 | 759 | m.fmut.RLock() 760 | defer m.fmut.RUnlock() 761 | 762 | for fldr, files := range m.pinnedFiles { 763 | pendingBytes := uint64(0) 764 | pendingFileCount := 0 765 | pinnedBytes := uint64(0) 766 | pinnedFileCount := 0 767 | fbc := m.blockCaches[fldr] 768 | tc := m.treeCaches[fldr] 769 | 770 | for _, file := range files { 771 | pending := false 772 | 773 | fileEntry, _ := tc.GetEntry(file) 774 | for _, block := range fileEntry.Blocks { 775 | if false == fbc.HasPinnedBlock(block.Hash) { 776 | pending = true 777 | pendingBytes += uint64(block.Size) 778 | } else { 779 | pinnedBytes += uint64(block.Size) 780 | } 781 | } 782 | 783 | if pending { 784 | pendingFileCount += 1 785 | } else { 786 | pinnedFileCount += 1 787 | } 788 | } 789 | 790 | if pendingFileCount > 0 { 791 | pendingByteComment := human.Bytes(pendingBytes) 792 | fileLabel := "files" 793 | if pendingFileCount == 1 { 794 | fileLabel = "file" 795 | } 796 | result[fldr] = fmt.Sprintf("%d %s (%s) pending", pendingFileCount, fileLabel, pendingByteComment) 797 | } else { 798 | if pinnedFileCount > 0 { 799 | pinnedByteComment := human.Bytes(pinnedBytes) 800 | fileLabel := "files" 801 | if pinnedFileCount == 1 { 802 | fileLabel = "file" 803 | } 804 | result[fldr] = 
fmt.Sprintf("%d %s (%s) pinned", pinnedFileCount, fileLabel, pinnedByteComment) 805 | } 806 | } 807 | } 808 | 809 | return result 810 | } 811 | 812 | type ConnectionInfo struct { 813 | DeviceID string 814 | Address string 815 | } 816 | 817 | func (m *Model) GetConnections() []ConnectionInfo { 818 | m.pmut.RLock() 819 | defer m.pmut.RUnlock() 820 | 821 | connections := make([]ConnectionInfo, 0) 822 | for _, conn := range m.protoConn { 823 | ci := ConnectionInfo{ 824 | DeviceID: conn.ID().String(), 825 | Address: conn.RemoteAddr().String(), 826 | } 827 | connections = append(connections, ci) 828 | } 829 | 830 | return connections 831 | } 832 | -------------------------------------------------------------------------------- /lib/model/model_test.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "io/ioutil" 5 | "os" 6 | "path" 7 | "testing" 8 | 9 | "github.com/boltdb/bolt" 10 | "github.com/burkemw3/syncthingfuse/lib/config" 11 | stconfig "github.com/syncthing/syncthing/lib/config" 12 | "github.com/syncthing/syncthing/lib/protocol" 13 | ) 14 | 15 | var ( 16 | deviceAlice, _ = protocol.DeviceIDFromString("AIR6LPZ-7K4PTTV-UXQSMUU-CPQ5YWH-OEDFIIQ-JUG777G-2YQXXR5-YD6AWQR") 17 | deviceBob, _ = protocol.DeviceIDFromString("GYRZZQB-IRNPV4Z-T7TC52W-EQYJ3TT-FDQW6MW-DFLMU42-SSSU6EM-FBK2VAY") 18 | deviceCarol, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU") 19 | ) 20 | 21 | func TestModelSingleIndex(t *testing.T) { 22 | // init 23 | dir, _ := ioutil.TempDir("", "stf-mt") 24 | defer os.RemoveAll(dir) 25 | 26 | cfg, database, folder := setup(deviceAlice, dir, deviceBob) 27 | 28 | // Arrange 29 | model := NewModel(cfg, database) 30 | 31 | files := []protocol.FileInfo{ 32 | protocol.FileInfo{Name: "file1"}, 33 | protocol.FileInfo{Name: "file2"}, 34 | protocol.FileInfo{Name: "dir1", Type: protocol.FileInfoTypeDirectory}, 35 | protocol.FileInfo{Name: 
"dir1/dirfile1"}, 36 | protocol.FileInfo{Name: "dir1/dirfile2"}, 37 | } 38 | 39 | // Act 40 | model.Index(deviceBob, folder, files) 41 | 42 | // Assert 43 | children := model.GetChildren(folder, ".") 44 | assertContainsChild(t, children, "file2", 0) 45 | assertContainsChild(t, children, "file2", 0) 46 | assertContainsChild(t, children, "dir1", protocol.FileInfoTypeDirectory) 47 | if len(children) != 3 { 48 | t.Error("expected 3 children, but got", len(children)) 49 | } 50 | 51 | children = model.GetChildren(folder, "dir1") 52 | assertContainsChild(t, children, "dir1/dirfile1", 0) 53 | assertContainsChild(t, children, "dir1/dirfile2", 0) 54 | if len(children) != 2 { 55 | t.Error("expected 2 children, but got", len(children)) 56 | } 57 | 58 | assertEntry(t, model, folder, "file1", 0) 59 | assertEntry(t, model, folder, "file2", 0) 60 | assertEntry(t, model, folder, "dir1", protocol.FileInfoTypeDirectory) 61 | assertEntry(t, model, folder, "dir1/dirfile1", 0) 62 | assertEntry(t, model, folder, "dir1/dirfile2", 0) 63 | } 64 | 65 | func TestIndexFromUnsharedPeerIgnored(t *testing.T) { 66 | // init 67 | dir, _ := ioutil.TempDir("", "stf-mt") 68 | defer os.RemoveAll(dir) 69 | cfg, database, folder := setup(deviceAlice, dir, deviceBob) 70 | 71 | // Arrange 72 | model := NewModel(cfg, database) 73 | 74 | files := []protocol.FileInfo{ 75 | protocol.FileInfo{Name: "file1"}, 76 | } 77 | 78 | // Act 79 | model.Index(deviceCarol, folder, files) 80 | 81 | // Assert 82 | children := model.GetChildren(folder, ".") 83 | if len(children) != 0 { 84 | t.Error("expected 0 children, but got", len(children)) 85 | } 86 | 87 | _, found := model.GetEntry(folder, files[0].Name) 88 | if found { 89 | t.Error("expected unfound file, but found", files[0].Name) 90 | } 91 | } 92 | 93 | func TestPeerRemovedRestart(t *testing.T) { 94 | // init 95 | dir, _ := ioutil.TempDir("", "stf-mt") 96 | defer os.RemoveAll(dir) 97 | cfg, database, folder := setup(deviceAlice, dir, deviceBob, deviceCarol) 98 | 99 | 
// Arrange 100 | model := NewModel(cfg, database) 101 | 102 | files := []protocol.FileInfo{ 103 | protocol.FileInfo{Name: "file1"}, 104 | } 105 | model.Index(deviceBob, folder, files) 106 | 107 | files = []protocol.FileInfo{ 108 | protocol.FileInfo{Name: "file2"}, 109 | } 110 | model.Index(deviceCarol, folder, files) 111 | 112 | // Act 113 | cfg.Raw().Folders[0].Devices = []stconfig.FolderDeviceConfiguration{ 114 | stconfig.FolderDeviceConfiguration{DeviceID: deviceCarol}, 115 | } 116 | model = NewModel(cfg, database) 117 | 118 | // Assert 119 | children := model.GetChildren(folder, ".") 120 | assertContainsChild(t, children, "file2", 0) 121 | if len(children) != 1 { 122 | t.Error("expected 1 children, but got", len(children)) 123 | } 124 | 125 | assertEntry(t, model, folder, "file2", 0) 126 | } 127 | 128 | func TestModelIndexWithRestart(t *testing.T) { 129 | // init 130 | dir, _ := ioutil.TempDir("", "stf-mt") 131 | defer os.RemoveAll(dir) 132 | cfg, database, folder := setup(deviceAlice, dir, deviceBob) 133 | 134 | // Arrange 135 | model := NewModel(cfg, database) 136 | 137 | files := []protocol.FileInfo{ 138 | protocol.FileInfo{Name: "file1"}, 139 | protocol.FileInfo{Name: "file2"}, 140 | protocol.FileInfo{Name: "dir1", Type: protocol.FileInfoTypeDirectory}, 141 | protocol.FileInfo{Name: "dir1/dirfile1"}, 142 | protocol.FileInfo{Name: "dir1/dirfile2"}, 143 | } 144 | 145 | model.Index(deviceBob, folder, files) 146 | 147 | // Act (restart db and model) 148 | databasePath := database.Path() 149 | database.Close() 150 | database, _ = bolt.Open(databasePath, 0600, nil) 151 | model = NewModel(cfg, database) 152 | 153 | // Assert 154 | children := model.GetChildren(folder, ".") 155 | assertContainsChild(t, children, "file2", 0) 156 | assertContainsChild(t, children, "file2", 0) 157 | assertContainsChild(t, children, "dir1", protocol.FileInfoTypeDirectory) 158 | if len(children) != 3 { 159 | t.Error("expected 3 children, but got", len(children)) 160 | } 161 | 162 | 
children = model.GetChildren(folder, "dir1") 163 | assertContainsChild(t, children, "dir1/dirfile1", 0) 164 | assertContainsChild(t, children, "dir1/dirfile2", 0) 165 | if len(children) != 2 { 166 | t.Error("expected 2 children, but got", len(children)) 167 | } 168 | 169 | assertEntry(t, model, folder, "file1", 0) 170 | assertEntry(t, model, folder, "file2", 0) 171 | assertEntry(t, model, folder, "dir1", protocol.FileInfoTypeDirectory) 172 | assertEntry(t, model, folder, "dir1/dirfile1", 0) 173 | assertEntry(t, model, folder, "dir1/dirfile2", 0) 174 | } 175 | 176 | func TestModelSingleIndexUpdate(t *testing.T) { 177 | // init 178 | dir, _ := ioutil.TempDir("", "stf-mt") 179 | defer os.RemoveAll(dir) 180 | cfg, database, folder := setup(deviceAlice, dir, deviceBob) 181 | 182 | // Arrange 183 | model := NewModel(cfg, database) 184 | 185 | version := protocol.Vector{Counters: []protocol.Counter{{1, 0}}} 186 | 187 | files := []protocol.FileInfo{ 188 | protocol.FileInfo{Name: "unchangedFile", Version: version}, 189 | protocol.FileInfo{Name: "file2dir", Version: version}, 190 | protocol.FileInfo{Name: "removedFile", Version: version}, 191 | protocol.FileInfo{Name: "dir1", Type: protocol.FileInfoTypeDirectory, Version: version}, 192 | protocol.FileInfo{Name: "dir1/dirfile1", Version: version}, 193 | protocol.FileInfo{Name: "dir1/dirfile2", Version: version}, 194 | protocol.FileInfo{Name: "dir2file", Type: protocol.FileInfoTypeDirectory, Version: version}, 195 | protocol.FileInfo{Name: "dir2file/file1", Version: version}, 196 | protocol.FileInfo{Name: "dir2file/file2", Version: version}, 197 | protocol.FileInfo{Name: "file2symlink", Version: version}, 198 | } 199 | model.Index(deviceBob, folder, files) 200 | 201 | // Act 202 | version = protocol.Vector{Counters: []protocol.Counter{{1, 1}}} 203 | files = []protocol.FileInfo{ 204 | protocol.FileInfo{Name: "file2dir", Type: protocol.FileInfoTypeDirectory, Version: version}, 205 | protocol.FileInfo{Name: "removedFile", 
Deleted: true, Version: version}, 206 | protocol.FileInfo{Name: "dir2file", Version: version}, 207 | protocol.FileInfo{Name: "dir2file/file1", Deleted: true, Version: version}, 208 | protocol.FileInfo{Name: "file2symlink", Type: protocol.FileInfoTypeSymlinkFile, Version: version}, 209 | } 210 | model.IndexUpdate(deviceBob, folder, files) 211 | 212 | // Assert 213 | children := model.GetChildren(folder, ".") 214 | assertContainsChild(t, children, "unchangedFile", 0) 215 | assertContainsChild(t, children, "file2dir", protocol.FileInfoTypeDirectory) 216 | assertContainsChild(t, children, "dir1", protocol.FileInfoTypeDirectory) 217 | assertContainsChild(t, children, "dir2file", 0) 218 | if len(children) != 4 { 219 | t.Error("expected 4 children, but got", len(children)) 220 | } 221 | 222 | children = model.GetChildren(folder, "dir1") 223 | assertContainsChild(t, children, "dir1/dirfile1", 0) 224 | assertContainsChild(t, children, "dir1/dirfile2", 0) 225 | if len(children) != 2 { 226 | t.Error("expected 2 children, but got", len(children)) 227 | } 228 | 229 | assertEntry(t, model, folder, "unchangedFile", 0) 230 | assertEntry(t, model, folder, "file2dir", protocol.FileInfoTypeDirectory) 231 | assertEntry(t, model, folder, "dir1", protocol.FileInfoTypeDirectory) 232 | assertEntry(t, model, folder, "dir1/dirfile1", 0) 233 | assertEntry(t, model, folder, "dir1/dirfile2", 0) 234 | assertEntry(t, model, folder, "dir2file", 0) 235 | } 236 | 237 | func assertContainsChild(t *testing.T, children []protocol.FileInfo, name string, infoType protocol.FileInfoType) { 238 | for _, child := range children { 239 | if child.Name == name && child.Type == infoType { 240 | return 241 | } 242 | } 243 | 244 | t.Error("Missing file", name) 245 | } 246 | 247 | func assertEntry(t *testing.T, model *Model, folder string, name string, infoType protocol.FileInfoType) { 248 | entry, found := model.GetEntry(folder, name) 249 | 250 | if false == found { 251 | t.Error("file expected, but not found:", 
name) 252 | return 253 | } 254 | 255 | if entry.Name == name && entry.Type == infoType { 256 | return 257 | } 258 | 259 | t.Error("incorrect entry for file", name) 260 | } 261 | 262 | func setup(deviceID protocol.DeviceID, dir string, peers ...protocol.DeviceID) (*config.Wrapper, *bolt.DB, string) { 263 | configFile, _ := ioutil.TempFile(dir, "config") 264 | realCfg := config.New(deviceID, deviceID.String()[:5]) 265 | cfg := config.Wrap(configFile.Name(), realCfg) 266 | 267 | databasePath := path.Join(path.Dir(cfg.ConfigPath()), "boltdb") 268 | database, _ := bolt.Open(databasePath, 0600, nil) 269 | 270 | folder := "syncthingfusetest" 271 | folderCfg := config.FolderConfiguration{ 272 | ID: folder, 273 | CacheSize: "1MiB", 274 | Devices: make([]stconfig.FolderDeviceConfiguration, len(peers)), 275 | } 276 | for i, peer := range peers { 277 | folderCfg.Devices[i] = stconfig.FolderDeviceConfiguration{DeviceID: peer} 278 | } 279 | cfg.SetFolder(folderCfg) 280 | 281 | return cfg, database, folder 282 | } 283 | -------------------------------------------------------------------------------- /old-status.md: -------------------------------------------------------------------------------- 1 | This file is outdated, but reserved for future development fodder. Focus on github issues instead. 2 | --- 3 | 4 | A special FUSE client for Syncthing 5 | 6 | - doesn't try to stay in full sync, just cache files locally when they are read 7 | - read-only currently 8 | 9 | Filled with lots of crappy code, for now :( 10 | 11 | TODO 12 | ---- 13 | 14 | - manage configuration (web GUI?) 15 | - handle errors 16 | - move todo to github issues 17 | - Figure out releasing, installing, configuring, updating, etc 18 | - test on linux 19 | - base on recent ST Prime code 20 | - Why is linux slower? platters? 21 | - show connection status in gui 22 | - undo actions in UI 23 | - FUSE 24 | - should probably prevent spotlight indexing with metadata_never_index. 
(spotlight might not work anyway https://github.com/osxfuse/osxfuse/wiki/FAQ#46-can-i-enable-spotlight-on-a-fuse-for-os-x-file-system) 25 | - support symlinks 26 | - would be nice to allow some files to be indexed. maybe we can detect the spotlight process and index conditionally 27 | - show status information in special FUSE files 28 | - track cache statistics 29 | - prefetch data blocks, based on patterns 30 | - file blocks in order 31 | - files in a folder in an order (name, size, mod date, etc) 32 | - switch to LRU-2Q file cache 33 | - Pin files for offline 34 | - Support writes. 35 | - restart from UI/API 36 | - upnp? 37 | - manage Unified Buffer Cache 38 | - OSX caches files! probably good, hard to update correctly 39 | - https://github.com/osxfuse/osxfuse/wiki/SSHFS#frequently-asked-questions 40 | - http://wagerlabs.com/blog/2008/03/04/hacking-the-mac-osx-unified-buffer-cache/ 41 | 42 | Decisions 43 | --------- 44 | 45 | - FUSE: likely Bazil FUSE over alternatives like hanwen go-fuse because 46 | - present state to peers as what we have in the cache 47 | - actually accurate 48 | - can respond to fill block Requests from peers correctly 49 | - process crashes before cached data spread to peers -------------------------------------------------------------------------------- /pre-commit-hook: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Copyright 2012 The Go Authors. All rights reserved. 3 | # Use of this source code is governed by a BSD-style 4 | # license that can be found in the LICENSE file. 5 | 6 | # check if assets haven't been re-generated 7 | 8 | assets_generation=$(stat -f %m lib/autogenerated/gui.files.go) 9 | for f in $(git diff --cached --name-only --diff-filter=ACMDR | grep '^gui/'); do 10 | file_modification=$(stat -f %m $f) 11 | if (( $file_modification > $assets_generation )); then 12 | echo >&2 "Assets should be re-packed. 
Run:" 13 | echo >&2 " go run scripts/packassets.go gui > lib/autogenerated/gui.files.go" 14 | exit 1 15 | fi 16 | done 17 | 18 | # git gofmt pre-commit hook 19 | # 20 | # To use, store as .git/hooks/pre-commit inside your repository and make sure 21 | # it has execute permissions. 22 | # 23 | # This script does not handle file names that contain spaces. 24 | 25 | gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$') 26 | [ -z "$gofiles" ] && exit 0 27 | 28 | unformatted=$(gofmt -l $gofiles) 29 | [ -z "$unformatted" ] && exit 0 30 | 31 | # Some files are not gofmt'd. Print message and fail. 32 | 33 | echo >&2 "Go files must be formatted with gofmt. Please run:" 34 | for fn in $unformatted; do 35 | echo >&2 " gofmt -w $PWD/$fn" 36 | done 37 | 38 | exit 1 39 | 40 | -------------------------------------------------------------------------------- /scripts/packassets.go: -------------------------------------------------------------------------------- 1 | // Copyright (C) 2014 The Syncthing Authors. 2 | // 3 | // This Source Code Form is subject to the terms of the Mozilla Public 4 | // License, v. 2.0. If a copy of the MPL was not distributed with this file, 5 | // You can obtain one at http://mozilla.org/MPL/2.0/. 
6 | 7 | // +build ignore 8 | 9 | package main 10 | 11 | import ( 12 | "bytes" 13 | "compress/gzip" 14 | "encoding/base64" 15 | "flag" 16 | "go/format" 17 | "io" 18 | "net/http" 19 | "os" 20 | "path/filepath" 21 | "strings" 22 | "text/template" 23 | "time" 24 | ) 25 | 26 | var tpl = template.Must(template.New("assets").Parse(`package autogenerated 27 | 28 | import ( 29 | "encoding/base64" 30 | ) 31 | 32 | const ( 33 | AssetsBuildDate = "{{.BuildDate}}" 34 | ) 35 | 36 | func Assets() map[string][]byte { 37 | var assets = make(map[string][]byte, {{.Assets | len}}) 38 | {{range $asset := .Assets}} 39 | assets["{{$asset.Name}}"], _ = base64.StdEncoding.DecodeString("{{$asset.Data}}"){{end}} 40 | return assets 41 | } 42 | 43 | `)) 44 | 45 | type asset struct { 46 | Name string 47 | Data string 48 | } 49 | 50 | var assets []asset 51 | 52 | func walkerFor(basePath string) filepath.WalkFunc { 53 | return func(name string, info os.FileInfo, err error) error { 54 | if err != nil { 55 | return err 56 | } 57 | 58 | if strings.HasPrefix(filepath.Base(name), ".") { 59 | // Skip dotfiles 60 | return nil 61 | } 62 | 63 | if info.Mode().IsRegular() { 64 | fd, err := os.Open(name) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | var buf bytes.Buffer 70 | gw := gzip.NewWriter(&buf) 71 | io.Copy(gw, fd) 72 | fd.Close() 73 | gw.Flush() 74 | gw.Close() 75 | 76 | name, _ = filepath.Rel(basePath, name) 77 | assets = append(assets, asset{ 78 | Name: filepath.ToSlash(name), 79 | Data: base64.StdEncoding.EncodeToString(buf.Bytes()), 80 | }) 81 | } 82 | 83 | return nil 84 | } 85 | } 86 | 87 | type templateVars struct { 88 | Assets []asset 89 | BuildDate string 90 | } 91 | 92 | func main() { 93 | flag.Parse() 94 | 95 | filepath.Walk(flag.Arg(0), walkerFor(flag.Arg(0))) 96 | var buf bytes.Buffer 97 | tpl.Execute(&buf, templateVars{ 98 | Assets: assets, 99 | BuildDate: time.Now().UTC().Format(http.TimeFormat), 100 | }) 101 | bs, err := format.Source(buf.Bytes()) 102 | if err != nil { 103 | 
panic(err) 104 | } 105 | os.Stdout.Write(bs) 106 | } 107 | --------------------------------------------------------------------------------