├── .github ├── FUNDING.yml └── workflows │ └── build.yml ├── .gitignore ├── .goreleaser.yml ├── CONTRIBUTING.md ├── LICENSE.md ├── Makefile ├── README.md ├── cache ├── cache.go └── sa.go ├── cmd ├── clean.go ├── dedupe.go ├── manual.go ├── root.go ├── sync.go ├── update.go └── upload.go ├── config ├── config.go ├── rclone.go ├── syncer.go └── uploader.go ├── go.mod ├── go.sum ├── logger ├── log.go ├── rotatefilehook.go └── util.go ├── main.go ├── maputils └── maputils.go ├── pathutils ├── file.go └── find.go ├── rclone ├── copy.go ├── dedupe.go ├── deletefile.go ├── enum.go ├── filter.go ├── misc.go ├── move.go ├── param.go ├── rclone.go ├── rmdir.go ├── sa.go └── sync.go ├── reutils ├── glob.go └── numbers.go ├── runtime └── runtime.go ├── stringutils ├── default.go ├── left.go └── until.go ├── syncer ├── copy.go ├── dedupe.go ├── move.go ├── sync.go └── syncer.go ├── systemd ├── crop_clean.service ├── crop_clean.timer ├── crop_sync.service ├── crop_sync.timer ├── crop_upload.service └── crop_upload.timer ├── uploader ├── check.go ├── checker │ ├── age.go │ ├── interface.go │ └── size.go ├── clean.go ├── cleaner │ ├── interface.go │ └── unionfs.go ├── cleans.go ├── copy.go ├── dedupe.go ├── file.go ├── move.go └── uploader.go └── web ├── handler.go ├── server.go └── struct.go /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | github: l3uddz -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | tags: 8 | - 'v*' 9 | pull_request: 10 | types: 11 | - opened 12 | - reopened 13 | - edited 14 | 15 | jobs: 16 | build: 17 | runs-on: ubuntu-latest 18 | steps: 19 | # dependencies 20 | - name: dependencies 21 | run: | 22 | curl -sfL https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh | sudo sh -s -- -b 
/usr/local/bin 23 | 24 | # checkout 25 | - name: checkout 26 | uses: actions/checkout@v2 27 | with: 28 | fetch-depth: 0 29 | 30 | # setup go 31 | - name: go 32 | uses: actions/setup-go@v1 33 | with: 34 | go-version: 1.17 35 | 36 | - name: go info 37 | run: | 38 | go version 39 | go env 40 | 41 | # cache 42 | - name: cache 43 | uses: actions/cache@v1 44 | with: 45 | path: vendor 46 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 47 | restore-keys: | 48 | ${{ runner.os }}-go- 49 | 50 | # vendor 51 | - name: vendor 52 | run: | 53 | make vendor 54 | 55 | # git status 56 | - name: git status 57 | run: git status 58 | 59 | # build 60 | - name: build 61 | if: startsWith(github.ref, 'refs/tags/') == false 62 | run: | 63 | make snapshot 64 | 65 | # publish 66 | - name: publish 67 | if: startsWith(github.ref, 'refs/tags/') 68 | env: 69 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 70 | run: | 71 | make publish 72 | 73 | # artifacts 74 | - name: artifact_linux 75 | uses: actions/upload-artifact@v2-preview 76 | with: 77 | name: build_linux 78 | path: dist/*linux* 79 | 80 | - name: artifact_darwin 81 | uses: actions/upload-artifact@v2-preview 82 | with: 83 | name: build_darwin 84 | path: dist/*darwin* 85 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # misc 2 | /.idea/ 3 | /.vscode/ 4 | 5 | # configuration 6 | config.json 7 | config.yaml 8 | *.db 9 | 10 | # vendor files 11 | /vendor/ 12 | 13 | # dist folder 14 | /dist/ 15 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | # https://goreleaser.com 2 | project_name: crop 3 | 4 | # Build 5 | builds: 6 | - env: 7 | - CGO_ENABLED=0 8 | goos: 9 | - linux 10 | - darwin 11 | goarch: 12 | - amd64 13 | - arm64 14 | ldflags: 15 | - -s -w 16 | - -X 
"github.com/l3uddz/crop/runtime.Version={{ .Version }}" 17 | - -X "github.com/l3uddz/crop/runtime.GitCommit={{ .ShortCommit }}" 18 | - -X "github.com/l3uddz/crop/runtime.Timestamp={{ .Timestamp }}" 19 | flags: 20 | - -trimpath 21 | 22 | # MacOS Universal Binaries 23 | universal_binaries: 24 | - 25 | replace: true 26 | 27 | # Archive 28 | archives: 29 | - 30 | name_template: "{{ .ProjectName }}_v{{ .Version }}_{{ .Os }}_{{ .Arch }}" 31 | format: "binary" 32 | 33 | # Checksum 34 | checksum: 35 | name_template: "checksums.txt" 36 | algorithm: sha512 37 | 38 | # Snapshot 39 | snapshot: 40 | name_template: "{{ .Major }}.{{ .Minor }}.{{ .Patch }}-dev+{{ .ShortCommit }}" 41 | 42 | # Changelog 43 | changelog: 44 | filters: 45 | exclude: 46 | - "^docs:" 47 | - "^test:" 48 | - "^Merge branch" -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Contributing 2 | 3 | Guide below will explain the process of submitting a pull request (PR). 4 | 5 | 1. Fork it. 6 | 7 | 1. Clone your forked project: 8 | 9 | ``` 10 | git clone http://github.com//crop 11 | ``` 12 | 13 | 1. Create a feature branch off of the **develop** branch: 14 | 15 | ``` 16 | git checkout -b 'feature/my-new-feature' develop 17 | ``` 18 | 19 | 1. Keep up to date with latest **develop** branch changes: 20 | 21 | ``` 22 | git pull --rebase upstream develop 23 | ``` 24 | 25 | 1. Commit your changes: 26 | 27 | ``` 28 | git commit -am 'Added some feature' 29 | ``` 30 | 31 | 1. Push commits to the feature branch: 32 | 33 | ``` 34 | git push origin feature/my-new-feature 35 | ``` 36 | 37 | 1. Submit feature branch as a PR to _our_ **develop** branch. 
38 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | Preamble 9 | 10 | The GNU General Public License is a free, copyleft license for 11 | software and other kinds of works. 12 | 13 | The licenses for most software and other practical works are designed 14 | to take away your freedom to share and change the works. By contrast, 15 | the GNU General Public License is intended to guarantee your freedom to 16 | share and change all versions of a program--to make sure it remains free 17 | software for all its users. We, the Free Software Foundation, use the 18 | GNU General Public License for most of our software; it applies also to 19 | any other work released this way by its authors. You can apply it to 20 | your programs, too. 21 | 22 | When we speak of free software, we are referring to freedom, not 23 | price. Our General Public Licenses are designed to make sure that you 24 | have the freedom to distribute copies of free software (and charge for 25 | them if you wish), that you receive source code or can get it if you 26 | want it, that you can change the software or use pieces of it in new 27 | free programs, and that you know you can do these things. 28 | 29 | To protect your rights, we need to prevent others from denying you 30 | these rights or asking you to surrender the rights. Therefore, you have 31 | certain responsibilities if you distribute copies of the software, or if 32 | you modify it: responsibilities to respect the freedom of others. 
33 | 34 | For example, if you distribute copies of such a program, whether 35 | gratis or for a fee, you must pass on to the recipients the same 36 | freedoms that you received. You must make sure that they, too, receive 37 | or can get the source code. And you must show them these terms so they 38 | know their rights. 39 | 40 | Developers that use the GNU GPL protect your rights with two steps: 41 | (1) assert copyright on the software, and (2) offer you this License 42 | giving you legal permission to copy, distribute and/or modify it. 43 | 44 | For the developers' and authors' protection, the GPL clearly explains 45 | that there is no warranty for this free software. For both users' and 46 | authors' sake, the GPL requires that modified versions be marked as 47 | changed, so that their problems will not be attributed erroneously to 48 | authors of previous versions. 49 | 50 | Some devices are designed to deny users access to install or run 51 | modified versions of the software inside them, although the manufacturer 52 | can do so. This is fundamentally incompatible with the aim of 53 | protecting users' freedom to change the software. The systematic 54 | pattern of such abuse occurs in the area of products for individuals to 55 | use, which is precisely where it is most unacceptable. Therefore, we 56 | have designed this version of the GPL to prohibit the practice for those 57 | products. If such problems arise substantially in other domains, we 58 | stand ready to extend this provision to those domains in future versions 59 | of the GPL, as needed to protect the freedom of users. 60 | 61 | Finally, every program is threatened constantly by software patents. 62 | States should not allow patents to restrict development and use of 63 | software on general-purpose computers, but in those that do, we wish to 64 | avoid the special danger that patents applied to a free program could 65 | make it effectively proprietary. 
To prevent this, the GPL assures that 66 | patents cannot be used to render the program non-free. 67 | 68 | The precise terms and conditions for copying, distribution and 69 | modification follow. 70 | 71 | TERMS AND CONDITIONS 72 | 73 | 0. Definitions. 74 | 75 | "This License" refers to version 3 of the GNU General Public License. 76 | 77 | "Copyright" also means copyright-like laws that apply to other kinds of 78 | works, such as semiconductor masks. 79 | 80 | "The Program" refers to any copyrightable work licensed under this 81 | License. Each licensee is addressed as "you". "Licensees" and 82 | "recipients" may be individuals or organizations. 83 | 84 | To "modify" a work means to copy from or adapt all or part of the work 85 | in a fashion requiring copyright permission, other than the making of an 86 | exact copy. The resulting work is called a "modified version" of the 87 | earlier work or a work "based on" the earlier work. 88 | 89 | A "covered work" means either the unmodified Program or a work based 90 | on the Program. 91 | 92 | To "propagate" a work means to do anything with it that, without 93 | permission, would make you directly or secondarily liable for 94 | infringement under applicable copyright law, except executing it on a 95 | computer or modifying a private copy. Propagation includes copying, 96 | distribution (with or without modification), making available to the 97 | public, and in some countries other activities as well. 98 | 99 | To "convey" a work means any kind of propagation that enables other 100 | parties to make or receive copies. Mere interaction with a user through 101 | a computer network, with no transfer of a copy, is not conveying. 
102 | 103 | An interactive user interface displays "Appropriate Legal Notices" 104 | to the extent that it includes a convenient and prominently visible 105 | feature that (1) displays an appropriate copyright notice, and (2) 106 | tells the user that there is no warranty for the work (except to the 107 | extent that warranties are provided), that licensees may convey the 108 | work under this License, and how to view a copy of this License. If 109 | the interface presents a list of user commands or options, such as a 110 | menu, a prominent item in the list meets this criterion. 111 | 112 | 1. Source Code. 113 | 114 | The "source code" for a work means the preferred form of the work 115 | for making modifications to it. "Object code" means any non-source 116 | form of a work. 117 | 118 | A "Standard Interface" means an interface that either is an official 119 | standard defined by a recognized standards body, or, in the case of 120 | interfaces specified for a particular programming language, one that 121 | is widely used among developers working in that language. 122 | 123 | The "System Libraries" of an executable work include anything, other 124 | than the work as a whole, that (a) is included in the normal form of 125 | packaging a Major Component, but which is not part of that Major 126 | Component, and (b) serves only to enable use of the work with that 127 | Major Component, or to implement a Standard Interface for which an 128 | implementation is available to the public in source code form. A 129 | "Major Component", in this context, means a major essential component 130 | (kernel, window system, and so on) of the specific operating system 131 | (if any) on which the executable work runs, or a compiler used to 132 | produce the work, or an object code interpreter used to run it. 
133 | 134 | The "Corresponding Source" for a work in object code form means all 135 | the source code needed to generate, install, and (for an executable 136 | work) run the object code and to modify the work, including scripts to 137 | control those activities. However, it does not include the work's 138 | System Libraries, or general-purpose tools or generally available free 139 | programs which are used unmodified in performing those activities but 140 | which are not part of the work. For example, Corresponding Source 141 | includes interface definition files associated with source files for 142 | the work, and the source code for shared libraries and dynamically 143 | linked subprograms that the work is specifically designed to require, 144 | such as by intimate data communication or control flow between those 145 | subprograms and other parts of the work. 146 | 147 | The Corresponding Source need not include anything that users 148 | can regenerate automatically from other parts of the Corresponding 149 | Source. 150 | 151 | The Corresponding Source for a work in source code form is that 152 | same work. 153 | 154 | 2. Basic Permissions. 155 | 156 | All rights granted under this License are granted for the term of 157 | copyright on the Program, and are irrevocable provided the stated 158 | conditions are met. This License explicitly affirms your unlimited 159 | permission to run the unmodified Program. The output from running a 160 | covered work is covered by this License only if the output, given its 161 | content, constitutes a covered work. This License acknowledges your 162 | rights of fair use or other equivalent, as provided by copyright law. 163 | 164 | You may make, run and propagate covered works that you do not 165 | convey, without conditions so long as your license otherwise remains 166 | in force. 
You may convey covered works to others for the sole purpose 167 | of having them make modifications exclusively for you, or provide you 168 | with facilities for running those works, provided that you comply with 169 | the terms of this License in conveying all material for which you do 170 | not control copyright. Those thus making or running the covered works 171 | for you must do so exclusively on your behalf, under your direction 172 | and control, on terms that prohibit them from making any copies of 173 | your copyrighted material outside their relationship with you. 174 | 175 | Conveying under any other circumstances is permitted solely under 176 | the conditions stated below. Sublicensing is not allowed; section 10 177 | makes it unnecessary. 178 | 179 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 180 | 181 | No covered work shall be deemed part of an effective technological 182 | measure under any applicable law fulfilling obligations under article 183 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 184 | similar laws prohibiting or restricting circumvention of such 185 | measures. 186 | 187 | When you convey a covered work, you waive any legal power to forbid 188 | circumvention of technological measures to the extent such circumvention 189 | is effected by exercising rights under this License with respect to 190 | the covered work, and you disclaim any intention to limit operation or 191 | modification of the work as a means of enforcing, against the work's 192 | users, your or third parties' legal rights to forbid circumvention of 193 | technological measures. 194 | 195 | 4. Conveying Verbatim Copies. 
196 | 197 | You may convey verbatim copies of the Program's source code as you 198 | receive it, in any medium, provided that you conspicuously and 199 | appropriately publish on each copy an appropriate copyright notice; 200 | keep intact all notices stating that this License and any 201 | non-permissive terms added in accord with section 7 apply to the code; 202 | keep intact all notices of the absence of any warranty; and give all 203 | recipients a copy of this License along with the Program. 204 | 205 | You may charge any price or no price for each copy that you convey, 206 | and you may offer support or warranty protection for a fee. 207 | 208 | 5. Conveying Modified Source Versions. 209 | 210 | You may convey a work based on the Program, or the modifications to 211 | produce it from the Program, in the form of source code under the 212 | terms of section 4, provided that you also meet all of these conditions: 213 | 214 | a) The work must carry prominent notices stating that you modified 215 | it, and giving a relevant date. 216 | 217 | b) The work must carry prominent notices stating that it is 218 | released under this License and any conditions added under section 219 | 7. This requirement modifies the requirement in section 4 to 220 | "keep intact all notices". 221 | 222 | c) You must license the entire work, as a whole, under this 223 | License to anyone who comes into possession of a copy. This 224 | License will therefore apply, along with any applicable section 7 225 | additional terms, to the whole of the work, and all its parts, 226 | regardless of how they are packaged. This License gives no 227 | permission to license the work in any other way, but it does not 228 | invalidate such permission if you have separately received it. 
229 | 230 | d) If the work has interactive user interfaces, each must display 231 | Appropriate Legal Notices; however, if the Program has interactive 232 | interfaces that do not display Appropriate Legal Notices, your 233 | work need not make them do so. 234 | 235 | A compilation of a covered work with other separate and independent 236 | works, which are not by their nature extensions of the covered work, 237 | and which are not combined with it such as to form a larger program, 238 | in or on a volume of a storage or distribution medium, is called an 239 | "aggregate" if the compilation and its resulting copyright are not 240 | used to limit the access or legal rights of the compilation's users 241 | beyond what the individual works permit. Inclusion of a covered work 242 | in an aggregate does not cause this License to apply to the other 243 | parts of the aggregate. 244 | 245 | 6. Conveying Non-Source Forms. 246 | 247 | You may convey a covered work in object code form under the terms 248 | of sections 4 and 5, provided that you also convey the 249 | machine-readable Corresponding Source under the terms of this License, 250 | in one of these ways: 251 | 252 | a) Convey the object code in, or embodied in, a physical product 253 | (including a physical distribution medium), accompanied by the 254 | Corresponding Source fixed on a durable physical medium 255 | customarily used for software interchange. 
256 | 257 | b) Convey the object code in, or embodied in, a physical product 258 | (including a physical distribution medium), accompanied by a 259 | written offer, valid for at least three years and valid for as 260 | long as you offer spare parts or customer support for that product 261 | model, to give anyone who possesses the object code either (1) a 262 | copy of the Corresponding Source for all the software in the 263 | product that is covered by this License, on a durable physical 264 | medium customarily used for software interchange, for a price no 265 | more than your reasonable cost of physically performing this 266 | conveying of source, or (2) access to copy the 267 | Corresponding Source from a network server at no charge. 268 | 269 | c) Convey individual copies of the object code with a copy of the 270 | written offer to provide the Corresponding Source. This 271 | alternative is allowed only occasionally and noncommercially, and 272 | only if you received the object code with such an offer, in accord 273 | with subsection 6b. 274 | 275 | d) Convey the object code by offering access from a designated 276 | place (gratis or for a charge), and offer equivalent access to the 277 | Corresponding Source in the same way through the same place at no 278 | further charge. You need not require recipients to copy the 279 | Corresponding Source along with the object code. If the place to 280 | copy the object code is a network server, the Corresponding Source 281 | may be on a different server (operated by you or a third party) 282 | that supports equivalent copying facilities, provided you maintain 283 | clear directions next to the object code saying where to find the 284 | Corresponding Source. Regardless of what server hosts the 285 | Corresponding Source, you remain obligated to ensure that it is 286 | available for as long as needed to satisfy these requirements. 
287 | 288 | e) Convey the object code using peer-to-peer transmission, provided 289 | you inform other peers where the object code and Corresponding 290 | Source of the work are being offered to the general public at no 291 | charge under subsection 6d. 292 | 293 | A separable portion of the object code, whose source code is excluded 294 | from the Corresponding Source as a System Library, need not be 295 | included in conveying the object code work. 296 | 297 | A "User Product" is either (1) a "consumer product", which means any 298 | tangible personal property which is normally used for personal, family, 299 | or household purposes, or (2) anything designed or sold for incorporation 300 | into a dwelling. In determining whether a product is a consumer product, 301 | doubtful cases shall be resolved in favor of coverage. For a particular 302 | product received by a particular user, "normally used" refers to a 303 | typical or common use of that class of product, regardless of the status 304 | of the particular user or of the way in which the particular user 305 | actually uses, or expects or is expected to use, the product. A product 306 | is a consumer product regardless of whether the product has substantial 307 | commercial, industrial or non-consumer uses, unless such uses represent 308 | the only significant mode of use of the product. 309 | 310 | "Installation Information" for a User Product means any methods, 311 | procedures, authorization keys, or other information required to install 312 | and execute modified versions of a covered work in that User Product from 313 | a modified version of its Corresponding Source. The information must 314 | suffice to ensure that the continued functioning of the modified object 315 | code is in no case prevented or interfered with solely because 316 | modification has been made. 
317 | 318 | If you convey an object code work under this section in, or with, or 319 | specifically for use in, a User Product, and the conveying occurs as 320 | part of a transaction in which the right of possession and use of the 321 | User Product is transferred to the recipient in perpetuity or for a 322 | fixed term (regardless of how the transaction is characterized), the 323 | Corresponding Source conveyed under this section must be accompanied 324 | by the Installation Information. But this requirement does not apply 325 | if neither you nor any third party retains the ability to install 326 | modified object code on the User Product (for example, the work has 327 | been installed in ROM). 328 | 329 | The requirement to provide Installation Information does not include a 330 | requirement to continue to provide support service, warranty, or updates 331 | for a work that has been modified or installed by the recipient, or for 332 | the User Product in which it has been modified or installed. Access to a 333 | network may be denied when the modification itself materially and 334 | adversely affects the operation of the network or violates the rules and 335 | protocols for communication across the network. 336 | 337 | Corresponding Source conveyed, and Installation Information provided, 338 | in accord with this section must be in a format that is publicly 339 | documented (and with an implementation available to the public in 340 | source code form), and must require no special password or key for 341 | unpacking, reading or copying. 342 | 343 | 7. Additional Terms. 344 | 345 | "Additional permissions" are terms that supplement the terms of this 346 | License by making exceptions from one or more of its conditions. 347 | Additional permissions that are applicable to the entire Program shall 348 | be treated as though they were included in this License, to the extent 349 | that they are valid under applicable law. 
If additional permissions 350 | apply only to part of the Program, that part may be used separately 351 | under those permissions, but the entire Program remains governed by 352 | this License without regard to the additional permissions. 353 | 354 | When you convey a copy of a covered work, you may at your option 355 | remove any additional permissions from that copy, or from any part of 356 | it. (Additional permissions may be written to require their own 357 | removal in certain cases when you modify the work.) You may place 358 | additional permissions on material, added by you to a covered work, 359 | for which you have or can give appropriate copyright permission. 360 | 361 | Notwithstanding any other provision of this License, for material you 362 | add to a covered work, you may (if authorized by the copyright holders of 363 | that material) supplement the terms of this License with terms: 364 | 365 | a) Disclaiming warranty or limiting liability differently from the 366 | terms of sections 15 and 16 of this License; or 367 | 368 | b) Requiring preservation of specified reasonable legal notices or 369 | author attributions in that material or in the Appropriate Legal 370 | Notices displayed by works containing it; or 371 | 372 | c) Prohibiting misrepresentation of the origin of that material, or 373 | requiring that modified versions of such material be marked in 374 | reasonable ways as different from the original version; or 375 | 376 | d) Limiting the use for publicity purposes of names of licensors or 377 | authors of the material; or 378 | 379 | e) Declining to grant rights under trademark law for use of some 380 | trade names, trademarks, or service marks; or 381 | 382 | f) Requiring indemnification of licensors and authors of that 383 | material by anyone who conveys the material (or modified versions of 384 | it) with contractual assumptions of liability to the recipient, for 385 | any liability that these contractual assumptions directly impose on 
386 | those licensors and authors. 387 | 388 | All other non-permissive additional terms are considered "further 389 | restrictions" within the meaning of section 10. If the Program as you 390 | received it, or any part of it, contains a notice stating that it is 391 | governed by this License along with a term that is a further 392 | restriction, you may remove that term. If a license document contains 393 | a further restriction but permits relicensing or conveying under this 394 | License, you may add to a covered work material governed by the terms 395 | of that license document, provided that the further restriction does 396 | not survive such relicensing or conveying. 397 | 398 | If you add terms to a covered work in accord with this section, you 399 | must place, in the relevant source files, a statement of the 400 | additional terms that apply to those files, or a notice indicating 401 | where to find the applicable terms. 402 | 403 | Additional terms, permissive or non-permissive, may be stated in the 404 | form of a separately written license, or stated as exceptions; 405 | the above requirements apply either way. 406 | 407 | 8. Termination. 408 | 409 | You may not propagate or modify a covered work except as expressly 410 | provided under this License. Any attempt otherwise to propagate or 411 | modify it is void, and will automatically terminate your rights under 412 | this License (including any patent licenses granted under the third 413 | paragraph of section 11). 414 | 415 | However, if you cease all violation of this License, then your 416 | license from a particular copyright holder is reinstated (a) 417 | provisionally, unless and until the copyright holder explicitly and 418 | finally terminates your license, and (b) permanently, if the copyright 419 | holder fails to notify you of the violation by some reasonable means 420 | prior to 60 days after the cessation. 
421 | 422 | Moreover, your license from a particular copyright holder is 423 | reinstated permanently if the copyright holder notifies you of the 424 | violation by some reasonable means, this is the first time you have 425 | received notice of violation of this License (for any work) from that 426 | copyright holder, and you cure the violation prior to 30 days after 427 | your receipt of the notice. 428 | 429 | Termination of your rights under this section does not terminate the 430 | licenses of parties who have received copies or rights from you under 431 | this License. If your rights have been terminated and not permanently 432 | reinstated, you do not qualify to receive new licenses for the same 433 | material under section 10. 434 | 435 | 9. Acceptance Not Required for Having Copies. 436 | 437 | You are not required to accept this License in order to receive or 438 | run a copy of the Program. Ancillary propagation of a covered work 439 | occurring solely as a consequence of using peer-to-peer transmission 440 | to receive a copy likewise does not require acceptance. However, 441 | nothing other than this License grants you permission to propagate or 442 | modify any covered work. These actions infringe copyright if you do 443 | not accept this License. Therefore, by modifying or propagating a 444 | covered work, you indicate your acceptance of this License to do so. 445 | 446 | 10. Automatic Licensing of Downstream Recipients. 447 | 448 | Each time you convey a covered work, the recipient automatically 449 | receives a license from the original licensors, to run, modify and 450 | propagate that work, subject to this License. You are not responsible 451 | for enforcing compliance by third parties with this License. 452 | 453 | An "entity transaction" is a transaction transferring control of an 454 | organization, or substantially all assets of one, or subdividing an 455 | organization, or merging organizations. 
If propagation of a covered 456 | work results from an entity transaction, each party to that 457 | transaction who receives a copy of the work also receives whatever 458 | licenses to the work the party's predecessor in interest had or could 459 | give under the previous paragraph, plus a right to possession of the 460 | Corresponding Source of the work from the predecessor in interest, if 461 | the predecessor has it or can get it with reasonable efforts. 462 | 463 | You may not impose any further restrictions on the exercise of the 464 | rights granted or affirmed under this License. For example, you may 465 | not impose a license fee, royalty, or other charge for exercise of 466 | rights granted under this License, and you may not initiate litigation 467 | (including a cross-claim or counterclaim in a lawsuit) alleging that 468 | any patent claim is infringed by making, using, selling, offering for 469 | sale, or importing the Program or any portion of it. 470 | 471 | 11. Patents. 472 | 473 | A "contributor" is a copyright holder who authorizes use under this 474 | License of the Program or a work on which the Program is based. The 475 | work thus licensed is called the contributor's "contributor version". 476 | 477 | A contributor's "essential patent claims" are all patent claims 478 | owned or controlled by the contributor, whether already acquired or 479 | hereafter acquired, that would be infringed by some manner, permitted 480 | by this License, of making, using, or selling its contributor version, 481 | but do not include claims that would be infringed only as a 482 | consequence of further modification of the contributor version. For 483 | purposes of this definition, "control" includes the right to grant 484 | patent sublicenses in a manner consistent with the requirements of 485 | this License. 
486 | 487 | Each contributor grants you a non-exclusive, worldwide, royalty-free 488 | patent license under the contributor's essential patent claims, to 489 | make, use, sell, offer for sale, import and otherwise run, modify and 490 | propagate the contents of its contributor version. 491 | 492 | In the following three paragraphs, a "patent license" is any express 493 | agreement or commitment, however denominated, not to enforce a patent 494 | (such as an express permission to practice a patent or covenant not to 495 | sue for patent infringement). To "grant" such a patent license to a 496 | party means to make such an agreement or commitment not to enforce a 497 | patent against the party. 498 | 499 | If you convey a covered work, knowingly relying on a patent license, 500 | and the Corresponding Source of the work is not available for anyone 501 | to copy, free of charge and under the terms of this License, through a 502 | publicly available network server or other readily accessible means, 503 | then you must either (1) cause the Corresponding Source to be so 504 | available, or (2) arrange to deprive yourself of the benefit of the 505 | patent license for this particular work, or (3) arrange, in a manner 506 | consistent with the requirements of this License, to extend the patent 507 | license to downstream recipients. "Knowingly relying" means you have 508 | actual knowledge that, but for the patent license, your conveying the 509 | covered work in a country, or your recipient's use of the covered work 510 | in a country, would infringe one or more identifiable patents in that 511 | country that you have reason to believe are valid. 
512 | 513 | If, pursuant to or in connection with a single transaction or 514 | arrangement, you convey, or propagate by procuring conveyance of, a 515 | covered work, and grant a patent license to some of the parties 516 | receiving the covered work authorizing them to use, propagate, modify 517 | or convey a specific copy of the covered work, then the patent license 518 | you grant is automatically extended to all recipients of the covered 519 | work and works based on it. 520 | 521 | A patent license is "discriminatory" if it does not include within 522 | the scope of its coverage, prohibits the exercise of, or is 523 | conditioned on the non-exercise of one or more of the rights that are 524 | specifically granted under this License. You may not convey a covered 525 | work if you are a party to an arrangement with a third party that is 526 | in the business of distributing software, under which you make payment 527 | to the third party based on the extent of your activity of conveying 528 | the work, and under which the third party grants, to any of the 529 | parties who would receive the covered work from you, a discriminatory 530 | patent license (a) in connection with copies of the covered work 531 | conveyed by you (or copies made from those copies), or (b) primarily 532 | for and in connection with specific products or compilations that 533 | contain the covered work, unless you entered into that arrangement, 534 | or that patent license was granted, prior to 28 March 2007. 535 | 536 | Nothing in this License shall be construed as excluding or limiting 537 | any implied license or other defenses to infringement that may 538 | otherwise be available to you under applicable patent law. 539 | 540 | 12. No Surrender of Others' Freedom. 541 | 542 | If conditions are imposed on you (whether by court order, agreement or 543 | otherwise) that contradict the conditions of this License, they do not 544 | excuse you from the conditions of this License. 
If you cannot convey a 545 | covered work so as to satisfy simultaneously your obligations under this 546 | License and any other pertinent obligations, then as a consequence you may 547 | not convey it at all. For example, if you agree to terms that obligate you 548 | to collect a royalty for further conveying from those to whom you convey 549 | the Program, the only way you could satisfy both those terms and this 550 | License would be to refrain entirely from conveying the Program. 551 | 552 | 13. Use with the GNU Affero General Public License. 553 | 554 | Notwithstanding any other provision of this License, you have 555 | permission to link or combine any covered work with a work licensed 556 | under version 3 of the GNU Affero General Public License into a single 557 | combined work, and to convey the resulting work. The terms of this 558 | License will continue to apply to the part which is the covered work, 559 | but the special requirements of the GNU Affero General Public License, 560 | section 13, concerning interaction through a network will apply to the 561 | combination as such. 562 | 563 | 14. Revised Versions of this License. 564 | 565 | The Free Software Foundation may publish revised and/or new versions of 566 | the GNU General Public License from time to time. Such new versions will 567 | be similar in spirit to the present version, but may differ in detail to 568 | address new problems or concerns. 569 | 570 | Each version is given a distinguishing version number. If the 571 | Program specifies that a certain numbered version of the GNU General 572 | Public License "or any later version" applies to it, you have the 573 | option of following the terms and conditions either of that numbered 574 | version or of any later version published by the Free Software 575 | Foundation. If the Program does not specify a version number of the 576 | GNU General Public License, you may choose any version ever published 577 | by the Free Software Foundation. 
578 | 579 | If the Program specifies that a proxy can decide which future 580 | versions of the GNU General Public License can be used, that proxy's 581 | public statement of acceptance of a version permanently authorizes you 582 | to choose that version for the Program. 583 | 584 | Later license versions may give you additional or different 585 | permissions. However, no additional obligations are imposed on any 586 | author or copyright holder as a result of your choosing to follow a 587 | later version. 588 | 589 | 15. Disclaimer of Warranty. 590 | 591 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 592 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 593 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 594 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 595 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 596 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 597 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 598 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 599 | 600 | 16. Limitation of Liability. 601 | 602 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 603 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 604 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 605 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 606 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 607 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 608 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 609 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 610 | SUCH DAMAGES. 611 | 612 | 17. Interpretation of Sections 15 and 16. 
613 | 614 | If the disclaimer of warranty and limitation of liability provided 615 | above cannot be given local legal effect according to their terms, 616 | reviewing courts shall apply local law that most closely approximates 617 | an absolute waiver of all civil liability in connection with the 618 | Program, unless a warranty or assumption of liability accompanies a 619 | copy of the Program in return for a fee. 620 | 621 | END OF TERMS AND CONDITIONS 622 | 623 | How to Apply These Terms to Your New Programs 624 | 625 | If you develop a new program, and you want it to be of the greatest 626 | possible use to the public, the best way to achieve this is to make it 627 | free software which everyone can redistribute and change under these terms. 628 | 629 | To do so, attach the following notices to the program. It is safest 630 | to attach them to the start of each source file to most effectively 631 | state the exclusion of warranty; and each file should have at least 632 | the "copyright" line and a pointer to where the full notice is found. 633 | 634 | 635 | Copyright (C) 636 | 637 | This program is free software: you can redistribute it and/or modify 638 | it under the terms of the GNU General Public License as published by 639 | the Free Software Foundation, either version 3 of the License, or 640 | (at your option) any later version. 641 | 642 | This program is distributed in the hope that it will be useful, 643 | but WITHOUT ANY WARRANTY; without even the implied warranty of 644 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 645 | GNU General Public License for more details. 646 | 647 | You should have received a copy of the GNU General Public License 648 | along with this program. If not, see . 649 | 650 | Also add information on how to contact you by electronic and paper mail. 
651 | 652 | If the program does terminal interaction, make it output a short 653 | notice like this when it starts in an interactive mode: 654 | 655 | Copyright (C) 656 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 657 | This is free software, and you are welcome to redistribute it 658 | under certain conditions; type `show c' for details. 659 | 660 | The hypothetical commands `show w' and `show c' should show the appropriate 661 | parts of the General Public License. Of course, your program's commands 662 | might be different; for a GUI interface, you would use an "about box". 663 | 664 | You should also get your employer (if you work as a programmer) or school, 665 | if any, to sign a "copyright disclaimer" for the program, if necessary. 666 | For more information on this, and how to apply and follow the GNU GPL, see 667 | . 668 | 669 | The GNU General Public License does not permit incorporating your program 670 | into proprietary programs. If your program is a subroutine library, you 671 | may consider it more useful to permit linking proprietary applications with 672 | the library. If this is what you want to do, use the GNU Lesser General 673 | Public License instead of this License. But first, please read 674 | . 675 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := build 2 | CMD = crop 3 | GOARCH = $(shell go env GOARCH) 4 | GOOS = $(shell go env GOOS) 5 | TARGET = ${GOOS}_${GOARCH} 6 | DIST_PATH = dist 7 | BUILD_PATH = ${DIST_PATH}/${CMD}_${TARGET} 8 | DESTDIR = /usr/local/bin 9 | GO_FILES = $(shell find . -path ./vendor -prune -or -type f -name '*.go' -print) 10 | GO_PACKAGES = $(shell go list -mod vendor ./...) 
11 | GIT_COMMIT = $(shell git rev-parse --short HEAD) 12 | TIMESTAMP = $(shell date +%s) 13 | VERSION ?= 0.0.0-dev 14 | 15 | # Deps 16 | .PHONY: check_goreleaser 17 | check_goreleaser: 18 | @command -v goreleaser >/dev/null || (echo "goreleaser is required."; exit 1) 19 | 20 | .PHONY: test 21 | test: ## Run tests 22 | @echo "*** go test ***" 23 | go test ./... -cover -v -race ${GO_PACKAGES} 24 | 25 | .PHONY: vendor 26 | vendor: ## Vendor files and tidy go.mod 27 | go mod vendor 28 | go mod tidy 29 | 30 | .PHONY: vendor_update 31 | vendor_update: ## Update vendor dependencies 32 | go get -u ./... 33 | ${MAKE} vendor 34 | 35 | .PHONY: build 36 | build: vendor ${BUILD_PATH}/${CMD} ## Build application 37 | 38 | # Binary 39 | ${BUILD_PATH}/${CMD}: ${GO_FILES} go.sum 40 | @echo "Building for ${TARGET}..." && \ 41 | mkdir -p ${BUILD_PATH} && \ 42 | CGO_ENABLED=0 go build \ 43 | -mod vendor \ 44 | -trimpath \ 45 | -ldflags "-s -w -X github.com/l3uddz/crop/runtime.Version=${VERSION} -X github.com/l3uddz/crop/runtime.GitCommit=${GIT_COMMIT} -X github.com/l3uddz/crop/runtime.Timestamp=${TIMESTAMP}" \ 46 | -o ${BUILD_PATH}/${CMD} \ 47 | . 
48 | 49 | .PHONY: install 50 | install: build ## Install binary 51 | install -m 0755 ${BUILD_PATH}/${CMD} ${DESTDIR}/${CMD} 52 | 53 | .PHONY: clean 54 | clean: ## Cleanup 55 | rm -rf ${DIST_PATH} 56 | 57 | .PHONY: fetch 58 | fetch: ## Fetch vendor files 59 | go mod vendor 60 | 61 | .PHONY: release 62 | release: check_goreleaser ## Generate a release, but don't publish 63 | goreleaser --skip-publish --rm-dist 64 | 65 | .PHONY: publish 66 | publish: check_goreleaser ## Generate a release, and publish 67 | goreleaser --rm-dist 68 | 69 | .PHONY: snapshot 70 | snapshot: check_goreleaser ## Generate a snapshot release 71 | goreleaser --snapshot --skip-publish --rm-dist 72 | 73 | .PHONY: help 74 | help: 75 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![made-with-golang](https://img.shields.io/badge/Made%20with-Golang-blue.svg?style=flat-square)](https://golang.org/) 2 | [![License: GPL v3](https://img.shields.io/badge/License-GPL%203-blue.svg?style=flat-square)](https://github.com/l3uddz/crop/blob/master/LICENSE.md) 3 | [![last commit (master)](https://img.shields.io/github/last-commit/l3uddz/crop/master.svg?colorB=177DC1&label=Last%20Commit&style=flat-square)](https://github.com/l3uddz/crop/commits/master) 4 | [![Discord](https://img.shields.io/discord/381077432285003776.svg?colorB=177DC1&label=Discord&style=flat-square)](https://discord.io/cloudbox) 5 | [![Contributing](https://img.shields.io/badge/Contributing-gray.svg?style=flat-square)](CONTRIBUTING.md) 6 | [![Donate](https://img.shields.io/badge/Donate-gray.svg?style=flat-square)](#donate) 7 | 8 | # crop 9 | 10 | CLI tool to run upload/sync jobs with rclone. 
11 | 12 | ## Example Configuration 13 | 14 | ```yaml 15 | rclone: 16 | config: /home/seed/.config/rclone/rclone.conf 17 | path: /usr/bin/rclone 18 | stats: 30s 19 | live_rotate: false 20 | service_account_remotes: 21 | '/opt/rclone/service_accounts/crop': 22 | - tv 23 | - movies 24 | - music 25 | - 4k_movies 26 | - source_4k_movies 27 | - staging 28 | global_params: 29 | default: 30 | move: 31 | - '--order-by=modtime,ascending' 32 | - '--transfers=8' 33 | - '--delete-empty-src-dirs' 34 | sync: 35 | - '--fast-list' 36 | - '--tpslimit-burst=50' 37 | - '--max-backlog=2000000' 38 | - '--track-renames' 39 | - '--use-mmap' 40 | - '--no-update-modtime' 41 | - '--drive-chunk-size=128M' 42 | dedupe: 43 | - '--dedupe-mode=newest' 44 | - '--tpslimit=5' 45 | uploader: 46 | - name: cloudbox_unionfs 47 | enabled: true 48 | check: 49 | limit: 360 50 | type: age 51 | hidden: 52 | cleanup: true 53 | enabled: true 54 | folder: /mnt/local/.unionfs-fuse 55 | type: unionfs 56 | local_folder: /mnt/local/Media 57 | remotes: 58 | clean: 59 | - 'gdrive:' 60 | - 'staging:' 61 | move: 'staging:/Media' 62 | move_server_side: 63 | - from: 'staging:/Media' 64 | to: 'gdrive:/Media' 65 | rclone_params: 66 | global_move: default 67 | move_server_side: 68 | - '--delete-empty-src-dirs' 69 | global_dedupe: default 70 | - name: tv 71 | enabled: true 72 | check: 73 | limit: 1440 74 | type: age 75 | local_folder: /mnt/local/Media/TV 76 | remotes: 77 | move: 'tv:/Media/TV' 78 | rclone_params: 79 | global_move: default 80 | - name: movies 81 | enabled: true 82 | check: 83 | limit: 720 84 | type: age 85 | local_folder: /mnt/local/Media/Movies 86 | remotes: 87 | move: 'movies:/Media/Movies' 88 | rclone_params: 89 | global_move: default 90 | syncer: 91 | - name: 4k_movies 92 | enabled: true 93 | source_remote: 'source_4k_movies:/' 94 | remotes: 95 | sync: 96 | - '4k_movies:/' 97 | dedupe: 98 | - '4k_movies:/' 99 | rclone_params: 100 | global_sync: default 101 | global_dedupe: default 102 | ``` 103 | 104 | ## 
Example Commands 105 | 106 | - Clean - Perform clean for associated uploader job(s). 107 | 108 | `crop clean --dry-run` 109 | 110 | `crop clean -u google` 111 | 112 | `crop clean` 113 | 114 | - Upload - Perform uploader job(s) 115 | 116 | `crop upload --dry-run` 117 | 118 | `crop upload -u google` 119 | 120 | `crop upload -u google --no-check` 121 | 122 | `crop upload` 123 | 124 | - Sync - Perform syncer job(s) 125 | 126 | `crop sync --dry-run` 127 | 128 | `crop sync -s google` 129 | 130 | `crop sync` 131 | 132 | `crop sync -p 2` 133 | 134 | - Manual - Perform manual sync/copy job(s) 135 | 136 | `crop manual --copy --src remote1:/Backups --dst remote2:/Backups --sa /opt/service_accounts -- --dry-run` 137 | 138 | `crop manual --sync --src remote1:/Backups --dst remote2:/Backups --sa /opt/service_accounts --dedupe --` 139 | 140 | *** 141 | 142 | ## Notes 143 | 144 | - Make use of `--dry-run` and `-vv` to ensure your configuration is correct and yielding expected results. 145 | 146 | - `live_rotate` will enable on-demand live-rotation of service accounts for a customized build of rclone / gclone. 147 | 148 | 149 | ## Credits 150 | 151 | - [rclone](https://github.com/rclone/rclone) - Without this awesome tool, this project would not exist! 152 | - [sasync](https://github.com/88lex/sasync) - Sync ideas and service account technique originated from here. 153 | 154 | # Donate 155 | 156 | If you find this project helpful, feel free to make a small donation to the developer: 157 | 158 | - [Monzo](https://monzo.me/today): Credit Cards, Apple Pay, Google Pay 159 | 160 | - [Paypal: l3uddz@gmail.com](https://www.paypal.me/l3uddz) 161 | 162 | - [GitHub Sponsor](https://github.com/sponsors/l3uddz): GitHub matches contributions for first 12 months. 
163 | 164 | - BTC: 3CiHME1HZQsNNcDL6BArG7PbZLa8zUUgjL 165 | -------------------------------------------------------------------------------- /cache/cache.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "github.com/l3uddz/crop/logger" 5 | "github.com/l3uddz/crop/stringutils" 6 | "github.com/pkg/errors" 7 | "github.com/zippoxer/bow" 8 | ) 9 | 10 | var ( 11 | log = logger.GetLogger("cache") 12 | cacheFilePath string 13 | 14 | // Internal 15 | db *bow.DB 16 | ) 17 | 18 | /* Public */ 19 | 20 | func Init(cachePath string, logLevel int) error { 21 | // set globals 22 | cacheFilePath = cachePath 23 | 24 | // set badger options 25 | opts := make([]bow.Option, 0) 26 | 27 | if logLevel < 2 { 28 | // disable badger logging for non trace log level 29 | opts = append(opts, bow.SetLogger(nil)) 30 | } 31 | 32 | // init database 33 | v, err := bow.Open(cachePath, opts...) 34 | if err != nil { 35 | return errors.WithMessage(err, "failed opening cache") 36 | } 37 | 38 | db = v 39 | 40 | return nil 41 | } 42 | 43 | func Close() { 44 | // clear banned sa's 45 | ClearExpiredBans() 46 | 47 | // close 48 | if err := db.Close(); err != nil { 49 | log.WithError(err).Error("Failed closing cache gracefully...") 50 | } 51 | } 52 | 53 | func ShowUsing() { 54 | log.Infof("Using %s = %q", stringutils.LeftJust("CACHE", " ", 10), cacheFilePath) 55 | } 56 | -------------------------------------------------------------------------------- /cache/sa.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "github.com/zippoxer/bow" 5 | "time" 6 | ) 7 | 8 | type Banned struct { 9 | Path string `bow:"key"` 10 | Expires time.Time 11 | } 12 | 13 | func ClearExpiredBans() { 14 | iter := db.Bucket("banned").Iter() 15 | defer iter.Close() 16 | 17 | var page Banned 18 | for iter.Next(&page) { 19 | _, _ = IsBanned(page.Path) 20 | } 21 | } 22 | 23 | func IsBanned(key 
string) (bool, time.Time) { 24 | // check if key was found in banned bucket 25 | var item Banned 26 | err := db.Bucket("banned").Get(key, &item) 27 | 28 | // was key not found 29 | if err == bow.ErrNotFound { 30 | // this key is not banned 31 | return false, time.Time{} 32 | } else if err != nil { 33 | log.WithError(err).Errorf("Failed checking banned bucket for: %q", key) 34 | return false, time.Time{} 35 | } 36 | 37 | // check if the ban has expired 38 | if item.Expires.Before(time.Now().UTC()) { 39 | // the ban has expired, remove 40 | log.Warnf("Expired %q: %v", key, item.Expires) 41 | 42 | err := db.Bucket("banned").Delete(key) 43 | if err != nil { 44 | log.WithError(err).Errorf("Failed removing from banned bucket: %q", key) 45 | return false, time.Time{} 46 | } 47 | 48 | return false, time.Time{} 49 | } 50 | 51 | // this key is still banned 52 | return true, item.Expires 53 | } 54 | 55 | func SetBanned(key string, hours int) error { 56 | expiry := time.Now().UTC().Add(time.Duration(hours) * time.Hour) 57 | 58 | return db.Bucket("banned").Put(Banned{ 59 | Path: key, 60 | Expires: expiry, 61 | }) 62 | } 63 | -------------------------------------------------------------------------------- /cmd/clean.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/dustin/go-humanize" 5 | "github.com/l3uddz/crop/config" 6 | "github.com/l3uddz/crop/uploader" 7 | "github.com/pkg/errors" 8 | "github.com/spf13/cobra" 9 | "github.com/yale8848/gorpool" 10 | "strings" 11 | "time" 12 | ) 13 | 14 | var cleanCmd = &cobra.Command{ 15 | Use: "clean", 16 | Short: "Perform cleans associated with uploader(s)", 17 | Long: `This command can be used to trigger a clean associated with uploader(s).`, 18 | 19 | Run: func(cmd *cobra.Command, args []string) { 20 | // init core 21 | initCore(true) 22 | defer releaseFileLock() 23 | 24 | // iterate uploader's 25 | started := time.Now().UTC() 26 | 27 | for _, 
uploaderConfig := range config.Config.Uploader { 28 | log := log.WithField("uploader", uploaderConfig.Name) 29 | 30 | // skip disabled uploader(s) 31 | if !uploaderConfig.Enabled { 32 | log.Debug("Skipping disabled uploader") 33 | continue 34 | } 35 | 36 | // skip uploader specific chosen 37 | if flagUploader != "" && !strings.EqualFold(uploaderConfig.Name, flagUploader) { 38 | log.Debugf("Skipping uploader as not: %q", flagUploader) 39 | continue 40 | } 41 | 42 | // create uploader 43 | upload, err := uploader.New(config.Config, &uploaderConfig, uploaderConfig.Name) 44 | if err != nil { 45 | log.WithError(err).Error("Failed initializing uploader, skipping...") 46 | continue 47 | } 48 | 49 | log.Info("Clean commencing...") 50 | 51 | // perform upload 52 | if err := performClean(upload); err != nil { 53 | upload.Log.WithError(err).Error("Error occurred while running clean, skipping...") 54 | continue 55 | } 56 | } 57 | 58 | log.Infof("Finished in: %v", humanize.RelTime(started, time.Now().UTC(), "", "")) 59 | }, 60 | } 61 | 62 | func init() { 63 | rootCmd.AddCommand(cleanCmd) 64 | 65 | cleanCmd.Flags().StringVarP(&flagUploader, "uploader", "u", "", "Run for a specific uploader") 66 | } 67 | 68 | func performClean(u *uploader.Uploader) error { 69 | u.Log.Info("Running cleans...") 70 | 71 | /* Cleans */ 72 | if u.Config.Hidden.Enabled { 73 | // set worker count 74 | workers := u.Config.Hidden.Workers 75 | if workers == 0 { 76 | workers = 8 77 | } 78 | 79 | // create worker pool 80 | gp := gorpool.NewPool(workers, 0). 81 | Start(). 
82 | EnableWaitForAll(true) 83 | 84 | // queue clean tasks 85 | err := u.PerformCleans(gp) 86 | if err != nil { 87 | return errors.Wrap(err, "failed clearing remotes") 88 | } 89 | } 90 | 91 | u.Log.Info("Finished cleans!") 92 | return nil 93 | } 94 | -------------------------------------------------------------------------------- /cmd/dedupe.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/dustin/go-humanize" 5 | "github.com/l3uddz/crop/config" 6 | "github.com/l3uddz/crop/uploader" 7 | "github.com/pkg/errors" 8 | "github.com/spf13/cobra" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | var dedupeCmd = &cobra.Command{ 14 | Use: "dedupe", 15 | Short: "Perform dedupe associated with uploader(s)", 16 | Long: `This command can be used to trigger a dedupe associated with uploader(s).`, 17 | 18 | Run: func(cmd *cobra.Command, args []string) { 19 | // init core 20 | initCore(true) 21 | defer releaseFileLock() 22 | 23 | // iterate uploader's 24 | started := time.Now().UTC() 25 | 26 | for _, uploaderConfig := range config.Config.Uploader { 27 | log := log.WithField("uploader", uploaderConfig.Name) 28 | 29 | // skip disabled uploader(s) 30 | if !uploaderConfig.Enabled { 31 | log.Debug("Skipping disabled uploader") 32 | continue 33 | } 34 | 35 | // skip uploader specific chosen 36 | if flagUploader != "" && !strings.EqualFold(uploaderConfig.Name, flagUploader) { 37 | log.Debugf("Skipping uploader as not: %q", flagUploader) 38 | continue 39 | } 40 | 41 | // create uploader 42 | upload, err := uploader.New(config.Config, &uploaderConfig, uploaderConfig.Name) 43 | if err != nil { 44 | log.WithError(err).Error("Failed initializing uploader, skipping...") 45 | continue 46 | } 47 | 48 | log.Info("Dedupe commencing...") 49 | 50 | // perform upload 51 | if err := performDedupe(upload); err != nil { 52 | upload.Log.WithError(err).Error("Error occurred while running dedupe, skipping...") 53 | continue 54 | } 
55 | } 56 | 57 | log.Infof("Finished in: %v", humanize.RelTime(started, time.Now().UTC(), "", "")) 58 | }, 59 | } 60 | 61 | func init() { 62 | rootCmd.AddCommand(dedupeCmd) 63 | 64 | dedupeCmd.Flags().StringVarP(&flagUploader, "uploader", "u", "", "Run for a specific uploader") 65 | } 66 | 67 | func performDedupe(u *uploader.Uploader) error { 68 | u.Log.Info("Running dedupe...") 69 | 70 | /* Dedupe */ 71 | err := u.Dedupe(nil) 72 | if err != nil { 73 | return errors.Wrap(err, "failed dedupe remotes") 74 | } 75 | 76 | u.Log.Info("Finished dedupe!") 77 | return nil 78 | } 79 | -------------------------------------------------------------------------------- /cmd/manual.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/dustin/go-humanize" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/config" 7 | "github.com/l3uddz/crop/rclone" 8 | "github.com/l3uddz/crop/stringutils" 9 | "github.com/l3uddz/crop/syncer" 10 | "github.com/sirupsen/logrus" 11 | "github.com/spf13/cobra" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | var ( 17 | flagSrc string 18 | flagDest string 19 | flagSaFolder string 20 | flagDedupe bool 21 | flagCopy bool 22 | flagSync bool 23 | ) 24 | 25 | var manualCmd = &cobra.Command{ 26 | Use: "manual", 27 | Short: "Perform a manual copy/sync task", 28 | Long: `This command can be used to trigger a copy/sync without requiring configuration changes.`, 29 | Run: func(cmd *cobra.Command, args []string) { 30 | // init core 31 | initCore(true) 32 | defer cache.Close() 33 | defer releaseFileLock() 34 | 35 | // determine destination remotes 36 | syncRemotes := make([]string, 0) 37 | copyRemotes := make([]string, 0) 38 | 39 | switch { 40 | case flagCopy && flagSync: 41 | log.Fatal("You should must a single mode to use, --sync / --copy") 42 | case flagCopy: 43 | copyRemotes = append(copyRemotes, flagDest) 44 | case flagSync: 45 | syncRemotes = append(syncRemotes, flagDest) 46 | 
default: 47 | log.Fatal("You must specify a mode to use, --sync / --copy") 48 | } 49 | 50 | // create remote to service account map 51 | remoteSaFolders := make(map[string][]string) 52 | 53 | switch flagSaFolder != "" { 54 | case true: 55 | if strings.Contains(flagSrc, ":") { 56 | // source is a remote 57 | srcRemote := stringutils.FromLeftUntil(flagSrc, ":") 58 | log.Debugf("Using service account folder for %q: %v", srcRemote, flagSaFolder) 59 | remoteSaFolders[flagSaFolder] = []string{srcRemote} 60 | } 61 | 62 | if strings.Contains(flagDest, ":") { 63 | // dest is a remote 64 | dstRemote := stringutils.FromLeftUntil(flagDest, ":") 65 | log.Debugf("Using service account folder for %q: %v", dstRemote, flagSaFolder) 66 | remoteSaFolders[flagSaFolder] = append(remoteSaFolders[flagSaFolder], dstRemote) 67 | } 68 | 69 | default: 70 | break 71 | } 72 | 73 | // create syncer config 74 | syncerConfig := config.SyncerConfig{ 75 | Name: "manual", 76 | Enabled: true, 77 | SourceRemote: flagSrc, 78 | Remotes: config.SyncerRemotes{ 79 | Copy: copyRemotes, 80 | Sync: syncRemotes, 81 | }, 82 | RcloneParams: config.SyncerRcloneParams{ 83 | Copy: args, 84 | Sync: args, 85 | Dedupe: []string{ 86 | "--tpslimit=5", 87 | }, 88 | }, 89 | } 90 | 91 | if flagDedupe { 92 | // dedupe was enabled 93 | syncerConfig.Remotes.Dedupe = []string{ 94 | flagDest, 95 | } 96 | } 97 | 98 | // create a config structure for manual sync 99 | cfg := config.Configuration{ 100 | Rclone: config.RcloneConfig{ 101 | Path: config.Config.Rclone.Path, 102 | Config: config.Config.Rclone.Config, 103 | Stats: config.Config.Rclone.Stats, 104 | DryRun: config.Config.Rclone.DryRun, 105 | ServiceAccountRemotes: remoteSaFolders, 106 | }, 107 | Uploader: nil, 108 | Syncer: []config.SyncerConfig{ 109 | syncerConfig, 110 | }, 111 | } 112 | 113 | // create syncer 114 | started := time.Now().UTC() 115 | sync, err := syncer.New(&cfg, &syncerConfig, syncerConfig.Name, 1) 116 | if err != nil { 117 | 
log.WithError(err).Fatal("Failed initializing syncer, skipping...") 118 | } 119 | 120 | // load service accounts 121 | serviceAccountCount := sync.RemoteServiceAccountFiles.ServiceAccountsCount() 122 | if serviceAccountCount > 0 { 123 | sync.Log.WithField("found_files", serviceAccountCount).Info("Loaded service accounts") 124 | } else { 125 | // no service accounts were loaded 126 | // check to see if any of the copy or sync remote(s) are banned 127 | banned, expiry := rclone.AnyRemotesBanned(sync.Config.Remotes.Copy) 128 | if banned && !expiry.IsZero() { 129 | // one of the copy remotes is banned, abort 130 | sync.Log.WithFields(logrus.Fields{ 131 | "expires_time": expiry, 132 | "expires_in": humanize.Time(expiry), 133 | }).Fatal("Cannot proceed as a copy remote is banned") 134 | } 135 | 136 | banned, expiry = rclone.AnyRemotesBanned(sync.Config.Remotes.Sync) 137 | if banned && !expiry.IsZero() { 138 | // one of the sync remotes is banned, abort 139 | sync.Log.WithFields(logrus.Fields{ 140 | "expires_time": expiry, 141 | "expires_in": humanize.Time(expiry), 142 | }).Fatal("Cannot proceed as a sync remote is banned") 143 | } 144 | } 145 | 146 | log.Info("Syncer commencing...") 147 | 148 | // perform sync 149 | if err := performSync(sync); err != nil { 150 | sync.Log.WithError(err).Fatal("Error occurred while running syncer, skipping...") 151 | } 152 | 153 | log.Infof("Finished in: %v", humanize.RelTime(started, time.Now().UTC(), "", "")) 154 | }, 155 | } 156 | 157 | func init() { 158 | rootCmd.AddCommand(manualCmd) 159 | 160 | manualCmd.Flags().StringVar(&flagSrc, "src", "", "Source") 161 | manualCmd.Flags().StringVar(&flagDest, "dst", "", "Destination") 162 | 163 | _ = manualCmd.MarkFlagRequired("src") 164 | _ = manualCmd.MarkFlagRequired("dst") 165 | 166 | manualCmd.Flags().StringVar(&flagSaFolder, "sa", "", "Service account folder") 167 | 168 | manualCmd.Flags().BoolVar(&flagCopy, "copy", false, "Copy to destination") 169 | manualCmd.Flags().BoolVar(&flagSync, 
"sync", false, "Sync to destination") 170 | manualCmd.Flags().BoolVar(&flagDedupe, "dedupe", false, "Dedupe destination") 171 | } 172 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/config" 7 | "github.com/l3uddz/crop/logger" 8 | "github.com/l3uddz/crop/pathutils" 9 | "github.com/l3uddz/crop/rclone" 10 | "github.com/l3uddz/crop/runtime" 11 | "github.com/l3uddz/crop/stringutils" 12 | "github.com/nightlyone/lockfile" 13 | "github.com/sirupsen/logrus" 14 | "github.com/spf13/cobra" 15 | "os" 16 | "path/filepath" 17 | "time" 18 | ) 19 | 20 | var ( 21 | // Global flags 22 | flagLogLevel = 0 23 | flagConfigFolder = pathutils.GetDefaultConfigPath() 24 | flagConfigFile = "config.yaml" 25 | flagCachePath = "cache" 26 | flagLogFile = "activity.log" 27 | flagLockFile = "crop.lock" 28 | flagDryRun bool 29 | flagNoDedupe bool 30 | 31 | // Global command specific 32 | flagUploader string 33 | 34 | // Global vars 35 | log *logrus.Entry 36 | flock lockfile.Lockfile 37 | ) 38 | 39 | var rootCmd = &cobra.Command{ 40 | Use: "crop", 41 | Short: "CLI application to assist harvesting your media", 42 | Long: `A CLI application that can be used to harvest your local media. 
43 | `, 44 | } 45 | 46 | func Execute() { 47 | if err := rootCmd.Execute(); err != nil { 48 | fmt.Println(err) 49 | os.Exit(1) 50 | } 51 | } 52 | 53 | func init() { 54 | // Parse persistent flags 55 | rootCmd.PersistentFlags().StringVar(&flagConfigFolder, "config-dir", flagConfigFolder, "Config folder") 56 | rootCmd.PersistentFlags().StringVarP(&flagConfigFile, "config", "c", flagConfigFile, "Config file") 57 | rootCmd.PersistentFlags().StringVarP(&flagCachePath, "cache", "d", flagCachePath, "Cache path") 58 | rootCmd.PersistentFlags().StringVarP(&flagLogFile, "log", "l", flagLogFile, "Log file") 59 | rootCmd.PersistentFlags().StringVarP(&flagLockFile, "lock", "f", flagLockFile, "Lock file") 60 | rootCmd.PersistentFlags().CountVarP(&flagLogLevel, "verbose", "v", "Verbose level") 61 | 62 | rootCmd.PersistentFlags().BoolVar(&flagDryRun, "dry-run", false, "Dry run mode") 63 | } 64 | 65 | func initCore(showAppInfo bool) { 66 | // Set core variables 67 | if !rootCmd.PersistentFlags().Changed("config") { 68 | flagConfigFile = filepath.Join(flagConfigFolder, flagConfigFile) 69 | } 70 | if !rootCmd.PersistentFlags().Changed("cache") { 71 | flagCachePath = filepath.Join(flagConfigFolder, flagCachePath) 72 | } 73 | if !rootCmd.PersistentFlags().Changed("log") { 74 | flagLogFile = filepath.Join(flagConfigFolder, flagLogFile) 75 | } 76 | if !rootCmd.PersistentFlags().Changed("lock") { 77 | flagLockFile = filepath.Join(flagConfigFolder, flagLockFile) 78 | } 79 | 80 | // Init Logging 81 | if err := logger.Init(flagLogLevel, flagLogFile); err != nil { 82 | log.WithError(err).Fatal("Failed to initialize logging") 83 | } 84 | 85 | log = logger.GetLogger("crop") 86 | 87 | // Init File Lock 88 | if err := acquireFileLock(); err != nil { 89 | log.WithError(err).Fatalf("Failed acquiring file lock for %q", flagLockFile) 90 | } 91 | 92 | // Init Config 93 | if err := config.Init(flagConfigFile); err != nil { 94 | log.WithError(err).Fatal("Failed to initialize config") 95 | } 96 | 97 | 
setConfigOverrides() 98 | 99 | // Init Cache 100 | if err := cache.Init(flagCachePath, flagLogLevel); err != nil { 101 | log.WithError(err).Fatal("Failed to initialize cache") 102 | } 103 | 104 | // Init Rclone 105 | if err := rclone.Init(config.Config); err != nil { 106 | log.WithError(err).Fatal("Failed to initialize rclone") 107 | } 108 | 109 | // Show App Info 110 | if showAppInfo { 111 | showUsing() 112 | } 113 | } 114 | 115 | func setConfigOverrides() { 116 | // set dry-run if enabled by flag 117 | if flagDryRun { 118 | config.Config.Rclone.DryRun = true 119 | } 120 | } 121 | 122 | func acquireFileLock() error { 123 | f, err := lockfile.New(flagLockFile) 124 | if err != nil { 125 | return err 126 | } 127 | 128 | flock = f 129 | 130 | // loop until lock has been acquired 131 | for { 132 | err = flock.TryLock() 133 | switch { 134 | case err == nil: 135 | // lock has been acquired 136 | return nil 137 | case err == lockfile.ErrBusy: 138 | // another instance is already running 139 | log.Warnf("There is another crop instance running, re-checking in 1 minute...") 140 | time.Sleep(1 * time.Minute) 141 | default: 142 | // an un-expected error, propagate down-stream 143 | return err 144 | } 145 | } 146 | } 147 | 148 | func releaseFileLock() { 149 | if err := flock.Unlock(); err != nil { 150 | log.WithError(err).Fatalf("Failed releasing file lock for %q", flagLockFile) 151 | } 152 | } 153 | 154 | func showUsing() { 155 | // show app info 156 | log.Infof("Using %s = %s (%s@%s)", stringutils.LeftJust("VERSION", " ", 10), 157 | runtime.Version, runtime.GitCommit, runtime.Timestamp) 158 | logger.ShowUsing() 159 | config.ShowUsing() 160 | cache.ShowUsing() 161 | log.Info("------------------") 162 | } 163 | -------------------------------------------------------------------------------- /cmd/sync.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/dustin/go-humanize" 6 | 
"github.com/l3uddz/crop/cache" 7 | "github.com/l3uddz/crop/config" 8 | "github.com/l3uddz/crop/rclone" 9 | "github.com/l3uddz/crop/syncer" 10 | "github.com/pkg/errors" 11 | "github.com/sirupsen/logrus" 12 | "github.com/spf13/cobra" 13 | "strings" 14 | "sync" 15 | "time" 16 | ) 17 | 18 | var ( 19 | flagSyncer string 20 | flagParallelism int 21 | flagDaisyChain bool 22 | ) 23 | 24 | var syncCmd = &cobra.Command{ 25 | Use: "sync", 26 | Short: "Perform syncer task(s)", 27 | Long: `This command can be used to trigger a sync.`, 28 | 29 | Run: func(cmd *cobra.Command, args []string) { 30 | // init core 31 | initCore(true) 32 | defer cache.Close() 33 | defer releaseFileLock() 34 | 35 | // create workers 36 | var wg sync.WaitGroup 37 | jobs := make(chan *syncer.Syncer, len(config.Config.Syncer)) 38 | 39 | for w := 1; w <= flagParallelism; w++ { 40 | wg.Add(1) 41 | go worker(&wg, jobs) 42 | } 43 | 44 | // iterate syncer's 45 | started := time.Now().UTC() 46 | 47 | for _, syncerConfig := range config.Config.Syncer { 48 | syncerConfig := syncerConfig 49 | 50 | slog := log.WithField("syncer", syncerConfig.Name) 51 | 52 | // skip disabled syncer(s) 53 | if !syncerConfig.Enabled { 54 | slog.Debug("Skipping disabled syncer") 55 | continue 56 | } 57 | 58 | // skip syncer specific chosen 59 | if flagSyncer != "" && !strings.EqualFold(syncerConfig.Name, flagSyncer) { 60 | slog.Debugf("Skipping syncer as not: %q", flagSyncer) 61 | continue 62 | } 63 | 64 | // create syncer 65 | syncr, err := syncer.New(config.Config, &syncerConfig, syncerConfig.Name, flagParallelism) 66 | if err != nil { 67 | slog.WithError(err).Error("Failed initializing syncer, skipping...") 68 | continue 69 | } 70 | 71 | serviceAccountCount := syncr.RemoteServiceAccountFiles.ServiceAccountsCount() 72 | if serviceAccountCount > 0 { 73 | syncr.Log.WithField("found_files", serviceAccountCount).Info("Loaded service accounts") 74 | } else { 75 | // no service accounts were loaded 76 | // check to see if any of the copy 
// performSync runs all configured tasks for a single syncer, in order:
// copies, syncs, server-side moves and (unless --no-dedupe) dedupes.
// It returns the first stage error, wrapped with stage context.
func performSync(s *syncer.Syncer) error {
	s.Log.Info("Running...")

	var liveRotateParams []string
	if s.GlobalConfig.Rclone.LiveRotate && s.RemoteServiceAccountFiles.ServiceAccountsCount() > 0 {
		// start web-server
		s.Ws.Run()
		defer s.Ws.Stop()

		// point rclone at the local web-server so service accounts can be
		// rotated live while transfers are in flight
		liveRotateParams = append(liveRotateParams,
			"--drive-service-account-url",
			fmt.Sprintf("http://%s:%d", s.Ws.Host, s.Ws.Port),
		)
	}

	/* Copies */
	if len(s.Config.Remotes.Copy) > 0 {
		s.Log.Info("Running copies...")

		if err := s.Copy(liveRotateParams, flagDaisyChain); err != nil {
			return errors.WithMessage(err, "failed performing all copies")
		}

		s.Log.Info("Finished copies!")
	}

	/* Sync */
	if len(s.Config.Remotes.Sync) > 0 {
		s.Log.Info("Running syncs...")

		if err := s.Sync(liveRotateParams, flagDaisyChain); err != nil {
			return errors.WithMessage(err, "failed performing all syncs")
		}

		s.Log.Info("Finished syncs!")
	}

	/* Move Server Side */
	if len(s.Config.Remotes.MoveServerSide) > 0 {
		s.Log.Info("Running move server-sides...")

		// server-side moves do not use live-rotate params
		if err := s.Move(nil); err != nil {
			return errors.WithMessage(err, "failed performing server-side moves")
		}

		s.Log.Info("Finished move server-sides!")
	}

	/* Dedupe */
	if !flagNoDedupe && len(s.Config.Remotes.Dedupe) > 0 {
		s.Log.Info("Running dedupes...")

		if err := s.Dedupe(nil); err != nil {
			return errors.WithMessage(err, "failed performing all dedupes")
		}

		s.Log.Info("Finished dedupes!")
	}

	s.Log.Info("Finished!")
	return nil
}
"github.com/rhysd/go-github-selfupdate/selfupdate" 9 | "github.com/spf13/cobra" 10 | "os" 11 | ) 12 | 13 | var updateCmd = &cobra.Command{ 14 | Use: "update", 15 | Short: "Update to latest version", 16 | Long: `This command can be used to self-update to the latest version.`, 17 | 18 | Run: func(cmd *cobra.Command, args []string) { 19 | // init core 20 | initCore(false) 21 | defer cache.Close() 22 | defer releaseFileLock() 23 | 24 | // parse current version 25 | v, err := semver.Parse(runtime.Version) 26 | if err != nil { 27 | log.WithError(err).Fatal("Failed parsing current build version") 28 | } 29 | 30 | // detect latest version 31 | log.Info("Checking for the latest version...") 32 | latest, found, err := selfupdate.DetectLatest("l3uddz/crop") 33 | if err != nil { 34 | log.WithError(err).Fatal("Failed determining latest available version") 35 | } 36 | 37 | // check version 38 | if !found || latest.Version.LTE(v) { 39 | log.Infof("Already using the latest version: %v", runtime.Version) 40 | return 41 | } 42 | 43 | // ask update 44 | log.Infof("Do you want to update to the latest version: %v? 
(y/n):", latest.Version) 45 | input, err := bufio.NewReader(os.Stdin).ReadString('\n') 46 | if err != nil || (input != "y\n" && input != "n\n") { 47 | log.Fatal("Failed validating input...") 48 | } else if input == "n\n" { 49 | return 50 | } 51 | 52 | // get existing executable path 53 | exe, err := os.Executable() 54 | if err != nil { 55 | log.WithError(err).Fatal("Failed locating current executable path") 56 | } 57 | 58 | if err := selfupdate.UpdateTo(latest.AssetURL, exe); err != nil { 59 | log.WithError(err).Fatal("Failed updating existing binary to latest release") 60 | } 61 | 62 | log.Infof("Successfully updated to the latest version: %v", latest.Version) 63 | }, 64 | } 65 | 66 | func init() { 67 | rootCmd.AddCommand(updateCmd) 68 | } 69 | -------------------------------------------------------------------------------- /cmd/upload.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/dustin/go-humanize" 6 | "github.com/l3uddz/crop/cache" 7 | "github.com/l3uddz/crop/config" 8 | "github.com/l3uddz/crop/rclone" 9 | "github.com/l3uddz/crop/uploader" 10 | "github.com/pkg/errors" 11 | "github.com/shirou/gopsutil/disk" 12 | "github.com/sirupsen/logrus" 13 | "github.com/spf13/cobra" 14 | "strings" 15 | "time" 16 | ) 17 | 18 | var ( 19 | flagNoCheck bool 20 | ) 21 | 22 | var uploadCmd = &cobra.Command{ 23 | Use: "upload", 24 | Short: "Perform uploader task(s)", 25 | Long: `This command can be used to trigger an uploader check, clean & upload.`, 26 | 27 | Run: func(cmd *cobra.Command, args []string) { 28 | // init core 29 | initCore(true) 30 | defer cache.Close() 31 | defer releaseFileLock() 32 | 33 | // iterate uploader's 34 | started := time.Now().UTC() 35 | 36 | for _, uploaderConfig := range config.Config.Uploader { 37 | log := log.WithField("uploader", uploaderConfig.Name) 38 | 39 | // skip disabled uploader(s) 40 | if !uploaderConfig.Enabled { 41 | log.Debug("Skipping 
disabled uploader") 42 | continue 43 | } 44 | 45 | // skip uploader specific chosen 46 | if flagUploader != "" && !strings.EqualFold(uploaderConfig.Name, flagUploader) { 47 | log.Debugf("Skipping uploader as not: %q", flagUploader) 48 | continue 49 | } 50 | 51 | // create uploader 52 | upload, err := uploader.New(config.Config, &uploaderConfig, uploaderConfig.Name) 53 | if err != nil { 54 | log.WithError(err).Error("Failed initializing uploader, skipping...") 55 | continue 56 | } 57 | 58 | serviceAccountCount := upload.RemoteServiceAccountFiles.ServiceAccountsCount() 59 | if serviceAccountCount > 0 { 60 | upload.Log.WithField("found_files", serviceAccountCount).Info("Loaded service accounts") 61 | } else { 62 | // no service accounts were loaded 63 | // check to see if any of the copy or move remote(s) are banned 64 | banned, expiry := rclone.AnyRemotesBanned(upload.Config.Remotes.Copy) 65 | if banned && !expiry.IsZero() { 66 | // one of the copy remotes is banned, abort 67 | upload.Log.WithFields(logrus.Fields{ 68 | "expires_time": expiry, 69 | "expires_in": humanize.Time(expiry), 70 | }).Warn("Cannot proceed with upload as a copy remote is banned") 71 | continue 72 | } 73 | 74 | banned, expiry = rclone.AnyRemotesBanned([]string{upload.Config.Remotes.Move}) 75 | if banned && !expiry.IsZero() { 76 | // the move remote is banned, abort 77 | upload.Log.WithFields(logrus.Fields{ 78 | "expires_time": expiry, 79 | "expires_in": humanize.Time(expiry), 80 | }).Warn("Cannot proceed with upload as the move remote is banned") 81 | continue 82 | } 83 | } 84 | 85 | log.Info("Uploader commencing...") 86 | 87 | // refresh details about files to upload 88 | if err := upload.RefreshLocalFiles(); err != nil { 89 | upload.Log.WithError(err).Error("Failed refreshing details of files to upload") 90 | continue 91 | } 92 | 93 | if len(upload.LocalFiles) == 0 { 94 | // there are no files to upload 95 | upload.Log.Info("There were no files found, skipping...") 96 | continue 97 | } 98 | 99 
| // check if upload criteria met 100 | forced := false 101 | 102 | if !flagNoCheck { 103 | // no check was not enabled 104 | res, err := upload.Check() 105 | if err != nil { 106 | upload.Log.WithError(err).Error("Failed checking if uploader check conditions met, skipping...") 107 | continue 108 | } 109 | 110 | if !res.Passed { 111 | // get free disk space 112 | freeDiskSpace := "Unknown" 113 | du, err := disk.Usage(upload.Config.LocalFolder) 114 | if err == nil { 115 | freeDiskSpace = humanize.IBytes(du.Free) 116 | } 117 | 118 | // check available disk space 119 | switch { 120 | case err != nil && upload.Config.Check.MinFreeSpace > 0: 121 | // error checking free space 122 | upload.Log.WithError(err).Errorf("Failed checking available free space for: %q", 123 | upload.Config.LocalFolder) 124 | case err == nil && du.Free < upload.Config.Check.MinFreeSpace: 125 | // free space has gone below the free space threshold 126 | forced = true 127 | upload.Log.WithFields(logrus.Fields{ 128 | "until": res.Info, 129 | "free_disk": freeDiskSpace, 130 | }).Infof("Upload conditions not met, however, proceeding as free space below %s", 131 | humanize.IBytes(upload.Config.Check.MinFreeSpace)) 132 | default: 133 | break 134 | } 135 | 136 | if !forced { 137 | upload.Log.WithFields(logrus.Fields{ 138 | "until": res.Info, 139 | "free_disk": freeDiskSpace, 140 | }).Info("Upload conditions not met, skipping...") 141 | continue 142 | } 143 | 144 | // the upload was forced as min_free_size was met 145 | } 146 | } 147 | 148 | // perform upload 149 | if err := performUpload(upload, forced); err != nil { 150 | upload.Log.WithError(err).Error("Error occurred while running uploader, skipping...") 151 | continue 152 | } 153 | } 154 | 155 | log.Infof("Finished in: %v", humanize.RelTime(started, time.Now().UTC(), "", "")) 156 | }, 157 | } 158 | 159 | func init() { 160 | rootCmd.AddCommand(uploadCmd) 161 | 162 | uploadCmd.Flags().StringVarP(&flagUploader, "uploader", "u", "", "Run for a specific 
uploader") 163 | 164 | uploadCmd.Flags().BoolVar(&flagNoCheck, "no-check", false, "Ignore check and run") 165 | uploadCmd.Flags().BoolVar(&flagNoDedupe, "no-dedupe", false, "Ignore dedupe tasks for uploader") 166 | } 167 | 168 | func performUpload(u *uploader.Uploader, forced bool) error { 169 | u.Log.Info("Running...") 170 | 171 | var liveRotateParams []string 172 | 173 | if u.GlobalConfig.Rclone.LiveRotate && u.RemoteServiceAccountFiles.ServiceAccountsCount() > 0 { 174 | // start web-server 175 | u.Ws.Run() 176 | defer u.Ws.Stop() 177 | 178 | liveRotateParams = append(liveRotateParams, 179 | "--drive-service-account-url", 180 | fmt.Sprintf("http://%s:%d", u.Ws.Host, u.Ws.Port), 181 | ) 182 | } 183 | 184 | /* Cleans */ 185 | if u.Config.Hidden.Enabled { 186 | err := performClean(u) 187 | if err != nil { 188 | return errors.Wrap(err, "failed clearing remotes") 189 | } 190 | } 191 | 192 | /* Generate Additional Rclone Params */ 193 | var additionalRcloneParams []string 194 | 195 | switch forced { 196 | case false: 197 | if !flagNoCheck || u.Config.Check.Forced { 198 | // if no-check is false (default) or check is forced via config, include check params 199 | additionalRcloneParams = u.CheckRcloneParams() 200 | } 201 | default: 202 | break 203 | } 204 | 205 | // add live rotate params set 206 | if len(liveRotateParams) > 0 { 207 | additionalRcloneParams = append(additionalRcloneParams, liveRotateParams...) 
208 | } 209 | 210 | /* Copies */ 211 | if len(u.Config.Remotes.Copy) > 0 { 212 | u.Log.Info("Running copies...") 213 | 214 | if err := u.Copy(additionalRcloneParams); err != nil { 215 | return errors.WithMessage(err, "failed performing all copies") 216 | } 217 | 218 | u.Log.Info("Finished copies!") 219 | } 220 | 221 | /* Move */ 222 | if len(u.Config.Remotes.Move) > 0 { 223 | u.Log.Info("Running move...") 224 | 225 | if err := u.Move(false, additionalRcloneParams); err != nil { 226 | return errors.WithMessage(err, "failed performing move") 227 | } 228 | 229 | u.Log.Info("Finished move!") 230 | } 231 | 232 | /* Move Server Side */ 233 | if len(u.Config.Remotes.MoveServerSide) > 0 { 234 | u.Log.Info("Running move server-sides...") 235 | 236 | if err := u.Move(true, nil); err != nil { 237 | return errors.WithMessage(err, "failed performing server-side moves") 238 | } 239 | 240 | u.Log.Info("Finished move server-sides!") 241 | } 242 | 243 | /* Dedupe */ 244 | if !flagNoDedupe && len(u.Config.Remotes.Dedupe) > 0 { 245 | u.Log.Info("Running dedupes...") 246 | 247 | if err := u.Dedupe(nil); err != nil { 248 | return errors.WithMessage(err, "failed performing dedupes") 249 | } 250 | 251 | u.Log.Info("Finished dupes!") 252 | } 253 | 254 | u.Log.Info("Finished!") 255 | return nil 256 | } 257 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/logger" 6 | "github.com/l3uddz/crop/stringutils" 7 | "gopkg.in/yaml.v2" 8 | "io/ioutil" 9 | ) 10 | 11 | type Configuration struct { 12 | Rclone RcloneConfig 13 | Uploader []UploaderConfig 14 | Syncer []SyncerConfig 15 | } 16 | 17 | /* Vars */ 18 | 19 | var ( 20 | Config *Configuration 21 | 22 | // internal 23 | cfgPath = "" 24 | log = logger.GetLogger("cfg") 25 | ) 26 | 27 | /* Public */ 28 | 29 | func Init(configFilePath string) error 
{ 30 | // set package variables 31 | cfgPath = configFilePath 32 | 33 | // read config file 34 | b, err := ioutil.ReadFile(configFilePath) 35 | if err != nil { 36 | return fmt.Errorf("failed reading config file: %w", err) 37 | } 38 | 39 | // decode config file 40 | if err := yaml.Unmarshal(b, &Config); err != nil { 41 | return fmt.Errorf("failed decoding config file: %w", err) 42 | } 43 | 44 | return nil 45 | } 46 | 47 | func ShowUsing() { 48 | log.Infof("Using %s = %q", stringutils.LeftJust("CONFIG", " ", 10), cfgPath) 49 | } 50 | -------------------------------------------------------------------------------- /config/rclone.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type RcloneConfig struct { 4 | Path string `yaml:"path"` 5 | Config string `yaml:"config"` 6 | Stats string `yaml:"stats"` 7 | LiveRotate bool `yaml:"live_rotate"` 8 | DryRun bool `yaml:"dry_run"` 9 | ServiceAccountRemotes map[string][]string `yaml:"service_account_remotes"` 10 | GlobalParams map[string]RcloneParams `yaml:"global_params"` 11 | } 12 | 13 | type RcloneServerSide struct { 14 | From string 15 | To string 16 | } 17 | 18 | type RcloneParams struct { 19 | Copy []string 20 | Move []string 21 | MoveServerSide []string `yaml:"move_server_side"` 22 | Sync []string 23 | Dedupe []string 24 | } 25 | -------------------------------------------------------------------------------- /config/syncer.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type SyncerRemotes struct { 4 | Copy []string 5 | Sync []string 6 | MoveServerSide []RcloneServerSide `yaml:"move_server_side"` 7 | Dedupe []string 8 | } 9 | 10 | type SyncerRcloneParams struct { 11 | Copy []string 12 | GlobalCopy string `yaml:"global_copy"` 13 | Sync []string 14 | GlobalSync string `yaml:"global_sync"` 15 | MoveServerSide []string `yaml:"move_server_side"` 16 | GlobalMoveServerSide string 
`yaml:"global_move_server_side"` 17 | Dedupe []string 18 | GlobalDedupe string `yaml:"global_dedupe"` 19 | } 20 | 21 | type SyncerConfig struct { 22 | Name string 23 | Enabled bool 24 | SourceRemote string `yaml:"source_remote"` 25 | Remotes SyncerRemotes 26 | RcloneParams SyncerRcloneParams `yaml:"rclone_params"` 27 | } 28 | -------------------------------------------------------------------------------- /config/uploader.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | type UploaderCheck struct { 4 | Forced bool 5 | MinFreeSpace uint64 `yaml:"min_free_space"` 6 | Type string 7 | Limit uint64 8 | Exclude []string 9 | Include []string 10 | } 11 | 12 | type UploaderHidden struct { 13 | Enabled bool 14 | Type string 15 | Folder string 16 | Cleanup bool 17 | Workers int 18 | } 19 | 20 | type UploaderRemotes struct { 21 | Clean []string 22 | Copy []string 23 | Move string 24 | MoveServerSide []RcloneServerSide `yaml:"move_server_side"` 25 | Dedupe []string 26 | } 27 | 28 | type UploaderRcloneParams struct { 29 | Copy []string 30 | GlobalCopy string `yaml:"global_copy"` 31 | Move []string 32 | GlobalMove string `yaml:"global_move"` 33 | MoveServerSide []string `yaml:"move_server_side"` 34 | GlobalMoveServerSide string `yaml:"global_move_server_side"` 35 | Dedupe []string 36 | GlobalDedupe string `yaml:"global_dedupe"` 37 | } 38 | 39 | type UploaderConfig struct { 40 | Name string 41 | Enabled bool 42 | Check UploaderCheck 43 | Hidden UploaderHidden 44 | LocalFolder string `yaml:"local_folder"` 45 | Remotes UploaderRemotes 46 | RcloneParams UploaderRcloneParams `yaml:"rclone_params"` 47 | } 48 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/l3uddz/crop 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/ReneKroon/ttlcache/v2 v2.9.0 7 | github.com/andybalholm/brotli 
v1.0.4 // indirect 8 | github.com/blang/semver v3.5.1+incompatible 9 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 10 | github.com/dgraph-io/badger/v2 v2.2007.4 // indirect 11 | github.com/dgraph-io/ristretto v0.1.0 // indirect 12 | github.com/dustin/go-humanize v1.0.0 13 | github.com/go-cmd/cmd v1.3.1 14 | github.com/gofiber/fiber/v2 v2.22.0 15 | github.com/golang/glog v1.0.0 // indirect 16 | github.com/golang/snappy v0.0.4 // indirect 17 | github.com/google/go-querystring v1.1.0 // indirect 18 | github.com/klauspost/compress v1.13.6 // indirect 19 | github.com/mattn/go-colorable v0.1.12 // indirect 20 | github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect 21 | github.com/natefinch/lumberjack v2.0.0+incompatible 22 | github.com/nightlyone/lockfile v1.0.0 23 | github.com/onsi/ginkgo v1.12.0 // indirect 24 | github.com/onsi/gomega v1.9.0 // indirect 25 | github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 26 | github.com/pkg/errors v0.9.1 27 | github.com/rhysd/go-github-selfupdate v1.2.3 28 | github.com/shirou/gopsutil v3.21.11+incompatible 29 | github.com/sirupsen/logrus v1.8.1 30 | github.com/sony/sonyflake v1.0.0 // indirect 31 | github.com/spf13/cobra v1.2.1 32 | github.com/ulikunitz/xz v0.5.10 // indirect 33 | github.com/x-cray/logrus-prefixed-formatter v0.5.2 34 | github.com/yale8848/gorpool v0.1.0 35 | github.com/yusufpapurcu/wmi v1.2.2 // indirect 36 | github.com/zippoxer/bow v0.0.0-20200229231453-bf1012ae7ab9 37 | golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect 38 | golang.org/x/net v0.0.0-20211203184738-4852103109b8 // indirect 39 | golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect 40 | golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 41 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect 42 | google.golang.org/protobuf v1.27.1 // indirect 43 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 44 | gopkg.in/yaml.v2 v2.4.0 45 | ) 46 | 
-------------------------------------------------------------------------------- /logger/log.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "runtime" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/sirupsen/logrus" 8 | prefixed "github.com/x-cray/logrus-prefixed-formatter" 9 | ) 10 | 11 | var ( 12 | prefixLen = 14 13 | loggingFilePath string 14 | ) 15 | 16 | /* Public */ 17 | 18 | func Init(logLevel int, logFilePath string) error { 19 | var useLevel logrus.Level 20 | 21 | // determine logging level 22 | switch logLevel { 23 | case 0: 24 | useLevel = logrus.InfoLevel 25 | case 1: 26 | useLevel = logrus.DebugLevel 27 | default: 28 | useLevel = logrus.TraceLevel 29 | } 30 | 31 | // set rotating file hook 32 | fileLogFormatter := &prefixed.TextFormatter{} 33 | fileLogFormatter.FullTimestamp = true 34 | fileLogFormatter.QuoteEmptyFields = true 35 | fileLogFormatter.DisableColors = true 36 | fileLogFormatter.ForceFormatting = true 37 | 38 | rotateFileHook, err := NewRotateFileHook(RotateFileConfig{ 39 | Filename: logFilePath, 40 | MaxSize: 5, 41 | MaxBackups: 10, 42 | MaxAge: 90, 43 | Level: useLevel, 44 | Formatter: fileLogFormatter, 45 | }) 46 | 47 | if err != nil { 48 | logrus.WithError(err).Errorf("Failed initializing rotating file log to %q", logFilePath) 49 | return errors.Wrap(err, "failed initializing rotating file hook") 50 | } 51 | 52 | logrus.AddHook(rotateFileHook) 53 | 54 | // set console formatter 55 | logFormatter := &prefixed.TextFormatter{} 56 | logFormatter.FullTimestamp = true 57 | logFormatter.QuoteEmptyFields = true 58 | logFormatter.ForceFormatting = true 59 | 60 | if runtime.GOOS == "windows" { 61 | // disable colors on windows 62 | logFormatter.DisableColors = true 63 | } 64 | 65 | logrus.SetFormatter(logFormatter) 66 | 67 | // set logging level 68 | logrus.SetLevel(useLevel) 69 | 70 | // set globals 71 | loggingFilePath = logFilePath 72 | 73 | return nil 74 | } 75 | 76 
| func ShowUsing() { 77 | log := GetLogger("log") 78 | 79 | log.Infof("Using %s = %s", stringLeftJust("LOG_LEVEL", " ", 10), 80 | logrus.GetLevel().String()) 81 | log.Infof("Using %s = %q", stringLeftJust("LOG", " ", 10), loggingFilePath) 82 | } 83 | 84 | func GetLogger(prefix string) *logrus.Entry { 85 | if len(prefix) > prefixLen { 86 | prefixLen = len(prefix) 87 | } 88 | 89 | return logrus.WithFields(logrus.Fields{"prefix": stringLeftJust(prefix, " ", prefixLen)}) 90 | } 91 | -------------------------------------------------------------------------------- /logger/rotatefilehook.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | import ( 4 | "io" 5 | 6 | "github.com/natefinch/lumberjack" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type RotateFileConfig struct { 11 | Filename string 12 | MaxSize int 13 | MaxBackups int 14 | MaxAge int 15 | Level logrus.Level 16 | Formatter logrus.Formatter 17 | } 18 | 19 | type RotateFileHook struct { 20 | Config RotateFileConfig 21 | logWriter io.Writer 22 | } 23 | 24 | func NewRotateFileHook(config RotateFileConfig) (logrus.Hook, error) { 25 | hook := RotateFileHook{ 26 | Config: config, 27 | } 28 | hook.logWriter = &lumberjack.Logger{ 29 | Filename: config.Filename, 30 | MaxSize: config.MaxSize, 31 | MaxBackups: config.MaxBackups, 32 | MaxAge: config.MaxAge, 33 | } 34 | 35 | return &hook, nil 36 | } 37 | 38 | func (hook *RotateFileHook) Levels() []logrus.Level { 39 | return logrus.AllLevels[:hook.Config.Level+1] 40 | } 41 | 42 | func (hook *RotateFileHook) Fire(entry *logrus.Entry) (err error) { 43 | b, err := hook.Config.Formatter.Format(entry) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | _, _ = hook.logWriter.Write(b) 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /logger/util.go: -------------------------------------------------------------------------------- 1 | package logger 2 | 3 | 
// stringLeftJust left-justifies text to size characters by appending
// repetitions of filler (assumed to be a single character).
//
// Robustness fix: if text is already size or longer, it is returned
// unchanged. The previous implementation called strings.Repeat with a
// negative count in that case, which panics.
func stringLeftJust(text string, filler string, size int) string {
	pad := size - len(text)
	if pad <= 0 {
		return text
	}
	return text + strings.Repeat(filler, pad)
}
// GetCurrentBinaryPath returns the absolute directory containing the running
// binary. If that cannot be resolved it falls back to the current working
// directory, and panics only when neither can be determined.
func GetCurrentBinaryPath() string {
	if dir, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {
		return dir
	}

	// fall back to the working directory
	dir, err := os.Getwd()
	if err != nil {
		panic("failed to determine current binary location")
	}

	return dir
}
| "os" 7 | "path/filepath" 8 | "strings" 9 | "time" 10 | ) 11 | 12 | var ( 13 | log = logger.GetLogger("paths") 14 | ) 15 | 16 | type Path struct { 17 | Path string 18 | RealPath string 19 | RelativeRealPath string 20 | FileName string 21 | Directory string 22 | IsDir bool 23 | Size int64 24 | ModifiedTime time.Time 25 | } 26 | 27 | type callbackAllowed func(string) *string 28 | 29 | func GetPathsInFolder(folder string, includeFiles bool, includeFolders bool, acceptFn callbackAllowed) ([]Path, 30 | uint64) { 31 | var paths []Path 32 | var size uint64 = 0 33 | 34 | if _, err := os.Stat(folder); os.IsNotExist(err) { 35 | log.WithError(err).Error("Failed finding paths within folder") 36 | return paths, size 37 | } 38 | 39 | err := filepath.Walk(folder, func(path string, info os.FileInfo, err error) error { 40 | // handle err 41 | if err != nil { 42 | return err 43 | } 44 | 45 | // skip files if not wanted 46 | if !includeFiles && !info.IsDir() { 47 | log.Tracef("Skipping file: %s", path) 48 | return nil 49 | } 50 | 51 | // skip folders if not wanted 52 | if !includeFolders && info.IsDir() { 53 | log.Tracef("Skipping folder: %s", path) 54 | return nil 55 | } 56 | 57 | // skip paths rejected by accept callback 58 | realPath := path 59 | finalPath := path 60 | relativeRealPath := strings.Replace(realPath, folder, "", 1) 61 | 62 | if strings.HasPrefix(relativeRealPath, "/") { 63 | relativeRealPath = strings.Replace(relativeRealPath, "/", "", 1) 64 | } 65 | 66 | if acceptFn != nil { 67 | acceptedPath := acceptFn(path) 68 | if acceptedPath == nil { 69 | log.Tracef("Skipping rejected path: %s", path) 70 | return nil 71 | } 72 | 73 | finalPath = *acceptedPath 74 | } 75 | 76 | foundPath := Path{ 77 | Path: finalPath, 78 | RealPath: realPath, 79 | RelativeRealPath: relativeRealPath, 80 | FileName: info.Name(), 81 | Directory: filepath.Dir(path), 82 | IsDir: info.IsDir(), 83 | Size: info.Size(), 84 | ModifiedTime: info.ModTime(), 85 | } 86 | 87 | paths = append(paths, foundPath) 
88 | size += uint64(info.Size()) 89 | 90 | return nil 91 | }) 92 | 93 | if err != nil { 94 | log.WithError(err).Errorf("Failed to retrieve paths from: %s", folder) 95 | } 96 | 97 | return paths, size 98 | } 99 | -------------------------------------------------------------------------------- /rclone/copy.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "fmt" 5 | "github.com/go-cmd/cmd" 6 | "github.com/pkg/errors" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | /* Public */ 11 | 12 | func Copy(from string, to string, serviceAccounts []*RemoteServiceAccount, 13 | additionalRcloneParams []string) (bool, int, error) { 14 | // set variables 15 | rLog := log.WithFields(logrus.Fields{ 16 | "action": CmdCopy, 17 | "from": from, 18 | "to": to, 19 | }) 20 | result := false 21 | 22 | // generate required rclone parameters 23 | params := []string{ 24 | CmdCopy, 25 | from, 26 | to, 27 | } 28 | 29 | baseParams, err := getBaseParams() 30 | if err != nil { 31 | return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q -> %q", 32 | CmdCopy, from, to) 33 | } 34 | params = append(params, baseParams...) 35 | extraParams := additionalRcloneParams 36 | 37 | additionalParams, err := getAdditionalParams(CmdCopy, extraParams) 38 | if err != nil { 39 | return false, 1, errors.WithMessagef(err, "failed generating additionalParams to %s: %q -> %q", 40 | CmdCopy, from, to) 41 | } 42 | params = append(params, additionalParams...) 
43 | rLog.Debugf("Generated params: %v", params) 44 | 45 | // generate required rclone env 46 | var rcloneEnv []string 47 | if len(serviceAccounts) > 0 { 48 | // iterate service accounts, creating env 49 | for _, env := range serviceAccounts { 50 | if env == nil { 51 | continue 52 | } 53 | 54 | v := env 55 | rcloneEnv = append(rcloneEnv, fmt.Sprintf("%s=%s", v.RemoteEnvVar, v.ServiceAccountPath)) 56 | } 57 | } 58 | rLog.Debugf("Generated rclone env: %v", rcloneEnv) 59 | 60 | // setup cmd 61 | cmdOptions := cmd.Options{ 62 | Buffered: false, 63 | Streaming: true, 64 | } 65 | rcloneCmd := cmd.NewCmdOptions(cmdOptions, cfg.Rclone.Path, params...) 66 | rcloneCmd.Env = rcloneEnv 67 | 68 | // live stream logs 69 | doneChan := make(chan struct{}) 70 | go func() { 71 | defer close(doneChan) 72 | 73 | for rcloneCmd.Stdout != nil || rcloneCmd.Stderr != nil { 74 | select { 75 | case line, open := <-rcloneCmd.Stdout: 76 | if !open { 77 | rcloneCmd.Stdout = nil 78 | continue 79 | } 80 | log.Info(line) 81 | case line, open := <-rcloneCmd.Stderr: 82 | if !open { 83 | rcloneCmd.Stderr = nil 84 | continue 85 | } 86 | log.Info(line) 87 | } 88 | } 89 | }() 90 | 91 | // run command 92 | rLog.Debug("Starting...") 93 | 94 | status := <-rcloneCmd.Start() 95 | <-doneChan 96 | 97 | // check status 98 | switch status.Exit { 99 | case ExitSuccess: 100 | result = true 101 | default: 102 | break 103 | } 104 | 105 | rLog.WithField("exit_code", status.Exit).Debug("Finished") 106 | return result, status.Exit, status.Error 107 | } 108 | -------------------------------------------------------------------------------- /rclone/dedupe.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "github.com/go-cmd/cmd" 5 | "github.com/pkg/errors" 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | /* Public */ 10 | 11 | func Dedupe(remotePath string, additionalRcloneParams []string) (bool, int, error) { 12 | // set variables 13 | rLog := 
log.WithFields(logrus.Fields{ 14 | "action": CmdDedupe, 15 | "remote_path": remotePath, 16 | }) 17 | result := false 18 | 19 | // generate required rclone parameters 20 | params := []string{ 21 | CmdDedupe, 22 | remotePath, 23 | } 24 | 25 | baseParams, err := getBaseParams() 26 | if err != nil { 27 | return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q", CmdDedupe, 28 | remotePath) 29 | } 30 | 31 | params = append(params, baseParams...) 32 | 33 | additionalParams, err := getAdditionalParams(CmdDedupe, additionalRcloneParams) 34 | if err != nil { 35 | return false, 1, errors.WithMessagef(err, "failed generating additionalParams to %s: %q", 36 | CmdDedupe, remotePath) 37 | } 38 | 39 | params = append(params, additionalParams...) 40 | rLog.Debugf("Generated params: %v", params) 41 | 42 | // setup cmd 43 | cmdOptions := cmd.Options{ 44 | Buffered: false, 45 | Streaming: true, 46 | } 47 | rcloneCmd := cmd.NewCmdOptions(cmdOptions, cfg.Rclone.Path, params...) 48 | 49 | // live stream logs 50 | doneChan := make(chan struct{}) 51 | go func() { 52 | defer close(doneChan) 53 | 54 | for rcloneCmd.Stdout != nil || rcloneCmd.Stderr != nil { 55 | select { 56 | case line, open := <-rcloneCmd.Stdout: 57 | if !open { 58 | rcloneCmd.Stdout = nil 59 | continue 60 | } 61 | log.Info(line) 62 | case line, open := <-rcloneCmd.Stderr: 63 | if !open { 64 | rcloneCmd.Stderr = nil 65 | continue 66 | } 67 | log.Info(line) 68 | } 69 | } 70 | }() 71 | 72 | // run command 73 | rLog.Debug("Starting...") 74 | 75 | status := <-rcloneCmd.Start() 76 | <-doneChan 77 | 78 | // check status 79 | switch status.Exit { 80 | case ExitSuccess: 81 | result = true 82 | default: 83 | break 84 | } 85 | 86 | rLog.WithField("exit_code", status.Exit).Debug("Finished") 87 | return result, status.Exit, status.Error 88 | } 89 | -------------------------------------------------------------------------------- /rclone/deletefile.go: 
-------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "github.com/go-cmd/cmd" 5 | "github.com/pkg/errors" 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | /* Public */ 10 | 11 | func DeleteFile(remoteFilePath string) (bool, int, error) { 12 | // set variables 13 | rLog := log.WithFields(logrus.Fields{ 14 | "action": CmdDeleteFile, 15 | "remote_path": remoteFilePath, 16 | }) 17 | result := false 18 | 19 | // generate required rclone parameters 20 | params := []string{ 21 | CmdDeleteFile, 22 | remoteFilePath, 23 | } 24 | 25 | baseParams, err := getBaseParams() 26 | if err != nil { 27 | return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q", CmdDeleteFile, 28 | remoteFilePath) 29 | } 30 | 31 | params = append(params, baseParams...) 32 | rLog.Debugf("Generated params: %v", params) 33 | 34 | // remove file 35 | rcloneCmd := cmd.NewCmd(cfg.Rclone.Path, params...) 36 | status := <-rcloneCmd.Start() 37 | 38 | // check status 39 | switch status.Exit { 40 | case ExitSuccess: 41 | result = true 42 | default: 43 | break 44 | } 45 | 46 | rLog.WithField("exit_code", status.Exit).Debug("Finished") 47 | return result, status.Exit, status.Error 48 | } 49 | -------------------------------------------------------------------------------- /rclone/enum.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | const ( 4 | CmdCopy string = "copy" 5 | CmdMove string = "move" 6 | CmdSync string = "sync" 7 | CmdDeleteFile string = "deletefile" 8 | CmdDeleteDir string = "rmdir" 9 | CmdDeleteDirs string = "rmdirs" 10 | CmdDedupe string = "dedupe" 11 | ) 12 | 13 | const ( 14 | ExitSuccess int = iota 15 | ExitSyntaxError 16 | ExitErrorUnknown 17 | ExitDirectoryNotFound 18 | ExitFileNotFound 19 | ExitTemporaryError 20 | ExitLessSeriousError 21 | ExitFatalError 22 | ExitTransferExceeded 23 | ) 24 | 25 | type GlobalParamType int 26 | 27 | const ( 28 | 
// IncludeExcludeToFilters converts include/exclude glob lists into rclone
// --filter arguments. Excludes are emitted first, then includes; when any
// includes are present a trailing "- *" catch-all is appended so everything
// not explicitly included is rejected.
// See: https://forum.rclone.org/t/filter-or-include-exclude-help-needed/10890/2
func IncludeExcludeToFilters(includes []string, excludes []string) []string {
	params := make([]string, 0)

	for _, exclude := range excludes {
		params = append(params, "--filter", fmt.Sprintf("- %s", exclude))
	}

	for _, include := range includes {
		params = append(params, "--filter", fmt.Sprintf("+ %s", include))
	}

	if len(includes) > 0 {
		// without the catch-all reject, rclone would still transfer files
		// that match no filter at all
		params = append(params, "--filter", "- *")
	}

	return params
}
(bool, int, error) { 14 | // set variables 15 | rLog := log.WithFields(logrus.Fields{ 16 | "action": CmdMove, 17 | "from": from, 18 | "to": to, 19 | }) 20 | result := false 21 | 22 | // generate required rclone parameters 23 | params := []string{ 24 | CmdMove, 25 | from, 26 | to, 27 | } 28 | 29 | baseParams, err := getBaseParams() 30 | if err != nil { 31 | return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q -> %q", 32 | CmdMove, from, to) 33 | } 34 | params = append(params, baseParams...) 35 | 36 | extraParams := additionalRcloneParams 37 | if serverSide { 38 | // add server side parameter 39 | extraParams = append(extraParams, "--drive-server-side-across-configs") 40 | } 41 | 42 | additionalParams, err := getAdditionalParams(CmdMove, extraParams) 43 | if err != nil { 44 | return false, 1, errors.WithMessagef(err, "failed generating additionalParams to %s: %q -> %q", 45 | CmdMove, from, to) 46 | } 47 | params = append(params, additionalParams...) 48 | rLog.Debugf("Generated params: %v", params) 49 | 50 | // generate required rclone env 51 | var rcloneEnv []string 52 | if len(serviceAccounts) > 0 { 53 | // iterate service accounts, creating env 54 | for _, env := range serviceAccounts { 55 | if env == nil { 56 | continue 57 | } 58 | 59 | v := env 60 | rcloneEnv = append(rcloneEnv, fmt.Sprintf("%s=%s", v.RemoteEnvVar, v.ServiceAccountPath)) 61 | } 62 | } 63 | rLog.Debugf("Generated rclone env: %v", rcloneEnv) 64 | 65 | // setup cmd 66 | cmdOptions := cmd.Options{ 67 | Buffered: false, 68 | Streaming: true, 69 | } 70 | rcloneCmd := cmd.NewCmdOptions(cmdOptions, cfg.Rclone.Path, params...) 
71 | rcloneCmd.Env = rcloneEnv 72 | 73 | // live stream logs 74 | doneChan := make(chan struct{}) 75 | go func() { 76 | defer close(doneChan) 77 | 78 | for rcloneCmd.Stdout != nil || rcloneCmd.Stderr != nil { 79 | select { 80 | case line, open := <-rcloneCmd.Stdout: 81 | if !open { 82 | rcloneCmd.Stdout = nil 83 | continue 84 | } 85 | log.Info(line) 86 | case line, open := <-rcloneCmd.Stderr: 87 | if !open { 88 | rcloneCmd.Stderr = nil 89 | continue 90 | } 91 | log.Info(line) 92 | } 93 | } 94 | }() 95 | 96 | // run command 97 | rLog.Debug("Starting...") 98 | 99 | status := <-rcloneCmd.Start() 100 | <-doneChan 101 | 102 | // check status 103 | switch status.Exit { 104 | case ExitSuccess: 105 | result = true 106 | default: 107 | break 108 | } 109 | 110 | rLog.WithField("exit_code", status.Exit).Debug("Finished") 111 | return result, status.Exit, status.Error 112 | } 113 | -------------------------------------------------------------------------------- /rclone/param.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "github.com/l3uddz/crop/config" 5 | ) 6 | 7 | /* Public */ 8 | 9 | func GetGlobalParams(gp GlobalParamType, name string) []string { 10 | var params []string 11 | 12 | p, ok := cfg.Rclone.GlobalParams[name] 13 | if !ok { 14 | return params 15 | } 16 | 17 | switch gp { 18 | case GlobalCopyParams: 19 | params = p.Copy 20 | case GlobalMoveParams: 21 | params = p.Move 22 | case GlobalMoveServerSideParams: 23 | params = p.MoveServerSide 24 | case GlobalSyncParams: 25 | params = p.Sync 26 | case GlobalDedupeParams: 27 | params = p.Dedupe 28 | default: 29 | break 30 | } 31 | 32 | return params 33 | } 34 | 35 | /* Private */ 36 | 37 | func getBaseParams() ([]string, error) { 38 | var params []string 39 | 40 | // dry run 41 | if cfg.Rclone.DryRun { 42 | params = append(params, "--dry-run") 43 | } 44 | 45 | // defaults 46 | params = append(params, 47 | // config 48 | "--config", 
cfg.Rclone.Config, 49 | // verbose 50 | "-v", 51 | // user-agent 52 | "--user-agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/537.36 (KHTML, like Gecko) "+ 53 | "Chrome/74.0.3729.131 Safari/537.36", 54 | ) 55 | 56 | // add stats 57 | if config.Config.Rclone.Stats != "" { 58 | params = append(params, 59 | // stats 60 | "--stats", cfg.Rclone.Stats) 61 | } 62 | 63 | return params, nil 64 | } 65 | 66 | func getAdditionalParams(cmd string, extraParams []string) ([]string, error) { 67 | var params []string 68 | 69 | // additional params based on the rclone command being used 70 | switch cmd { 71 | case CmdCopy: 72 | params = append(params, 73 | // stop on upload limit 74 | "--drive-stop-on-upload-limit", 75 | ) 76 | case CmdMove: 77 | params = append(params, 78 | // stop on upload limit 79 | "--drive-stop-on-upload-limit", 80 | ) 81 | case CmdSync: 82 | params = append(params, 83 | // stop on upload limit 84 | "--drive-stop-on-upload-limit", 85 | ) 86 | case CmdDeleteFile: 87 | break 88 | case CmdDeleteDir: 89 | break 90 | case CmdDeleteDirs: 91 | break 92 | case CmdDedupe: 93 | break 94 | default: 95 | break 96 | } 97 | 98 | // add any additional params 99 | params = append(params, extraParams...) 
100 | return params, nil 101 | } 102 | -------------------------------------------------------------------------------- /rclone/rclone.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "github.com/l3uddz/crop/config" 5 | "github.com/l3uddz/crop/logger" 6 | ) 7 | 8 | var ( 9 | log = logger.GetLogger("rclone") 10 | 11 | // init 12 | cfg *config.Configuration 13 | ) 14 | 15 | /* Struct */ 16 | 17 | type RemoteInstruction struct { 18 | From string 19 | To string 20 | ServerSide bool 21 | } 22 | 23 | /* Public */ 24 | 25 | func Init(c *config.Configuration) error { 26 | // set required globals 27 | cfg = c 28 | 29 | // load service files for all uploader(s) 30 | return nil 31 | } 32 | -------------------------------------------------------------------------------- /rclone/rmdir.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "github.com/go-cmd/cmd" 5 | "github.com/pkg/errors" 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | /* Public */ 10 | 11 | func RmDir(remoteFilePath string) (bool, int, error) { 12 | // set variables 13 | rLog := log.WithFields(logrus.Fields{ 14 | "action": CmdDeleteDir, 15 | "remote_path": remoteFilePath, 16 | }) 17 | 18 | result := false 19 | 20 | // generate required rclone parameters 21 | params := []string{ 22 | CmdDeleteDir, 23 | remoteFilePath, 24 | } 25 | 26 | baseParams, err := getBaseParams() 27 | if err != nil { 28 | return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q", CmdDeleteDir, 29 | remoteFilePath) 30 | } 31 | 32 | params = append(params, baseParams...) 33 | rLog.Debugf("Generated params: %v", params) 34 | 35 | // remove file 36 | rcloneCmd := cmd.NewCmd(cfg.Rclone.Path, params...) 
37 | status := <-rcloneCmd.Start() 38 | 39 | // check status 40 | switch status.Exit { 41 | case ExitSuccess: 42 | result = true 43 | default: 44 | break 45 | } 46 | 47 | rLog.WithField("exit_code", status.Exit).Debug("Finished") 48 | return result, status.Exit, status.Error 49 | } 50 | -------------------------------------------------------------------------------- /rclone/sa.go: -------------------------------------------------------------------------------- 1 | package rclone 2 | 3 | import ( 4 | "fmt" 5 | "github.com/ReneKroon/ttlcache/v2" 6 | "github.com/l3uddz/crop/cache" 7 | "github.com/l3uddz/crop/logger" 8 | "github.com/l3uddz/crop/maputils" 9 | "github.com/l3uddz/crop/pathutils" 10 | "github.com/l3uddz/crop/reutils" 11 | "github.com/l3uddz/crop/stringutils" 12 | "github.com/sirupsen/logrus" 13 | "go/types" 14 | "math/rand" 15 | "sort" 16 | "strconv" 17 | "strings" 18 | "sync" 19 | "time" 20 | ) 21 | 22 | /* Struct */ 23 | 24 | type RemoteServiceAccounts struct { 25 | RemoteEnvVar string 26 | ServiceAccounts []pathutils.Path 27 | } 28 | 29 | type RemoteServiceAccount struct { 30 | RemoteEnvVar string 31 | ServiceAccountPath string 32 | } 33 | 34 | type ServiceAccountManager struct { 35 | log *logrus.Entry 36 | remoteServiceAccountFolders map[string][]string 37 | remoteServiceAccounts map[string]RemoteServiceAccounts 38 | parallelism int 39 | } 40 | 41 | var ( 42 | mtx sync.Mutex 43 | mcache *ttlcache.Cache 44 | ) 45 | 46 | /* Private */ 47 | 48 | func init() { 49 | mcache = ttlcache.NewCache() 50 | _ = mcache.SetTTL(60 * time.Minute) 51 | mcache.SetExpirationCallback(mcacheItemExpired) 52 | } 53 | 54 | func mcacheItemExpired(key string, _ interface{}) { 55 | log.Debugf("Cleared SA from mcache: %s", key) 56 | } 57 | 58 | func addServiceAccountsToTempCache(serviceAccounts []*RemoteServiceAccount) { 59 | for _, sa := range serviceAccounts { 60 | _ = mcache.Set(sa.ServiceAccountPath, nil) 61 | } 62 | } 63 | 64 | /* Public */ 65 | 66 | func 
// LoadServiceAccounts scans the configured service account folder(s) for each
// remote referenced by remotePaths and caches the discovered .json service
// account files on the manager, keyed by remote name. Remotes that already
// have service accounts loaded, or that have no configured folder, are
// skipped. Always returns nil.
func (m *ServiceAccountManager) LoadServiceAccounts(remotePaths []string) error {
	m.log.Trace("Loading service accounts")

	// iterate remotes
	for _, remotePath := range remotePaths {
		// ignore junk paths
		if remotePath == "" {
			continue
		}

		// parse remote name (text before the first ":") and retrieve the
		// service account folder(s) configured for it
		remoteName := stringutils.FromLeftUntil(remotePath, ":")
		remoteServiceAccountFolders, err := maputils.GetStringKeysBySliceValue(m.remoteServiceAccountFolders, remoteName)
		if err != nil {
			m.log.Tracef("Service account folder(s) not found for: %q, skipping...", remoteName)
			continue
		}

		// service accounts already loaded for this remote?
		if _, ok := m.remoteServiceAccounts[remoteName]; ok {
			continue
		}

		// load service account files in all folders
		totalServiceAccountFiles := make([]pathutils.Path, 0)

		for _, remoteServiceAccountFolder := range remoteServiceAccountFolders {
			// retrieve service files within this folder (files only,
			// restricted to *.json via the accept callback)
			serviceAccountFiles, _ := pathutils.GetPathsInFolder(remoteServiceAccountFolder, true,
				false, func(path string) *string {
					lowerPath := strings.ToLower(path)

					// ignore non json files
					if !strings.HasSuffix(lowerPath, ".json") {
						return nil
					}

					return &path
				})

			// were service accounts found?
			if len(serviceAccountFiles) == 0 {
				m.log.Tracef("No service accounts found for %q in: %v", remoteName, remoteServiceAccountFolder)
				continue
			}

			// sort service files numerically by the digits embedded in their
			// path (NOTE(review): assumes reutils.GetEveryNumber returns the
			// concatenated digits of the path — confirm). Files whose digits
			// do not parse keep their existing relative order (stable sort).
			sort.SliceStable(serviceAccountFiles, func(i, j int) bool {
				is := reutils.GetEveryNumber(serviceAccountFiles[i].RealPath)
				js := reutils.GetEveryNumber(serviceAccountFiles[j].RealPath)

				in, err := strconv.Atoi(is)
				if err != nil {
					return false
				}
				jn, err := strconv.Atoi(js)
				if err != nil {
					return false
				}

				return in < jn
			})

			totalServiceAccountFiles = append(totalServiceAccountFiles, serviceAccountFiles...)
		}

		// were service accounts found in any folder?
		if len(totalServiceAccountFiles) == 0 {
			m.log.Tracef("No service accounts found for %q in: %v", remoteName, remoteServiceAccountFolders)
			continue
		}

		// cache the accounts together with the env var rclone reads the
		// chosen service account file from
		v := RemoteServiceAccounts{
			RemoteEnvVar:    ConfigToEnv(remoteName, "SERVICE_ACCOUNT_FILE"),
			ServiceAccounts: totalServiceAccountFiles,
		}
		m.remoteServiceAccounts[remoteName] = v

		m.log.Debugf("Loaded %d service accounts for remote %q (env: %v)", len(totalServiceAccountFiles),
			remoteName, v.RemoteEnvVar)
	}

	return nil
}
// GetServiceAccount returns, for each distinct remote referenced by
// remotePaths, the first loaded service account that is neither banned (per
// the persistent cache) nor recently handed out (per the in-memory TTL
// cache). Remotes without loaded service accounts are silently skipped; an
// error is returned only when a remote HAS service accounts but none is
// currently available.
//
// Concurrency: the whole selection is serialised on the package-level mtx so
// parallel uploaders cannot be issued the same service account.
func (m *ServiceAccountManager) GetServiceAccount(remotePaths ...string) ([]*RemoteServiceAccount, error) {
	var serviceAccounts []*RemoteServiceAccount
	var err error
	// remotes (lower-cased) that already have a service account chosen
	successfulRemotes := make(map[string]*types.Nil)

	// acquire global lock
	mtx.Lock()
	defer mtx.Unlock()

	for _, remotePath := range remotePaths {
		saFound := false

		// parse remote name (text before the first ":")
		remoteName := stringutils.FromLeftUntil(remotePath, ":")
		if remoteName == "" {
			// no remote name was parsed, so ignore this request
			m.log.Tracef("No remote determined for: %q, not providing service account", remotePath)
			continue
		}

		// service accounts loaded for this remote?
		remote, ok := m.remoteServiceAccounts[remoteName]
		if !ok || len(remote.ServiceAccounts) == 0 {
			// no service accounts found for this remote
			m.log.Tracef("No service accounts loaded for remote: %q, not providing service account", remoteName)
			continue
		}

		// have we already set a service account for this remote?
		if _, ok := successfulRemotes[strings.ToLower(remoteName)]; ok {
			continue
		}

		// find the first unbanned, not-recently-issued service account
		// (ServiceAccounts is pre-sorted, so lower-numbered files win)
		for _, sa := range remote.ServiceAccounts {
			// does the ban cache already contain this service account?
			if exists, _ := cache.IsBanned(sa.RealPath); exists {
				// service account is currently banned
				continue
			}

			// has this service account been issued recently?
			if _, err := mcache.Get(sa.RealPath); err == nil {
				// this sa was in our memory cache and has not expired yet
				continue
			}

			// this service account is unbanned and free to use
			serviceAccounts = append(serviceAccounts, &RemoteServiceAccount{
				RemoteEnvVar:       remote.RemoteEnvVar,
				ServiceAccountPath: sa.RealPath,
			})

			saFound = true
			break
		}

		if saFound {
			// we found a service account, check for next remote
			successfulRemotes[strings.ToLower(remoteName)] = nil
			continue
		}

		// if we are here, no more service accounts were available
		m.log.Warnf("No more service accounts available for remote: %q", remoteName)
		err = fmt.Errorf("failed finding available service account for remote: %q", remoteName)
		break
	}

	// were service accounts found? reserve them only when running parallel
	// transfers; a successful operation removes the reservation explicitly
	if err == nil && m.parallelism > 1 && len(serviceAccounts) > 0 {
		// there may be multiple routines requesting service accounts
		// attempt to prevent service account from being re-used (unless explicitly removed by a successful operation)
		addServiceAccountsToTempCache(serviceAccounts)
	}

	return serviceAccounts, err
}
// Sync executes "rclone sync <from> <to>" with the generated base and
// additional parameters, optionally injecting service-account credentials
// via environment variables. It streams rclone's output to the logger while
// the process runs and returns (success, processExitCode, executionError).
func Sync(from string, to string, serviceAccounts []*RemoteServiceAccount,
	additionalRcloneParams []string) (bool, int, error) {
	// set variables
	rLog := log.WithFields(logrus.Fields{
		"action": CmdSync,
		"from":   from,
		"to":     to,
	})
	result := false

	// generate required rclone parameters: subcommand, source, destination
	params := []string{
		CmdSync,
		from,
		to,
	}

	baseParams, err := getBaseParams()
	if err != nil {
		return false, 1, errors.WithMessagef(err, "failed generating baseParams to %s: %q -> %q",
			CmdSync, from, to)
	}

	params = append(params, baseParams...)
	extraParams := additionalRcloneParams

	additionalParams, err := getAdditionalParams(CmdSync, extraParams)
	if err != nil {
		return false, 1, errors.WithMessagef(err, "failed generating additionalParams to %s: %q -> %q",
			CmdSync, from, to)
	}

	params = append(params, additionalParams...)
	rLog.Debugf("Generated params: %v", params)

	// generate required rclone env (one VAR=path entry per service account)
	var rcloneEnv []string
	if len(serviceAccounts) > 0 {
		// iterate service accounts, creating env
		for _, env := range serviceAccounts {
			if env == nil {
				// tolerate sparse slices from callers
				continue
			}

			v := env
			rcloneEnv = append(rcloneEnv, fmt.Sprintf("%s=%s", v.RemoteEnvVar, v.ServiceAccountPath))
		}
	}
	rLog.Debugf("Generated rclone env: %v", rcloneEnv)

	// setup cmd: unbuffered + streaming so output can be logged live
	cmdOptions := cmd.Options{
		Buffered:  false,
		Streaming: true,
	}
	rcloneCmd := cmd.NewCmdOptions(cmdOptions, cfg.Rclone.Path, params...)
	rcloneCmd.Env = rcloneEnv

	// live stream logs: drain both channels until go-cmd closes them on exit
	doneChan := make(chan struct{})
	go func() {
		defer close(doneChan)

		for rcloneCmd.Stdout != nil || rcloneCmd.Stderr != nil {
			select {
			case line, open := <-rcloneCmd.Stdout:
				if !open {
					// nil-ing the channel removes this case from the select
					rcloneCmd.Stdout = nil
					continue
				}
				log.Info(line)
			case line, open := <-rcloneCmd.Stderr:
				if !open {
					rcloneCmd.Stderr = nil
					continue
				}
				// NOTE(review): stderr is also logged at Info level —
				// presumably rclone writes progress there; confirm before
				// changing the severity
				log.Info(line)
			}
		}
	}()

	// run command and wait for both the process and the log drainer
	rLog.Debug("Starting...")

	status := <-rcloneCmd.Start()
	<-doneChan

	// check status: only rclone's success exit code counts as success
	switch status.Exit {
	case ExitSuccess:
		result = true
	default:
		break
	}

	rLog.WithField("exit_code", status.Exit).Debug("Finished")
	return result, status.Exit, status.Error
}
| } 38 | } 39 | consecutiveStars = 0 40 | return nil 41 | } 42 | inBraces := false 43 | inBrackets := 0 44 | slashed := false 45 | for _, c := range glob { 46 | if slashed { 47 | _, _ = re.WriteRune(c) 48 | slashed = false 49 | continue 50 | } 51 | if c != '*' { 52 | err := insertStars() 53 | if err != nil { 54 | return nil, err 55 | } 56 | } 57 | if inBrackets > 0 { 58 | _, _ = re.WriteRune(c) 59 | if c == '[' { 60 | inBrackets++ 61 | } 62 | if c == ']' { 63 | inBrackets-- 64 | } 65 | continue 66 | } 67 | switch c { 68 | case '\\': 69 | _, _ = re.WriteRune(c) 70 | slashed = true 71 | case '*': 72 | consecutiveStars++ 73 | case '?': 74 | _, _ = re.WriteString(`[^/]`) 75 | case '[': 76 | _, _ = re.WriteRune(c) 77 | inBrackets++ 78 | case ']': 79 | return nil, errors.Errorf("mismatched ']' in glob %q", glob) 80 | case '{': 81 | if inBraces { 82 | return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob) 83 | } 84 | inBraces = true 85 | _, _ = re.WriteRune('(') 86 | case '}': 87 | if !inBraces { 88 | return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob) 89 | } 90 | _, _ = re.WriteRune(')') 91 | inBraces = false 92 | case ',': 93 | if inBraces { 94 | _, _ = re.WriteRune('|') 95 | } else { 96 | _, _ = re.WriteRune(c) 97 | } 98 | case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above 99 | _, _ = re.WriteRune('\\') 100 | _, _ = re.WriteRune(c) 101 | default: 102 | _, _ = re.WriteRune(c) 103 | } 104 | } 105 | err := insertStars() 106 | if err != nil { 107 | return nil, err 108 | } 109 | if inBrackets > 0 { 110 | return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob) 111 | } 112 | if inBraces { 113 | return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob) 114 | } 115 | _, _ = re.WriteRune('$') 116 | result, err := regexp.Compile(re.String()) 117 | if err != nil { 118 | return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String()) 119 | } 120 | return result, nil 121 | } 
// LeftJust left-justifies text in a field of the given size, padding on the
// right with filler. If text is already size characters or longer (measured
// in bytes), it is returned unchanged.
//
// Note: when filler is longer than one character, each pad position appends
// the whole filler string, so the result may exceed size (preserved from the
// original behavior).
func LeftJust(text string, filler string, size int) string {
	repeatSize := size - len(text)
	if repeatSize <= 0 {
		// previously strings.Repeat panicked on a negative count when
		// len(text) > size; return the text unchanged instead
		return text
	}
	return fmt.Sprintf("%s%s", text, strings.Repeat(filler, repeatSize))
}
-------------------------------------------------------------------------------- /syncer/copy.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/rclone" 7 | "github.com/l3uddz/crop/stringutils" 8 | "github.com/pkg/errors" 9 | "github.com/sirupsen/logrus" 10 | "time" 11 | ) 12 | 13 | func (s *Syncer) Copy(additionalRcloneParams []string, daisyChain bool) error { 14 | // set variables 15 | extraParams := s.Config.RcloneParams.Copy 16 | if additionalRcloneParams != nil { 17 | extraParams = append(extraParams, additionalRcloneParams...) 18 | } 19 | 20 | if globalParams := rclone.GetGlobalParams(rclone.GlobalCopyParams, s.Config.RcloneParams.GlobalCopy); globalParams != nil { 21 | extraParams = append(extraParams, globalParams...) 22 | } 23 | 24 | // add server side parameter 25 | extraParams = append(extraParams, "--drive-server-side-across-configs") 26 | 27 | pos := 0 28 | srcRemote := s.Config.SourceRemote 29 | 30 | // iterate all remotes and run copy 31 | for _, remotePath := range s.Config.Remotes.Copy { 32 | // set variables 33 | attempts := 1 34 | 35 | // daisy 36 | if daisyChain && pos > 0 { 37 | srcRemote = s.Config.Remotes.Copy[pos-1] 38 | } 39 | pos++ 40 | 41 | // copy to remote 42 | for { 43 | // set log 44 | rLog := s.Log.WithFields(logrus.Fields{ 45 | "copy_remote": remotePath, 46 | "source_remote": srcRemote, 47 | "attempts": attempts, 48 | }) 49 | 50 | // get service account file(s) 51 | serviceAccounts, err := s.RemoteServiceAccountFiles.GetServiceAccount(srcRemote, remotePath) 52 | if err != nil { 53 | return errors.WithMessagef(err, 54 | "aborting further copy attempts of %q due to serviceAccount exhaustion", 55 | srcRemote) 56 | } 57 | 58 | // display service account(s) being used 59 | if len(serviceAccounts) > 0 { 60 | for _, sa := range serviceAccounts { 61 | rLog.Infof("Using service account %q: %v", 
sa.RemoteEnvVar, sa.ServiceAccountPath) 62 | } 63 | } 64 | 65 | // copy 66 | rLog.Info("Copying...") 67 | success, exitCode, err := rclone.Copy(srcRemote, remotePath, serviceAccounts, extraParams) 68 | 69 | // check result 70 | if err != nil { 71 | rLog.WithError(err).Errorf("Failed unexpectedly...") 72 | return errors.WithMessagef(err, "copy failed unexpectedly with exit code: %v", exitCode) 73 | } else if success { 74 | // successful exit code 75 | if !s.Ws.Running { 76 | // web service is not running (no live rotate) 77 | rclone.RemoveServiceAccountsFromTempCache(serviceAccounts) 78 | } 79 | break 80 | } 81 | 82 | // is this an exit code we can retry? 83 | switch exitCode { 84 | case rclone.ExitFatalError: 85 | // are we using service accounts? 86 | if len(serviceAccounts) == 0 { 87 | // we are not using service accounts, so mark this remote as banned 88 | if err := cache.SetBanned(stringutils.FromLeftUntil(remotePath, ":"), 25); err != nil { 89 | rLog.WithError(err).Errorf("Failed banning remote") 90 | } 91 | 92 | return fmt.Errorf("copy failed with exit code: %v", exitCode) 93 | } 94 | 95 | // ban this service account 96 | for _, sa := range serviceAccounts { 97 | if err := cache.SetBanned(sa.ServiceAccountPath, 25); err != nil { 98 | rLog.WithError(err).Error("Failed banning service account, cannot try again...") 99 | return fmt.Errorf("failed banning service account: %v", sa.ServiceAccountPath) 100 | } 101 | } 102 | 103 | // attempt copy again 104 | rLog.Warnf("Copy failed with retryable exit code %v, trying again...", exitCode) 105 | attempts++ 106 | continue 107 | default: 108 | return fmt.Errorf("failed and cannot proceed with exit code: %v", exitCode) 109 | } 110 | } 111 | 112 | // sleep before moving on 113 | if daisyChain && pos < len(s.Config.Remotes.Copy) { 114 | s.Log.Info("Waiting 60 seconds before continuing...") 115 | time.Sleep(60 * time.Second) 116 | } 117 | } 118 | 119 | return nil 120 | } 121 | 
-------------------------------------------------------------------------------- /syncer/dedupe.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/rclone" 6 | "github.com/pkg/errors" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func (s *Syncer) Dedupe(additionalRcloneParams []string) error { 11 | extraParams := s.Config.RcloneParams.Dedupe 12 | if additionalRcloneParams != nil { 13 | extraParams = append(extraParams, additionalRcloneParams...) 14 | } 15 | 16 | if globalParams := rclone.GetGlobalParams(rclone.GlobalDedupeParams, s.Config.RcloneParams.GlobalDedupe); globalParams != nil { 17 | extraParams = append(extraParams, globalParams...) 18 | } 19 | 20 | // iterate all remotes and run dedupe 21 | for _, dedupeRemote := range s.Config.Remotes.Dedupe { 22 | // set variables 23 | rLog := s.Log.WithFields(logrus.Fields{ 24 | "dedupe_remote": dedupeRemote, 25 | }) 26 | 27 | // service account 28 | if s.RemoteServiceAccountFiles.ServiceAccountsCount() > 0 { 29 | sa, err := s.RemoteServiceAccountFiles.GetRandomServiceAccount(dedupeRemote) 30 | if err == nil && sa != "" { 31 | extraParams = append(extraParams, "--drive-service-account-file", sa) 32 | } 33 | } 34 | 35 | // dedupe remote 36 | rLog.Info("Deduping...") 37 | success, exitCode, err := rclone.Dedupe(dedupeRemote, extraParams) 38 | 39 | // check result 40 | if err != nil { 41 | rLog.WithError(err).Errorf("Failed unexpectedly...") 42 | return errors.WithMessagef(err, "dedupe failed unexpectedly with exit code: %v", exitCode) 43 | } else if success { 44 | // successful exit code 45 | continue 46 | } 47 | 48 | return fmt.Errorf("dedupe failed with exit code: %v", exitCode) 49 | } 50 | 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /syncer/move.go: -------------------------------------------------------------------------------- 1 | package 
syncer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/rclone" 6 | "github.com/pkg/errors" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func (s *Syncer) Move(additionalRcloneParams []string) error { 11 | moveRemotes := make([]rclone.RemoteInstruction, 0) 12 | 13 | // set variables 14 | for _, remote := range s.Config.Remotes.MoveServerSide { 15 | moveRemotes = append(moveRemotes, rclone.RemoteInstruction{ 16 | From: remote.From, 17 | To: remote.To, 18 | ServerSide: true, 19 | }) 20 | } 21 | 22 | extraParams := s.Config.RcloneParams.MoveServerSide 23 | if additionalRcloneParams != nil { 24 | extraParams = append(extraParams, additionalRcloneParams...) 25 | } 26 | 27 | if globalParams := rclone.GetGlobalParams(rclone.GlobalMoveServerSideParams, s.Config.RcloneParams.GlobalMoveServerSide); globalParams != nil { 28 | extraParams = append(extraParams, globalParams...) 29 | } 30 | 31 | // iterate remotes and run move 32 | for _, move := range moveRemotes { 33 | // set variables 34 | attempts := 1 35 | rLog := s.Log.WithFields(logrus.Fields{ 36 | "move_to": move.To, 37 | "move_from": move.From, 38 | "attempts": attempts, 39 | }) 40 | 41 | // move to remote 42 | rLog.Info("Moving...") 43 | success, exitCode, err := rclone.Move(move.From, move.To, nil, true, extraParams) 44 | 45 | // check result 46 | if err != nil { 47 | rLog.WithError(err).Errorf("Failed unexpectedly...") 48 | return errors.WithMessagef(err, "move failed unexpectedly with exit code: %v", exitCode) 49 | } else if success { 50 | // successful exit code 51 | continue 52 | } 53 | 54 | return fmt.Errorf("move failed with exit code: %v", exitCode) 55 | } 56 | 57 | return nil 58 | } 59 | -------------------------------------------------------------------------------- /syncer/sync.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/rclone" 7 | 
"github.com/l3uddz/crop/stringutils" 8 | "github.com/pkg/errors" 9 | "github.com/sirupsen/logrus" 10 | "time" 11 | ) 12 | 13 | func (s *Syncer) Sync(additionalRcloneParams []string, daisyChain bool) error { 14 | // set variables 15 | extraParams := s.Config.RcloneParams.Sync 16 | if additionalRcloneParams != nil { 17 | extraParams = append(extraParams, additionalRcloneParams...) 18 | } 19 | 20 | if globalParams := rclone.GetGlobalParams(rclone.GlobalSyncParams, s.Config.RcloneParams.GlobalSync); globalParams != nil { 21 | extraParams = append(extraParams, globalParams...) 22 | } 23 | 24 | // add server side parameter 25 | extraParams = append(extraParams, "--drive-server-side-across-configs") 26 | 27 | pos := 0 28 | srcRemote := s.Config.SourceRemote 29 | 30 | // iterate all remotes and run sync 31 | for _, remotePath := range s.Config.Remotes.Sync { 32 | // set variables 33 | attempts := 1 34 | 35 | // daisy 36 | if daisyChain && pos > 0 { 37 | srcRemote = s.Config.Remotes.Sync[pos-1] 38 | } 39 | pos++ 40 | 41 | // sync to remote 42 | for { 43 | // set log 44 | rLog := s.Log.WithFields(logrus.Fields{ 45 | "sync_remote": remotePath, 46 | "source_remote": srcRemote, 47 | "attempts": attempts, 48 | }) 49 | 50 | // get service account file(s) 51 | serviceAccounts, err := s.RemoteServiceAccountFiles.GetServiceAccount(srcRemote, remotePath) 52 | if err != nil { 53 | return errors.WithMessagef(err, 54 | "aborting further sync attempts of %q due to serviceAccount exhaustion", 55 | srcRemote) 56 | } 57 | 58 | // display service account(s) being used 59 | if len(serviceAccounts) > 0 { 60 | for _, sa := range serviceAccounts { 61 | rLog.Infof("Using service account %q: %v", sa.RemoteEnvVar, sa.ServiceAccountPath) 62 | } 63 | } 64 | 65 | // sync 66 | rLog.Info("Syncing...") 67 | success, exitCode, err := rclone.Sync(srcRemote, remotePath, serviceAccounts, extraParams) 68 | 69 | // check result 70 | if err != nil { 71 | rLog.WithError(err).Errorf("Failed unexpectedly...") 72 | 
return errors.WithMessagef(err, "sync failed unexpectedly with exit code: %v", exitCode) 73 | } else if success { 74 | // successful exit code 75 | if !s.Ws.Running { 76 | // web service is not running (no live rotate) 77 | rclone.RemoveServiceAccountsFromTempCache(serviceAccounts) 78 | } 79 | break 80 | } 81 | 82 | // is this an exit code we can retry? 83 | switch exitCode { 84 | case rclone.ExitFatalError: 85 | // are we using service accounts? 86 | if len(serviceAccounts) == 0 { 87 | // we are not using service accounts, so mark this remote as banned 88 | if err := cache.SetBanned(stringutils.FromLeftUntil(remotePath, ":"), 25); err != nil { 89 | rLog.WithError(err).Errorf("Failed banning remote") 90 | } 91 | 92 | return fmt.Errorf("sync failed with exit code: %v", exitCode) 93 | } 94 | 95 | // ban this service account 96 | for _, sa := range serviceAccounts { 97 | if err := cache.SetBanned(sa.ServiceAccountPath, 25); err != nil { 98 | rLog.WithError(err).Error("Failed banning service account, cannot try again...") 99 | return fmt.Errorf("failed banning service account: %v", sa.ServiceAccountPath) 100 | } 101 | } 102 | 103 | // attempt sync again 104 | rLog.Warnf("Sync failed with retryable exit code %v, trying again...", exitCode) 105 | attempts++ 106 | continue 107 | default: 108 | return fmt.Errorf("failed and cannot proceed with exit code: %v", exitCode) 109 | } 110 | } 111 | 112 | // sleep before moving on 113 | if daisyChain && pos < len(s.Config.Remotes.Sync) { 114 | s.Log.Info("Waiting 60 seconds before continuing...") 115 | time.Sleep(60 * time.Second) 116 | } 117 | } 118 | 119 | return nil 120 | } 121 | -------------------------------------------------------------------------------- /syncer/syncer.go: -------------------------------------------------------------------------------- 1 | package syncer 2 | 3 | import ( 4 | "github.com/l3uddz/crop/config" 5 | "github.com/l3uddz/crop/logger" 6 | "github.com/l3uddz/crop/rclone" 7 | 
// Syncer ties together a sync job's configuration, logger, service-account
// manager, and the local web server used for live service-account rotation.
type Syncer struct {
	// Public
	Log                       *logrus.Entry
	GlobalConfig              *config.Configuration
	Config                    *config.SyncerConfig
	Name                      string
	RemoteServiceAccountFiles *rclone.ServiceAccountManager
	Ws                        *web.Server
}

// New creates a Syncer for the named syncer config, loading service accounts
// for every remote it touches (copy targets, sync targets, and the source).
// parallelism is forwarded to the service-account manager.
func New(config *config.Configuration, syncerConfig *config.SyncerConfig, syncerName string, parallelism int) (*Syncer, error) {
	// init syncer dependencies
	// - service account manager
	sam := rclone.NewServiceAccountManager(config.Rclone.ServiceAccountRemotes, parallelism)

	// collect every remote path this syncer may talk to
	remotePaths := append([]string{}, syncerConfig.Remotes.Copy...)
	remotePaths = append(remotePaths, syncerConfig.Remotes.Sync...)
	remotePaths = append(remotePaths, syncerConfig.SourceRemote)

	if err := sam.LoadServiceAccounts(remotePaths); err != nil {
		return nil, errors.WithMessage(err, "failed initializing associated remote service accounts")
	}

	// init syncer
	l := logger.GetLogger(syncerName)

	syncer := &Syncer{
		Log:                       l,
		GlobalConfig:              config,
		Config:                    syncerConfig,
		Name:                      syncerName,
		RemoteServiceAccountFiles: sam,
		// web server bound to loopback only; used for live SA rotation
		Ws: web.New("127.0.0.1", l, syncerName, sam),
	}

	return syncer, nil
}
-------------------------------------------------------------------------------- 1 | # /etc/systemd/system/crop_clean.timer 2 | [Unit] 3 | Description=crop clean 4 | After=network-online.target 5 | 6 | [Timer] 7 | # hourly, daily, weekly are valid. as are OnCalendar=DayOfWeek Year-Month-Day Hour:Minute:Second (example: "OnCalendar=Mon..Fri 22:00" to run weekdays at 22:00, a second line can be added i.e. "OnCalendar=Sat,Sun 03:00" to run at 3am on weekends.) 8 | OnCalendar=weekly 9 | # to use inactivity since last finished running, 1h can be used for 1hour. 10 | #OnUnitInactiveSec=168h 11 | 12 | [Install] 13 | WantedBy=timers.target 14 | -------------------------------------------------------------------------------- /systemd/crop_sync.service: -------------------------------------------------------------------------------- 1 | # /etc/systemd/system/crop_sync.service 2 | [Unit] 3 | Description=crop sync 4 | After=network-online.target 5 | 6 | [Service] 7 | User=1000 8 | Group=1000 9 | Type=exec 10 | ExecStart=/opt/crop/crop sync 11 | ExecStopPost=/bin/rm -rf /opt/crop/crop.lock 12 | 13 | [Install] 14 | WantedBy=default.target 15 | -------------------------------------------------------------------------------- /systemd/crop_sync.timer: -------------------------------------------------------------------------------- 1 | # /etc/systemd/system/crop_sync.timer 2 | [Unit] 3 | Description=crop sync 4 | After=network-online.target 5 | 6 | [Timer] 7 | # hourly, daily, weekly are valid. as are OnCalendar=DayOfWeek Year-Month-Day Hour:Minute:Second (example: "OnCalendar=Mon..Fri 22:00" to run weekdays at 22:00, a second line can be added i.e. "OnCalendar=Sat,Sun 03:00" to run at 3am on weekends.) 8 | OnCalendar=daily 9 | # to use inactivity since last finished running, 1h can be used for 1hour. 
10 | #OnUnitInactiveSec=24h 11 | 12 | [Install] 13 | WantedBy=timers.target 14 | -------------------------------------------------------------------------------- /systemd/crop_upload.service: -------------------------------------------------------------------------------- 1 | # /etc/systemd/system/crop_upload.service 2 | [Unit] 3 | Description=crop upload 4 | After=network-online.target 5 | 6 | [Service] 7 | User=1000 8 | Group=1000 9 | Type=exec 10 | ExecStart=/opt/crop/crop upload 11 | ExecStopPost=/bin/rm -rf /opt/crop/crop.lock 12 | 13 | [Install] 14 | WantedBy=default.target 15 | -------------------------------------------------------------------------------- /systemd/crop_upload.timer: -------------------------------------------------------------------------------- 1 | # /etc/systemd/system/crop_upload.timer 2 | [Unit] 3 | Description=crop upload 4 | After=network-online.target 5 | 6 | [Timer] 7 | # hourly, daily, weekly are valid. as are OnCalendar=DayOfWeek Year-Month-Day Hour:Minute:Second (example: "OnCalendar=Mon..Fri 22:00" to run weekdays at 22:00, a second line can be added i.e. "OnCalendar=Sat,Sun 03:00" to run at 3am on weekends.) 8 | #OnCalendar=hourly 9 | # to use inactivity since last finished running, 1h can be used for 1hour. 
// Age is an uploader checker that passes when local files are older than the
// configured limit (cfg.Limit, in minutes).
type Age struct{}

// Check scans paths for files modified before now minus cfg.Limit minutes.
// It passes if at least one such file exists. The size argument is unused by
// this checker. Result.Info is a human-readable age of the oldest file
// relative to the cutoff.
func (Age) Check(cfg *config.UploaderCheck, log *logrus.Entry, paths []pathutils.Path, size uint64) (*Result, error) {
	var checkPassed bool
	var filesPassed int
	var filesSize int64

	// seeded with "now" so any file's mod time will be before it
	oldestFile := time.Now()

	// Check File Ages: cutoff is cfg.Limit minutes in the past
	maxFileAge := time.Now().Add(time.Duration(-cfg.Limit) * time.Minute)

	for _, path := range paths {
		path := path

		// skip directories
		if path.IsDir {
			continue
		}

		// set oldestFile
		// NOTE(review): oldestFile is initialized to time.Now(), so the
		// IsZero branch can never fire; the Before comparison alone drives
		// the update
		if oldestFile.IsZero() || path.ModifiedTime.Before(oldestFile) {
			oldestFile = path.ModifiedTime
		}

		// was this file modified after our max file age?
		if path.ModifiedTime.Before(maxFileAge) {
			filesPassed++
			filesSize += path.Size

			log.WithFields(logrus.Fields{
				"max_age":   humanize.Time(maxFileAge),
				"file_time": path.ModifiedTime,
				"file_path": path.Path,
				"over_age":  humanize.RelTime(maxFileAge, path.ModifiedTime, "", ""),
			}).Trace("Age is greater than specified limit")

			// one qualifying file is enough to pass the check
			checkPassed = true
		}
	}

	if checkPassed {
		log.WithFields(logrus.Fields{
			"files_passed": filesPassed,
			"files_size":   humanize.IBytes(uint64(filesSize)),
		}).Info("Local files matching check criteria")
	}

	return &Result{
		Passed: checkPassed,
		Info:   humanize.RelTime(oldestFile, maxFileAge, "", ""),
	}, nil
}

// CheckFile reports whether a single file is older than the configured limit.
// The size argument is unused by this checker.
func (Age) CheckFile(cfg *config.UploaderCheck, log *logrus.Entry, path pathutils.Path, size uint64) (bool, error) {
	maxFileAge := time.Now().Add(time.Duration(-cfg.Limit) * time.Minute)

	// Check File Age
	if path.ModifiedTime.Before(maxFileAge) {
		return true, nil
	}

	return false, nil
}
// Size is an uploader checker that passes when the total size of local files
// exceeds the configured limit (cfg.Limit, in bytes).
type Size struct{}

// Check compares the aggregate size against cfg.Limit. On pass, Result.Info
// is the human-readable current size; on fail, it is the remaining headroom.
// The paths argument is unused by this checker.
func (Size) Check(cfg *config.UploaderCheck, log *logrus.Entry, paths []pathutils.Path, size uint64) (*Result, error) {
	// Check Total Size
	if size > cfg.Limit {
		s := humanize.IBytes(size)
		log.WithFields(logrus.Fields{
			"max_size":     humanize.IBytes(cfg.Limit),
			"current_size": s,
			"over_size":    humanize.IBytes(size - cfg.Limit),
		}).Info("Size is greater than specified limit")

		return &Result{
			Passed: true,
			Info:   s,
		}, nil
	}

	return &Result{
		Passed: false,
		// headroom left before the limit is reached
		Info: humanize.IBytes(cfg.Limit - size),
	}, nil
}
// Clean removes the file/folder represented by a hidden marker from every
// configured clean remote, then optionally removes the local marker itself.
// Remote removal is best-effort: failures are logged but never abort the
// iteration, and Clean always returns nil.
func (u *Uploader) Clean(path *pathutils.Path) error {
	// iterate all remotes and remove the file/folder
	for _, remotePath := range u.Config.Remotes.Clean {
		// transform remotePath to a path that can be removed: swap the local
		// hidden-folder prefix for the remote prefix
		cleanRemotePath := strings.Replace(path.Path, u.Config.Hidden.Folder, remotePath, 1)

		// set log
		rLog := u.Log.WithFields(logrus.Fields{
			"clean_local_path":  path.RealPath,
			"clean_remote_path": cleanRemotePath,
		})

		// remove from remote
		var success bool
		var exitCode int
		var err error

		rLog.Debug("Removing...")
		if path.IsDir {
			// remove directory
			success, exitCode, err = rclone.RmDir(cleanRemotePath)
		} else {
			// remove file
			success, exitCode, err = rclone.DeleteFile(cleanRemotePath)
		}

		// handle response (log only; continue to the next remote regardless)
		switch {
		case err != nil:
			// error removing
			rLog.WithError(err).WithField("exit_code", exitCode).Error("Error removing remotely")
		case !success:
			// failed
			rLog.WithField("exit_code", exitCode).Debug("Failed removing remotely")
		default:
			// cleaned
			rLog.Info("Removed remotely")
		}
	}

	// cleanup cleaned path locally (skipped on dry-run or when disabled)
	if !u.GlobalConfig.Rclone.DryRun && u.Config.Hidden.Cleanup {
		if err := os.Remove(path.RealPath); err != nil {
			u.Log.
				WithField("clean_local_path", path.RealPath).
				WithError(err).
				Error("Failed removing locally")
		} else {
			u.Log.
				WithField("clean_local_path", path.RealPath).
				Debug("Removed locally")
		}
	}
	return nil
}
this is a hidden folder 37 | hiddenFolders = append(hiddenFolders, path) 38 | } 39 | } 40 | 41 | // sort results 42 | 43 | // log results 44 | tLog.WithFields(logrus.Fields{ 45 | "found_files": len(hiddenFiles), 46 | "found_folders": len(hiddenFolders), 47 | "hidden_folder": cfg.Folder, 48 | }).Info("Refreshed hidden files/folders") 49 | return hiddenFiles, hiddenFolders, nil 50 | } 51 | -------------------------------------------------------------------------------- /uploader/cleans.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "github.com/yale8848/gorpool" 6 | "time" 7 | ) 8 | 9 | func (u *Uploader) PerformCleans(gp *gorpool.Pool) error { 10 | // refresh details about hidden files/folders to remove 11 | if err := u.RefreshHiddenPaths(); err != nil { 12 | u.Log.WithError(err).Error("Failed refreshing details of hidden files/folders to clean") 13 | return errors.Wrap(err, "failed refreshing details of hidden files/folders") 14 | } 15 | 16 | // perform clean files 17 | if len(u.HiddenFiles) > 0 { 18 | u.Log.Info("Performing clean of hidden files...") 19 | 20 | for _, path := range u.HiddenFiles { 21 | p := path 22 | 23 | gp.AddJob(func() { 24 | _ = u.Clean(&p) 25 | }) 26 | } 27 | 28 | u.Log.Debug("Waiting for queued jobs to finish") 29 | time.Sleep(2 * time.Second) 30 | gp.WaitForAll() 31 | u.Log.Info("Finished cleaning hidden files!") 32 | } 33 | 34 | // perform clean folders 35 | if len(u.HiddenFolders) > 0 { 36 | u.Log.Info("Performing clean of hidden folders...") 37 | for _, path := range u.HiddenFolders { 38 | p := path 39 | 40 | gp.AddJob(func() { 41 | _ = u.Clean(&p) 42 | }) 43 | } 44 | 45 | u.Log.Debug("Waiting for queued jobs to finish") 46 | time.Sleep(2 * time.Second) 47 | gp.WaitForAll() 48 | u.Log.Info("Finished cleaning hidden folders!") 49 | } 50 | 51 | return nil 52 | } 53 | 
-------------------------------------------------------------------------------- /uploader/copy.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/rclone" 7 | "github.com/l3uddz/crop/stringutils" 8 | "github.com/pkg/errors" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func (u *Uploader) Copy(additionalRcloneParams []string) error { 13 | // set variables 14 | extraParams := u.Config.RcloneParams.Copy 15 | if additionalRcloneParams != nil { 16 | extraParams = append(extraParams, additionalRcloneParams...) 17 | } 18 | 19 | if globalParams := rclone.GetGlobalParams(rclone.GlobalCopyParams, u.Config.RcloneParams.GlobalCopy); globalParams != nil { 20 | extraParams = append(extraParams, globalParams...) 21 | } 22 | 23 | // iterate all remotes and run copy 24 | for _, remotePath := range u.Config.Remotes.Copy { 25 | // set variables 26 | attempts := 1 27 | 28 | // copy to remote 29 | for { 30 | // set log 31 | rLog := u.Log.WithFields(logrus.Fields{ 32 | "copy_remote": remotePath, 33 | "copy_local_path": u.Config.LocalFolder, 34 | "attempts": attempts, 35 | }) 36 | 37 | // get service account(s) 38 | serviceAccounts, err := u.RemoteServiceAccountFiles.GetServiceAccount(remotePath) 39 | if err != nil { 40 | return errors.WithMessagef(err, 41 | "aborting further copy attempts of %q due to serviceAccount exhaustion", 42 | u.Config.LocalFolder) 43 | } 44 | 45 | // display service account(s) being used 46 | if len(serviceAccounts) > 0 { 47 | for _, sa := range serviceAccounts { 48 | rLog.Infof("Using service account %q: %v", sa.RemoteEnvVar, sa.ServiceAccountPath) 49 | } 50 | } 51 | 52 | // copy 53 | rLog.Info("Copying...") 54 | success, exitCode, err := rclone.Copy(u.Config.LocalFolder, remotePath, serviceAccounts, extraParams) 55 | 56 | // check result 57 | if err != nil { 58 | rLog.WithError(err).Errorf("Failed 
unexpectedly...") 59 | return errors.WithMessagef(err, "copy failed unexpectedly with exit code: %v", exitCode) 60 | } else if success { 61 | // successful exit code 62 | break 63 | } 64 | 65 | // is this an exit code we can retry? 66 | switch exitCode { 67 | case rclone.ExitFatalError: 68 | // are we using service accounts? 69 | if len(serviceAccounts) == 0 { 70 | // we are not using service accounts, so mark this remote as banned 71 | if err := cache.SetBanned(stringutils.FromLeftUntil(remotePath, ":"), 25); err != nil { 72 | rLog.WithError(err).Errorf("Failed banning remote") 73 | } 74 | 75 | return fmt.Errorf("copy failed with exit code: %v", exitCode) 76 | } 77 | 78 | // ban service account(s) used 79 | for _, sa := range serviceAccounts { 80 | if err := cache.SetBanned(sa.ServiceAccountPath, 25); err != nil { 81 | rLog.WithError(err).Error("Failed banning service account, cannot try again...") 82 | return fmt.Errorf("failed banning service account: %v", sa.ServiceAccountPath) 83 | } 84 | } 85 | 86 | // attempt copy again 87 | rLog.Warnf("Copy failed with retryable exit code %v, trying again...", exitCode) 88 | attempts++ 89 | continue 90 | default: 91 | return fmt.Errorf("failed and cannot proceed with exit code: %v", exitCode) 92 | } 93 | } 94 | } 95 | 96 | return nil 97 | } 98 | -------------------------------------------------------------------------------- /uploader/dedupe.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/rclone" 6 | "github.com/pkg/errors" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func (u *Uploader) Dedupe(additionalRcloneParams []string) error { 11 | extraParams := u.Config.RcloneParams.Dedupe 12 | if additionalRcloneParams != nil { 13 | extraParams = append(extraParams, additionalRcloneParams...) 
14 | } 15 | 16 | if globalParams := rclone.GetGlobalParams(rclone.GlobalDedupeParams, u.Config.RcloneParams.GlobalDedupe); globalParams != nil { 17 | extraParams = append(extraParams, globalParams...) 18 | } 19 | 20 | // iterate all remotes and run dedupe 21 | for _, dedupeRemote := range u.Config.Remotes.Dedupe { 22 | // set variables 23 | rLog := u.Log.WithFields(logrus.Fields{ 24 | "dedupe_remote": dedupeRemote, 25 | }) 26 | 27 | // service account 28 | if u.RemoteServiceAccountFiles.ServiceAccountsCount() > 0 { 29 | sa, err := u.RemoteServiceAccountFiles.GetRandomServiceAccount(dedupeRemote) 30 | if err == nil && sa != "" { 31 | extraParams = append(extraParams, "--drive-service-account-file", sa) 32 | } 33 | } 34 | 35 | // dedupe remote 36 | rLog.Info("Deduping...") 37 | success, exitCode, err := rclone.Dedupe(dedupeRemote, extraParams) 38 | 39 | // check result 40 | if err != nil { 41 | rLog.WithError(err).Errorf("Failed unexpectedly...") 42 | return errors.WithMessagef(err, "dedupe failed unexpectedly with exit code: %v", exitCode) 43 | } else if success { 44 | // successful exit code 45 | continue 46 | } 47 | 48 | return fmt.Errorf("dedupe failed with exit code: %v", exitCode) 49 | } 50 | 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /uploader/file.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/dustin/go-humanize" 6 | "github.com/l3uddz/crop/pathutils" 7 | "github.com/l3uddz/crop/uploader/cleaner" 8 | "github.com/sirupsen/logrus" 9 | "strings" 10 | ) 11 | 12 | var ( 13 | supportedCleaners = map[string]interface{}{ 14 | "unionfs": cleaner.Unionfs{}, 15 | } 16 | ) 17 | 18 | func (u *Uploader) RefreshLocalFiles() error { 19 | // retrieve files 20 | u.LocalFiles, u.LocalFilesSize = pathutils.GetPathsInFolder(u.Config.LocalFolder, true, false, 21 | func(path string) *string { 22 | rcloneStylePath := 
strings.TrimLeft(strings.Replace(path, u.Config.LocalFolder, "", 1), "/") 23 | 24 | // should this path be excluded? 25 | if len(u.ExcludePatterns) > 0 { 26 | for _, excludePattern := range u.ExcludePatterns { 27 | if excludePattern.MatchString(rcloneStylePath) { 28 | // this path matches an exclude pattern 29 | return nil 30 | } 31 | } 32 | } 33 | 34 | // should this path be included? 35 | if len(u.Config.Check.Include) > 0 { 36 | for _, includePattern := range u.IncludePatterns { 37 | if includePattern.MatchString(rcloneStylePath) { 38 | // this path matches an include pattern 39 | return &path 40 | } 41 | } 42 | 43 | return nil 44 | } 45 | 46 | // we are interested in all these files 47 | return &path 48 | }) 49 | 50 | // log results 51 | u.Log.WithFields(logrus.Fields{ 52 | "found_files": len(u.LocalFiles), 53 | "files_size": humanize.IBytes(u.LocalFilesSize), 54 | "local_folder": u.Config.LocalFolder, 55 | }).Info("Refreshed local files") 56 | 57 | return nil 58 | } 59 | 60 | func (u *Uploader) RefreshHiddenPaths() error { 61 | var err error 62 | 63 | // Retrieve hidden files/folders 64 | u.HiddenFiles, u.HiddenFolders, err = u.Cleaner.FindHidden(&u.Config.Hidden, u.Log) 65 | if err != nil { 66 | return fmt.Errorf("failed refreshing hidden paths for: %q", u.Config.Hidden.Folder) 67 | } 68 | 69 | return nil 70 | } 71 | -------------------------------------------------------------------------------- /uploader/move.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/cache" 6 | "github.com/l3uddz/crop/rclone" 7 | "github.com/l3uddz/crop/stringutils" 8 | "github.com/pkg/errors" 9 | "github.com/sirupsen/logrus" 10 | ) 11 | 12 | func (u *Uploader) Move(serverSide bool, additionalRcloneParams []string) error { 13 | var moveRemotes []rclone.RemoteInstruction 14 | var extraParams []string 15 | 16 | // create move instructions 17 | if serverSide { 18 | // this is 
a server side move 19 | for _, remote := range u.Config.Remotes.MoveServerSide { 20 | moveRemotes = append(moveRemotes, rclone.RemoteInstruction{ 21 | From: remote.From, 22 | To: remote.To, 23 | ServerSide: true, 24 | }) 25 | } 26 | 27 | extraParams = u.Config.RcloneParams.MoveServerSide 28 | if globalParams := rclone.GetGlobalParams(rclone.GlobalMoveServerSideParams, u.Config.RcloneParams.GlobalMoveServerSide); globalParams != nil { 29 | extraParams = append(extraParams, globalParams...) 30 | } 31 | } else { 32 | // this is a normal move (to only one location) 33 | moveRemotes = append(moveRemotes, rclone.RemoteInstruction{ 34 | From: u.Config.LocalFolder, 35 | To: u.Config.Remotes.Move, 36 | ServerSide: false, 37 | }) 38 | 39 | extraParams = u.Config.RcloneParams.Move 40 | if globalParams := rclone.GetGlobalParams(rclone.GlobalMoveParams, u.Config.RcloneParams.GlobalMove); globalParams != nil { 41 | extraParams = append(extraParams, globalParams...) 42 | } 43 | } 44 | 45 | // set variables 46 | if additionalRcloneParams != nil { 47 | extraParams = append(extraParams, additionalRcloneParams...) 
48 | } 49 | 50 | // iterate all remotes and run move 51 | for _, move := range moveRemotes { 52 | // set variables 53 | attempts := 1 54 | 55 | // move to remote 56 | for { 57 | var serviceAccounts []*rclone.RemoteServiceAccount 58 | var err error 59 | 60 | // set log 61 | rLog := u.Log.WithFields(logrus.Fields{ 62 | "move_to": move.To, 63 | "move_from": move.From, 64 | "attempts": attempts, 65 | }) 66 | 67 | // get service account(s) for non server side move 68 | if !serverSide { 69 | serviceAccounts, err = u.RemoteServiceAccountFiles.GetServiceAccount(move.To) 70 | if err != nil { 71 | return errors.WithMessagef(err, 72 | "aborting further move attempts of %q due to serviceAccount exhaustion", 73 | move.From) 74 | } 75 | 76 | // display service accounts being used 77 | if len(serviceAccounts) > 0 { 78 | for _, sa := range serviceAccounts { 79 | rLog.Infof("Using service account %q: %v", sa.RemoteEnvVar, sa.ServiceAccountPath) 80 | } 81 | } 82 | } 83 | 84 | // move 85 | rLog.Info("Moving...") 86 | success, exitCode, err := rclone.Move(move.From, move.To, serviceAccounts, serverSide, extraParams) 87 | 88 | // check result 89 | if err != nil { 90 | rLog.WithError(err).Errorf("Failed unexpectedly...") 91 | return errors.WithMessagef(err, "move failed unexpectedly with exit code: %v", exitCode) 92 | } 93 | 94 | if success { 95 | // successful exit code 96 | break 97 | } else if serverSide { 98 | // server side moves will not use service accounts, so we will not retry... 99 | return fmt.Errorf("failed and cannot proceed with exit code: %v", exitCode) 100 | } 101 | 102 | // is this an exit code we can retry? 103 | switch exitCode { 104 | case rclone.ExitFatalError: 105 | // are we using service accounts? 
106 | if len(serviceAccounts) == 0 { 107 | // we are not using service accounts, so mark this remote as banned (if non server side move) 108 | if !serverSide { 109 | // this was not a server side move, so lets ban the remote we are moving too 110 | if err := cache.SetBanned(stringutils.FromLeftUntil(move.To, ":"), 25); err != nil { 111 | rLog.WithError(err).Errorf("Failed banning remote") 112 | } 113 | } 114 | 115 | return fmt.Errorf("move failed with exit code: %v", exitCode) 116 | } 117 | 118 | // ban the service account(s) used 119 | for _, sa := range serviceAccounts { 120 | if err := cache.SetBanned(sa.ServiceAccountPath, 25); err != nil { 121 | rLog.WithError(err).Error("Failed banning service account, cannot try again...") 122 | return fmt.Errorf("failed banning service account: %v", sa.ServiceAccountPath) 123 | } 124 | } 125 | 126 | // attempt move again 127 | rLog.Warnf("Move failed with retryable exit code %v, trying again...", exitCode) 128 | attempts++ 129 | continue 130 | default: 131 | return fmt.Errorf("failed and cannot proceed with exit code: %v", exitCode) 132 | } 133 | } 134 | } 135 | 136 | return nil 137 | } 138 | -------------------------------------------------------------------------------- /uploader/uploader.go: -------------------------------------------------------------------------------- 1 | package uploader 2 | 3 | import ( 4 | "fmt" 5 | "github.com/l3uddz/crop/config" 6 | "github.com/l3uddz/crop/logger" 7 | "github.com/l3uddz/crop/pathutils" 8 | "github.com/l3uddz/crop/rclone" 9 | "github.com/l3uddz/crop/reutils" 10 | "github.com/l3uddz/crop/uploader/checker" 11 | "github.com/l3uddz/crop/uploader/cleaner" 12 | "github.com/l3uddz/crop/web" 13 | "github.com/pkg/errors" 14 | "github.com/sirupsen/logrus" 15 | "regexp" 16 | "strings" 17 | ) 18 | 19 | type Uploader struct { 20 | // Public 21 | Log *logrus.Entry 22 | GlobalConfig *config.Configuration 23 | Config *config.UploaderConfig 24 | Name string 25 | 26 | Checker checker.Interface 27 | 
Cleaner cleaner.Interface 28 | 29 | IncludePatterns []*regexp.Regexp 30 | ExcludePatterns []*regexp.Regexp 31 | 32 | RemoteServiceAccountFiles *rclone.ServiceAccountManager 33 | 34 | LocalFiles []pathutils.Path 35 | LocalFilesSize uint64 36 | HiddenFiles []pathutils.Path 37 | HiddenFolders []pathutils.Path 38 | 39 | Ws *web.Server 40 | } 41 | 42 | func New(config *config.Configuration, uploaderConfig *config.UploaderConfig, uploaderName string) (*Uploader, error) { 43 | // init uploader dependencies 44 | // - checker 45 | c, found := supportedCheckers[strings.ToLower(uploaderConfig.Check.Type)] 46 | if !found { 47 | return nil, fmt.Errorf("unknown check type specified: %q", uploaderConfig.Check.Type) 48 | } 49 | 50 | chk, ok := c.(checker.Interface) 51 | if !ok { 52 | return nil, fmt.Errorf("failed typecasting to checker interface for: %q", uploaderConfig.Check.Type) 53 | } 54 | 55 | // - cleaner 56 | var cln cleaner.Interface = nil 57 | if uploaderConfig.Hidden.Enabled { 58 | c, found := supportedCleaners[strings.ToLower(uploaderConfig.Hidden.Type)] 59 | if !found { 60 | // checker was not found 61 | return nil, fmt.Errorf("unknown cleaner type specified: %q", uploaderConfig.Hidden.Type) 62 | } 63 | 64 | // Typecast found cleaner 65 | cln, ok = c.(cleaner.Interface) 66 | if !ok { 67 | return nil, fmt.Errorf("failed typecasting to cleaner interface for: %q", uploaderConfig.Hidden.Type) 68 | } 69 | } 70 | 71 | // - include patterns 72 | includePatterns := make([]*regexp.Regexp, 0) 73 | 74 | for _, includePattern := range uploaderConfig.Check.Include { 75 | g, err := reutils.GlobToRegexp(includePattern, false) 76 | if err != nil { 77 | return nil, fmt.Errorf("invalid include pattern: %q", includePattern) 78 | } 79 | 80 | includePatterns = append(includePatterns, g) 81 | } 82 | 83 | // - exclude patterns 84 | excludePatterns := make([]*regexp.Regexp, 0) 85 | 86 | for _, excludePattern := range uploaderConfig.Check.Exclude { 87 | g, err := 
reutils.GlobToRegexp(excludePattern, false) 88 | if err != nil { 89 | return nil, fmt.Errorf("invalid exclude pattern: %q", excludePattern) 90 | } 91 | 92 | excludePatterns = append(excludePatterns, g) 93 | } 94 | 95 | // - service account manager 96 | sam := rclone.NewServiceAccountManager(config.Rclone.ServiceAccountRemotes, 1) 97 | 98 | remotePaths := append([]string{}, uploaderConfig.Remotes.Copy...) 99 | remotePaths = append(remotePaths, uploaderConfig.Remotes.Move) 100 | 101 | if err := sam.LoadServiceAccounts(remotePaths); err != nil { 102 | return nil, errors.WithMessage(err, "failed initializing associated remote service accounts") 103 | } 104 | 105 | // init uploader 106 | l := logger.GetLogger(uploaderName) 107 | uploader := &Uploader{ 108 | Log: l, 109 | GlobalConfig: config, 110 | Config: uploaderConfig, 111 | Name: uploaderName, 112 | Checker: chk, 113 | Cleaner: cln, 114 | IncludePatterns: includePatterns, 115 | ExcludePatterns: excludePatterns, 116 | RemoteServiceAccountFiles: sam, 117 | Ws: web.New("127.0.0.1", l, uploaderName, sam), 118 | } 119 | 120 | return uploader, nil 121 | } 122 | -------------------------------------------------------------------------------- /web/handler.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "github.com/gofiber/fiber/v2" 5 | "github.com/l3uddz/crop/cache" 6 | "time" 7 | ) 8 | 9 | func (ws *Server) ServiceAccountHandler(c *fiber.Ctx) error { 10 | // only accept json 11 | c.Accepts("application/json") 12 | 13 | // acquire cache lock 14 | ws.saCache.Lock() 15 | defer ws.saCache.Unlock() 16 | 17 | // parse body 18 | req := new(ServiceAccountRequest) 19 | if err := c.BodyParser(req); err != nil { 20 | ws.log.WithError(err).Error("Failed parsing service account request from rclone...") 21 | return c.SendStatus(500) 22 | } 23 | 24 | // have we issued a replacement sa for this banned sa? 
25 | now := time.Now().UTC() 26 | nsa, ok := ws.saCache.cache[req.OldServiceAccount] 27 | switch { 28 | case ok && now.Before(nsa.Expires): 29 | // we issued a replacement sa for this one already 30 | nsa.Hits++ 31 | if nsa.Hits <= maxSaCacheHits { 32 | // return last response 33 | return c.SendString(nsa.ResponseServiceAccount) 34 | } 35 | 36 | // remove entries that have exceeded max hits 37 | delete(ws.saCache.cache, req.OldServiceAccount) 38 | case ok: 39 | // we issued a replacement sa for this one already, but it has expired 40 | delete(ws.saCache.cache, req.OldServiceAccount) 41 | default: 42 | break 43 | } 44 | 45 | // handle response 46 | ws.log.Warnf("Service account limit reached for remote %q, sa: %v", req.Remote, req.OldServiceAccount) 47 | 48 | // ban this service account 49 | if err := cache.SetBanned(req.OldServiceAccount, 25); err != nil { 50 | ws.log.WithError(err).Error("Failed banning service account, cannot try again...") 51 | return c.SendStatus(500) 52 | } 53 | 54 | // get service account for this remote 55 | sa, err := ws.sa.GetServiceAccount(req.Remote) 56 | switch { 57 | case err != nil: 58 | ws.log.WithError(err).Errorf("Failed retrieving service account for remote: %q", req.Remote) 59 | return c.SendStatus(500) 60 | case len(sa) < 1: 61 | ws.log.Errorf("Failed finding service account for remote: %q", req.Remote) 62 | return c.SendStatus(500) 63 | default: 64 | break 65 | } 66 | 67 | // create cache entry 68 | cacheEntry := &ServiceAccountCacheEntry{ 69 | ResponseServiceAccount: sa[0].ServiceAccountPath, 70 | Expires: time.Now().UTC().Add(durationSaCacheEntry), 71 | Hits: 0, 72 | } 73 | 74 | // store cache entry for the old account 75 | ws.saCache.cache[req.OldServiceAccount] = cacheEntry 76 | 77 | // store cache entry for the new account 78 | // (so if another transfer routine requests within N duration, re-issue the same sa) 79 | ws.saCache.cache[sa[0].ServiceAccountPath] = cacheEntry 80 | 81 | // return service account 82 | 
ws.log.Warnf("New service account for remote %q, sa: %v", req.Remote, sa[0].ServiceAccountPath) 83 | return c.SendString(sa[0].ServiceAccountPath) 84 | } 85 | -------------------------------------------------------------------------------- /web/server.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "fmt" 5 | "github.com/gofiber/fiber/v2" 6 | "github.com/gofiber/fiber/v2/middleware/recover" 7 | "github.com/l3uddz/crop/rclone" 8 | "github.com/phayes/freeport" 9 | "github.com/sirupsen/logrus" 10 | "sync" 11 | "time" 12 | ) 13 | 14 | /* Const */ 15 | 16 | const ( 17 | maxSaCacheHits int = 4 18 | durationSaCacheEntry = 10 * time.Second 19 | ) 20 | 21 | /* Var */ 22 | 23 | var ( 24 | fpc *FreePortCache 25 | ) 26 | 27 | /* Private */ 28 | 29 | func init() { 30 | fpc = &FreePortCache{ 31 | pCache: make(map[int]int), 32 | Mutex: sync.Mutex{}, 33 | } 34 | } 35 | 36 | /* Public */ 37 | 38 | func New(host string, log *logrus.Entry, name string, sa *rclone.ServiceAccountManager) *Server { 39 | // get free port 40 | fpc.Lock() 41 | defer fpc.Unlock() 42 | port := 0 43 | 44 | for { 45 | p, err := freeport.GetFreePort() 46 | if err != nil { 47 | log.WithError(err).Fatal("Failed locating free port for the service account server") 48 | } 49 | 50 | if _, exists := fpc.pCache[p]; !exists { 51 | fpc.pCache[p] = p 52 | port = p 53 | log.Debugf("Found free port for service account server: %d", port) 54 | break 55 | } 56 | } 57 | 58 | // create ws object 59 | ws := &Server{ 60 | Host: host, 61 | Port: port, 62 | app: fiber.New(fiber.Config{ 63 | DisableStartupMessage: true, 64 | }), 65 | log: log, 66 | name: name, 67 | sa: sa, 68 | saCache: &ServiceAccountCache{ 69 | cache: make(map[string]*ServiceAccountCacheEntry), 70 | Mutex: sync.Mutex{}, 71 | }, 72 | } 73 | 74 | // middleware(s) 75 | ws.app.Use(recover.New()) 76 | 77 | // route(s) 78 | ws.app.Post("*", ws.ServiceAccountHandler) 79 | 80 | return ws 81 | } 82 | 
83 | func (ws *Server) Run() { 84 | go func() { 85 | ws.log.Infof("Starting service account server: %s:%d", ws.Host, ws.Port) 86 | ws.Running = true 87 | 88 | if err := ws.app.Listen(fmt.Sprintf("%s:%d", ws.Host, ws.Port)); err != nil { 89 | ws.log.WithError(err).Error("Service account server failed...") 90 | } 91 | 92 | ws.Running = false 93 | }() 94 | } 95 | 96 | func (ws *Server) Stop() { 97 | if err := ws.app.Shutdown(); err != nil { 98 | ws.log.WithError(err).Error("Failed shutting down service account server...") 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /web/struct.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "github.com/gofiber/fiber/v2" 5 | "github.com/l3uddz/crop/rclone" 6 | "github.com/sirupsen/logrus" 7 | "sync" 8 | "time" 9 | ) 10 | 11 | type ServiceAccountCache struct { 12 | cache map[string]*ServiceAccountCacheEntry 13 | sync.Mutex 14 | } 15 | 16 | type ServiceAccountCacheEntry struct { 17 | ResponseServiceAccount string 18 | Expires time.Time 19 | Hits int 20 | } 21 | 22 | type Server struct { 23 | Host string 24 | Port int 25 | Running bool 26 | app *fiber.App 27 | log *logrus.Entry 28 | name string 29 | sa *rclone.ServiceAccountManager 30 | saCache *ServiceAccountCache 31 | } 32 | 33 | type FreePortCache struct { 34 | pCache map[int]int 35 | sync.Mutex 36 | } 37 | 38 | type ServiceAccountRequest struct { 39 | OldServiceAccount string `json:"old"` 40 | Remote string `json:"remote"` 41 | } 42 | --------------------------------------------------------------------------------