├── .gitignore ├── LICENSE ├── README.md ├── build.sbt ├── project ├── build.properties └── plugins.sbt ├── src └── main │ ├── resources │ └── conf │ │ └── filesync.conf │ └── scala │ └── sync │ ├── Config.scala │ ├── FileSync.scala │ ├── TestFIleGenerator.scala │ ├── client │ ├── Client.scala │ ├── ClientFilePumpingChannel.scala │ ├── FileSynchronizationWorker.scala │ ├── FileSynchronizer.scala │ └── SafeDefaultFileRegion.scala │ └── server │ ├── FileUploadingService.scala │ ├── FileUploadingServiceImpl.scala │ ├── Server.scala │ ├── ServerFileSaveChannel.scala │ ├── UploadingManager.scala │ └── UploadingMonitor.scala └── test.text /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### SBT template 3 | # Simple Build Tool 4 | # http://www.scala-sbt.org/release/docs/Getting-Started/Directories.html#configuring-version-control 5 | 6 | dist/* 7 | target/ 8 | lib_managed/ 9 | src_managed/ 10 | project/boot/ 11 | project/plugins/project/ 12 | .history 13 | .cache 14 | .lib/ 15 | ### Scala template 16 | *.class 17 | *.log 18 | ### JetBrains template 19 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm 20 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 21 | 22 | # User-specific stuff 23 | .idea/**/workspace.xml 24 | .idea/**/tasks.xml 25 | .idea/**/usage.statistics.xml 26 | .idea/**/dictionaries 27 | .idea/**/shelf 28 | 29 | # Sensitive or high-churn files 30 | .idea/**/dataSources/ 31 | .idea/**/dataSources.ids 32 | .idea/**/dataSources.local.xml 33 | .idea/**/sqlDataSources.xml 34 | .idea/**/dynamic.xml 35 | .idea/**/uiDesigner.xml 36 | .idea/**/dbnavigator.xml 37 | 38 | # Gradle 39 | .idea/**/gradle.xml 40 | .idea/**/libraries 41 | 42 | # Gradle and Maven with auto-import 43 | # When using Gradle or Maven with auto-import, you should exclude module files, 44 | # since they will 
be recreated, and may cause churn. Uncomment if using 45 | # auto-import. 46 | # .idea/modules.xml 47 | # .idea/*.iml 48 | # .idea/modules 49 | 50 | # CMake 51 | cmake-build-*/ 52 | 53 | # Mongo Explorer plugin 54 | .idea/**/mongoSettings.xml 55 | 56 | # File-based project format 57 | *.iws 58 | 59 | # IntelliJ 60 | out/ 61 | 62 | # mpeltonen/sbt-idea plugin 63 | .idea_modules/ 64 | 65 | # JIRA plugin 66 | atlassian-ide-plugin.xml 67 | 68 | # Cursive Clojure plugin 69 | .idea/replstate.xml 70 | 71 | # Crashlytics plugin (for Android Studio and IntelliJ) 72 | com_crashlytics_export_strings.xml 73 | crashlytics.properties 74 | crashlytics-build.properties 75 | fabric.properties 76 | 77 | # Editor-based Rest Client 78 | .idea/httpRequests 79 | 80 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # FileSync 2 | 3 | Do file uploading with zero copy with Netty and coordination with Akka. 4 | 5 | A quick demo to do concurrent zero copy file uploading with netty 6 | 7 | A quick demo written in hours :) with Akka,just for sharing nothing more. 
-------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | enablePlugins(JavaAppPackaging) 2 | enablePlugins(UniversalPlugin) 3 | enablePlugins(LinuxPlugin) 4 | 5 | name := "FileSync" 6 | 7 | version := "0.1" 8 | 9 | scalaVersion := "2.12.8" 10 | 11 | // https://mvnrepository.com/artifact/com.typesafe.akka/akka-stream 12 | libraryDependencies += "com.typesafe.akka" %% "akka-stream" % "2.5.19" 13 | // https://mvnrepository.com/artifact/io.netty/netty-all 14 | libraryDependencies += "io.netty" % "netty-all" % "4.1.32.Final" 15 | 16 | maintainer := "your.name@company.org" 17 | 18 | import LinuxPlugin._ 19 | 20 | mapGenericFilesToLinux -------------------------------------------------------------------------------- /project/build.properties: -------------------------------------------------------------------------------- 1 | sbt.version = 1.2.7 -------------------------------------------------------------------------------- /project/plugins.sbt: -------------------------------------------------------------------------------- 1 | addSbtPlugin("com.typesafe.sbt" % "sbt-native-packager" % "1.3.15") 2 | -------------------------------------------------------------------------------- /src/main/resources/conf/filesync.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/He-Pin/filesync/c728ae87d7ae963d0daf205fd06aeab4ff7617ac/src/main/resources/conf/filesync.conf -------------------------------------------------------------------------------- /src/main/scala/sync/Config.scala: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import java.nio.file.Paths 4 | 5 | import com.typesafe.config.ConfigFactory 6 | 7 | /** 8 | * @author hepin1989 9 | **/ 10 | object Config { 11 | private val underling = ConfigFactory.load("conf/filesync.conf") 12 
| 13 | lazy val userHome = Paths.get(underling.getString("user.home")) 14 | .toAbsolutePath 15 | 16 | //TODO make if configurable 17 | lazy val generateTo = userHome.resolve("source.text") 18 | 19 | lazy val saveTo = userHome.resolve("save") 20 | 21 | } 22 | -------------------------------------------------------------------------------- /src/main/scala/sync/FileSync.scala: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import scala.annotation.tailrec 4 | 5 | /** 6 | * @author hepin1989 7 | **/ 8 | object FileSync { 9 | @tailrec 10 | def main(args: Array[String]): Unit = { 11 | printBanner() 12 | args.headOption match { 13 | case Some(head) => head match { 14 | case "gen" => 15 | TestFIleGenerator.main0(args.drop(1)) 16 | case "server" => 17 | server.Server.main0(args.drop(1)) 18 | case "connect" => 19 | client.Client.main0(args.drop(1)) 20 | case _ => 21 | sys.error(s"unsupported role:$head") 22 | printBanner() 23 | } 24 | case None => 25 | println("Nothing in arguments") 26 | printBanner() 27 | println("please input by hands now.") 28 | print("$>") 29 | val inputs = scala.io.StdIn.readLine().split(' ') 30 | main(inputs) 31 | } 32 | 33 | } 34 | 35 | def printBanner(): Unit = { 36 | println( 37 | """ 38 | |usage: 39 | | 40 | |gen to generate file 41 | |server run as server 42 | |connect ip:port connections run as client and connect to remote server 43 | | 44 | """.stripMargin) 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/main/scala/sync/TestFIleGenerator.scala: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import java.nio.charset.Charset 4 | 5 | import akka.actor.ActorSystem 6 | import akka.stream.scaladsl.{FileIO, Source} 7 | import akka.stream.{ActorMaterializer, OverflowStrategy} 8 | import akka.util.ByteString 9 | 10 | import scala.concurrent.Await 11 | import 
scala.concurrent.duration.Duration 12 | 13 | /** 14 | * @author hepin1989 15 | **/ 16 | object TestFIleGenerator { 17 | def main0(args: Array[String]): Unit = { 18 | //generating file 19 | implicit val system = ActorSystem("fileSync") 20 | implicit val materilizer: ActorMaterializer = ActorMaterializer() 21 | 22 | //Source 23 | val charset = Charset.forName("ASCII") 24 | val contentSource = Source.fromIterator(() => (1 to 100000000).toIterator) 25 | .map(str => ByteString(new StringBuffer(str.toString).append('\n').toString, charset)) 26 | .buffer(10000, overflowStrategy = OverflowStrategy.backpressure) 27 | .conflate((bs1, bs2) => bs1 ++ bs2) 28 | .buffer(8, overflowStrategy = OverflowStrategy.backpressure) 29 | .async 30 | 31 | //sink 32 | val fileSink = FileIO.toPath(Config.generateTo) 33 | println(s"starting generate file from content source to :[${Config.generateTo}].") 34 | //run 35 | val startTime = System.currentTimeMillis() 36 | val mat = contentSource.runWith(fileSink) 37 | val fileIOResult = Await.result(mat, Duration.Inf) 38 | println("final result :" + fileIOResult) 39 | materilizer.shutdown() 40 | Await.result(system.terminate(), Duration.Inf) 41 | println(s"times :${(System.currentTimeMillis() - startTime) / 1000}s") 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/main/scala/sync/client/Client.scala: -------------------------------------------------------------------------------- 1 | package sync.client 2 | 3 | import java.net.InetSocketAddress 4 | 5 | import akka.Done 6 | import akka.actor.ActorSystem 7 | import sync.Config 8 | 9 | import scala.concurrent.duration.Duration 10 | import scala.concurrent.{Await, Promise} 11 | 12 | /** 13 | * @author hepin1989 14 | **/ 15 | object Client { 16 | def main0(args: Array[String]): Unit = { 17 | val startTime = System.currentTimeMillis() 18 | val arguments = args 19 | println( 20 | """ 21 | |usage: 22 | |input server address:ip:port connections 23 | | 
24 | """.stripMargin) 25 | 26 | val actorSystem = ActorSystem("file-sync") 27 | 28 | val addr = if (arguments.isEmpty) { 29 | println("input server address in: ip:port please.") 30 | scala.io.StdIn.readLine() 31 | } else { 32 | arguments(0) 33 | } 34 | val Array(ip, port) = addr.split(':') 35 | val connections = arguments.drop(1).headOption.map(_.toInt).getOrElse(4) 36 | // 37 | val serverAddress = new InetSocketAddress(ip, port.toInt) 38 | val filePath = Config.generateTo 39 | val donePromise = Promise[Done]() 40 | val fileSynchronizer = actorSystem.actorOf(props = FileSynchronizer.props( 41 | serverAddress = serverAddress, 42 | filePath = filePath, 43 | sliceCount = connections, 44 | donePromise = donePromise 45 | )) 46 | Await.result(donePromise.future, Duration.Inf) 47 | println(s"time = ${System.currentTimeMillis() - startTime} ms") 48 | //TODO manager 49 | //TODO while with byte 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /src/main/scala/sync/client/ClientFilePumpingChannel.scala: -------------------------------------------------------------------------------- 1 | package sync.client 2 | 3 | import java.io.RandomAccessFile 4 | import java.nio.channels.FileChannel 5 | import java.nio.file.Path 6 | 7 | import io.netty.buffer.ByteBuf 8 | import io.netty.channel._ 9 | import io.netty.util.concurrent.{Future, GenericFutureListener} 10 | 11 | import scala.concurrent.Promise 12 | 13 | /** 14 | * @author hepin1989 15 | **/ 16 | class ClientFilePumpingChannel(filePath: Path, 17 | totalSliceCount: Int, 18 | sliceIndex: Int, 19 | randomAccessFile: RandomAccessFile, 20 | fileChannel: FileChannel, 21 | start: Long, 22 | sliceLength: Long, 23 | uniqTaskId: String, 24 | donePromise: Promise[(Int, Channel)]) extends ChannelDuplexHandler { 25 | override def channelActive(ctx: ChannelHandlerContext): Unit = { 26 | println(s"channel active for slice index:$sliceIndex position:$start length:$sliceLength") 27 | 
//连接成功后,发送第一个指令,格式: 28 | //协议长度-长度-任务id-长度-文件名-文件长度-总分片-当前分片Index-当前分片位置-当前分片大小 29 | //Short-Byte-String-Byte-String-Long-Byte-Byte-Long-Long 30 | //--2-----1-----x-----1-----x-----8---1----1-----8----8- 31 | val fileNameBytes = filePath.getFileName.toString.getBytes 32 | val taskIdBytes = uniqTaskId.getBytes 33 | 34 | val protocol = ctx.alloc().directBuffer( 35 | 2 + 1 + taskIdBytes.length + 1 + fileNameBytes.length + 8 + 1 + 1 + 8 + 8) 36 | protocol.writerIndex(2) 37 | protocol.writeByte(taskIdBytes.length) 38 | .writeBytes(taskIdBytes) 39 | .writeByte(fileNameBytes.length) 40 | .writeBytes(fileNameBytes) 41 | .writeLong(randomAccessFile.length()) 42 | .writeByte(totalSliceCount) //不支持超过128的分片 43 | .writeByte(sliceIndex) 44 | .writeLong(start) 45 | .writeLong(sliceLength) 46 | 47 | protocol.setShort(0, protocol.readableBytes() - 2) 48 | // 49 | ctx.channel().writeAndFlush(protocol).addListener(new GenericFutureListener[Future[_ >: Void]] { 50 | override def operationComplete(future: Future[_ >: Void]): Unit = { 51 | if (future.isSuccess) { 52 | //连接后发送的第一个包之后,才开始发送文件本身 53 | println( 54 | s""" 55 | |starting to send... 
56 | |fileName: ${filePath.getFileName} 57 | |slice: $sliceIndex 58 | |total: $totalSliceCount 59 | |length: $sliceLength 60 | |position: $start 61 | |uniqTaskId: $uniqTaskId 62 | """.stripMargin) 63 | 64 | ctx.writeAndFlush(new SafeDefaultFileRegion(fileChannel, start, sliceLength)) 65 | 66 | // ctx.executor().scheduleAtFixedRate( 67 | // () => ctx.channel().flush(), 68 | // 10, 69 | // 10, 70 | // TimeUnit.MILLISECONDS 71 | // ) 72 | } else { 73 | future.cause().printStackTrace() 74 | } 75 | } 76 | }) 77 | super.channelActive(ctx) 78 | } 79 | 80 | override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = { 81 | val byteBuf = msg.asInstanceOf[ByteBuf] 82 | //第一个字节作为ack 83 | donePromise.trySuccess((byteBuf.readByte().toInt, ctx.channel())) 84 | //TODO 85 | ctx.channel().close() 86 | } 87 | 88 | override def channelInactive(ctx: ChannelHandlerContext): Unit = { 89 | println(s"channel inactive for slice index:$sliceIndex position:$start length:$sliceLength") 90 | super.channelInactive(ctx) 91 | } 92 | 93 | override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit = { 94 | cause.printStackTrace() 95 | super.exceptionCaught(ctx, cause) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /src/main/scala/sync/client/FileSynchronizationWorker.scala: -------------------------------------------------------------------------------- 1 | package sync.client 2 | 3 | import java.io.RandomAccessFile 4 | import java.net.InetSocketAddress 5 | import java.nio.channels.FileChannel 6 | import java.nio.file.Path 7 | 8 | import akka.actor.{Actor, ActorRef, Props} 9 | import io.netty.bootstrap.Bootstrap 10 | import io.netty.channel.nio.NioEventLoopGroup 11 | import io.netty.channel.socket.SocketChannel 12 | import io.netty.channel.socket.nio.NioSocketChannel 13 | import io.netty.channel.{Channel, ChannelInitializer} 14 | 15 | import scala.concurrent.{ExecutionContextExecutor, Future, Promise} 16 | 17 | 
/** 18 | * @author hepin1989 19 | **/ 20 | class FileSynchronizationWorker(serverAddress: InetSocketAddress, 21 | filePath: Path, 22 | totalSliceCount: Int, 23 | sliceIndex: Int, 24 | randomAccessFile: RandomAccessFile, 25 | fileChannel: FileChannel, 26 | start: Long, 27 | sliceLength: Long, 28 | uniqTaskId: String) extends Actor { 29 | 30 | import FileSynchronizationWorker._ 31 | 32 | override def receive: Receive = { 33 | case Start => 34 | handleStartCommand() 35 | } 36 | 37 | private def handleStartCommand(): Unit = { 38 | val bootStrap = new Bootstrap 39 | bootStrap.group(group) 40 | val promise = Promise[(Int, Channel)]() 41 | bootStrap.channel(classOf[NioSocketChannel]) 42 | .handler(new ChannelInitializer[SocketChannel] { 43 | override def initChannel(ch: SocketChannel): Unit = { 44 | val pipeline = ch.pipeline() 45 | pipeline.addLast(new ClientFilePumpingChannel( 46 | filePath = filePath, 47 | totalSliceCount = totalSliceCount, 48 | sliceIndex = sliceIndex, 49 | randomAccessFile = randomAccessFile, 50 | fileChannel = fileChannel, 51 | start = start, 52 | sliceLength = sliceLength, 53 | uniqTaskId = uniqTaskId, 54 | donePromise = promise 55 | )) 56 | } 57 | }) 58 | .connect(serverAddress) 59 | import akka.pattern.pipe 60 | implicit val dispatcher: ExecutionContextExecutor = context.dispatcher 61 | promise.future.pipeTo(self)(ActorRef.noSender) 62 | context.become(waitingTransferDone(promise.future)) 63 | } 64 | 65 | def waitingTransferDone(future: Future[(Int, Channel)]): Receive = { 66 | case (code: Int, channel: Channel) => 67 | println(s"transfer done of slice :$sliceIndex,code:$code,channel:$channel") 68 | context.parent ! SliceDone(sliceIndex, code == 0, code) 69 | } 70 | 71 | override def preStart(): Unit = { 72 | super.preStart() 73 | self ! 
Start 74 | } 75 | } 76 | 77 | object FileSynchronizationWorker { 78 | def props(serverAddress: InetSocketAddress, 79 | filePath: Path, 80 | totalSliceCount: Int, 81 | sliceIndex: Int, 82 | randomAccessFile: RandomAccessFile, 83 | fileChannel: FileChannel, 84 | start: Long, 85 | sliceLength: Long, 86 | uniqTaskId: String): Props = { 87 | Props(new FileSynchronizationWorker( 88 | serverAddress, 89 | filePath, 90 | totalSliceCount, 91 | sliceIndex, 92 | randomAccessFile, 93 | fileChannel, 94 | start, 95 | sliceLength, 96 | uniqTaskId 97 | )) 98 | } 99 | 100 | private lazy val group: NioEventLoopGroup = new NioEventLoopGroup() 101 | 102 | sealed trait Command 103 | 104 | private[client] case object Start 105 | 106 | sealed trait Event 107 | 108 | private[client] final case class SliceDone(sliceIndex: Int, isSuccess: Boolean, code: Int) extends Event 109 | 110 | } 111 | -------------------------------------------------------------------------------- /src/main/scala/sync/client/FileSynchronizer.scala: -------------------------------------------------------------------------------- 1 | package sync.client 2 | 3 | import java.io.RandomAccessFile 4 | import java.net.InetSocketAddress 5 | import java.nio.channels.FileChannel 6 | import java.nio.file.Path 7 | import java.util.UUID 8 | 9 | import akka.Done 10 | import akka.actor.{Actor, Props} 11 | import sync.client.FileSynchronizationWorker.SliceDone 12 | 13 | import scala.concurrent.Promise 14 | 15 | /** 16 | * @author hepin1989 17 | **/ 18 | class FileSynchronizer(serverAddress: InetSocketAddress, 19 | filePath: Path, 20 | sliceCount: Int, 21 | donePromise: Promise[Done]) extends Actor { 22 | private var randomAccessFile: RandomAccessFile = _ 23 | private var fileChannel: FileChannel = _ 24 | 25 | import FileSynchronizer._ 26 | 27 | override def receive: Receive = { 28 | case StartSync => 29 | println(s"start sync file :$filePath with slice count $sliceCount") 30 | //start with x child to sync 31 | handleStartSyncCommand() 
32 | } 33 | 34 | def handleStartSyncCommand(): Unit = { 35 | randomAccessFile = new RandomAccessFile(filePath.toFile, "r") 36 | fileChannel = randomAccessFile.getChannel 37 | val fileLength = randomAccessFile.length() 38 | val step: Long = fileLength / sliceCount 39 | var currentPosition: Long = 0 40 | val uniqTaskId = UUID.randomUUID().toString 41 | for (sliceIndex <- 0 until sliceCount) { 42 | val start = currentPosition 43 | val sliceLength = if (sliceIndex != sliceCount - 1) step else fileLength - currentPosition 44 | // 45 | val childProps = FileSynchronizationWorker.props( 46 | serverAddress = serverAddress, 47 | filePath = filePath, 48 | totalSliceCount = sliceCount, 49 | sliceIndex = sliceIndex, 50 | randomAccessFile = randomAccessFile, 51 | fileChannel = fileChannel, 52 | start = start, 53 | sliceLength = sliceLength, 54 | uniqTaskId = uniqTaskId 55 | ) 56 | val child = context.actorOf(childProps, s"slice-$sliceIndex") 57 | context.watch(child) 58 | currentPosition += step 59 | } 60 | // 61 | context.become(waitingSubTaskAllComplete(uniqTaskId, sliceCount)) 62 | } 63 | 64 | def waitingSubTaskAllComplete(uniqTaskId: String, remaining: Int): Receive = { 65 | case SliceDone(sliceIndex, isSuccess, code) => 66 | println(s"slice: $sliceIndex of task: $uniqTaskId complete with $isSuccess,code :$code") 67 | if (!isSuccess) { 68 | donePromise.tryFailure( 69 | new IllegalStateException(s"slice $sliceIndex of task:$uniqTaskId is failure with code:$code")) 70 | context.stop(self) 71 | } else if (remaining - 1 == 0) { 72 | println(s"task: $uniqTaskId with task id all complete.") 73 | println(s"release file:$filePath") 74 | fileChannel.close() 75 | randomAccessFile.close() 76 | context.stop(self) 77 | donePromise.trySuccess(Done) 78 | } else { 79 | context.become(waitingSubTaskAllComplete(uniqTaskId, remaining - 1)) 80 | } 81 | } 82 | 83 | override def preStart(): Unit = { 84 | super.preStart() 85 | //TODO check is file 86 | this.randomAccessFile = new 
RandomAccessFile(filePath.toFile, "r") 87 | this.fileChannel = randomAccessFile.getChannel 88 | self ! StartSync 89 | } 90 | } 91 | 92 | object FileSynchronizer { 93 | 94 | def props(serverAddress: InetSocketAddress, 95 | filePath: Path, 96 | sliceCount: Int, 97 | donePromise: Promise[Done]): Props = { 98 | Props(new FileSynchronizer(serverAddress: InetSocketAddress, 99 | filePath: Path, 100 | sliceCount: Int, 101 | donePromise)) 102 | } 103 | 104 | sealed trait Command 105 | 106 | private[client] case object StartSync 107 | 108 | 109 | } 110 | -------------------------------------------------------------------------------- /src/main/scala/sync/client/SafeDefaultFileRegion.scala: -------------------------------------------------------------------------------- 1 | package sync.client 2 | 3 | import java.nio.channels.FileChannel 4 | 5 | import io.netty.channel.DefaultFileRegion 6 | 7 | /** 8 | * @author hepin1989 9 | **/ 10 | class SafeDefaultFileRegion(file: FileChannel, position: Long, count: Long) extends 11 | DefaultFileRegion(file, position, count) { 12 | override def deallocate(): Unit = { 13 | //deallocate by hand, otherwise will fail 14 | //the underling file is released by an Akka actor once the uploading is done. 
15 | //super.deallocate() 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /src/main/scala/sync/server/FileUploadingService.scala: -------------------------------------------------------------------------------- 1 | package sync.server 2 | 3 | import akka.NotUsed 4 | 5 | import scala.concurrent.Future 6 | 7 | /** 8 | * @author hepin1989 9 | **/ 10 | trait FileUploadingService { 11 | 12 | def getWritableFileChannel(uniqTaskId: String, 13 | fileName: String, 14 | fileLength: Long, 15 | sliceIndex: Int, 16 | totalSliceCount: Int): Future[GetFileChannelResult] 17 | 18 | def commitSlice(uniqTaskId: String, 19 | sliceIndex: Int, 20 | code: Int): Future[NotUsed] 21 | } 22 | -------------------------------------------------------------------------------- /src/main/scala/sync/server/FileUploadingServiceImpl.scala: -------------------------------------------------------------------------------- 1 | package sync.server 2 | 3 | import java.nio.file.Path 4 | import java.util.concurrent.TimeUnit 5 | 6 | import akka.NotUsed 7 | import akka.actor.{ActorRef, ActorSystem} 8 | import akka.util.Timeout 9 | 10 | import scala.concurrent.Future 11 | 12 | /** 13 | * @author hepin1989 14 | **/ 15 | class FileUploadingServiceImpl(actorSystem: ActorSystem, 16 | baseFolder: Path) extends FileUploadingService { 17 | private val managerRef: ActorRef = actorSystem.actorOf(UploadingManager.props(baseFolder)) 18 | 19 | override def getWritableFileChannel(uniqTaskId: String, 20 | fileName: String, 21 | fileLength: Long, 22 | sliceIndex: Int, 23 | totalSliceCount: Int): Future[GetFileChannelResult] = { 24 | import akka.pattern.ask 25 | implicit val timeout: Timeout = Timeout(10, TimeUnit.SECONDS) 26 | (managerRef ? 
GetFileChannel(uniqTaskId, fileName, fileLength, sliceIndex, totalSliceCount)) 27 | .mapTo[GetFileChannelResult] 28 | } 29 | 30 | override def commitSlice(uniqTaskId: String, sliceIndex: Int, code: Int): Future[NotUsed] = { 31 | import akka.pattern.ask 32 | implicit val timeout: Timeout = Timeout(10, TimeUnit.SECONDS) 33 | (managerRef ? SliceWriteDone(uniqTaskId, sliceIndex, code)) 34 | .mapTo[NotUsed] 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /src/main/scala/sync/server/Server.scala: -------------------------------------------------------------------------------- 1 | package sync.server 2 | 3 | 4 | import akka.actor.ActorSystem 5 | import io.netty.bootstrap.ServerBootstrap 6 | import io.netty.channel.ChannelInitializer 7 | import io.netty.channel.nio.NioEventLoopGroup 8 | import io.netty.channel.socket.SocketChannel 9 | import io.netty.channel.socket.nio.NioServerSocketChannel 10 | import sync.Config 11 | 12 | /** 13 | * @author hepin1989 14 | **/ 15 | object Server { 16 | def main0(args: Array[String]): Unit = { 17 | val actorSystem = ActorSystem("fileSync-server") 18 | val fileUploadingService = new FileUploadingServiceImpl( 19 | actorSystem, 20 | Config.saveTo) 21 | 22 | val group = new NioEventLoopGroup() 23 | val bootstrap = new ServerBootstrap() 24 | 25 | bootstrap.childHandler(new ChannelInitializer[SocketChannel] { 26 | override def initChannel(ch: SocketChannel): Unit = { 27 | val pipeline = ch.pipeline() 28 | pipeline.addLast(new ServerFileSaveChannel(fileUploadingService)) 29 | } 30 | }) 31 | bootstrap.group(group, group) 32 | .channel(classOf[NioServerSocketChannel]) 33 | .localAddress("0.0.0.0", 0) 34 | val channelFuture = bootstrap.bind() 35 | val channel = channelFuture.awaitUninterruptibly().channel() 36 | println(s"server start at :[$channel]") 37 | } 38 | } 39 | -------------------------------------------------------------------------------- 
/src/main/scala/sync/server/ServerFileSaveChannel.scala:
--------------------------------------------------------------------------------
package sync.server

import java.nio.MappedByteBuffer
import java.nio.channels.FileChannel
import java.nio.channels.FileChannel.MapMode
import java.nio.charset.Charset

import io.netty.buffer.ByteBuf
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter}
import io.netty.util.ReferenceCountUtil

import scala.concurrent.Await
import scala.concurrent.duration.Duration

/**
 * Inbound handler for one slice-upload connection: first parses the header
 * ("protocol") that describes the slice, then streams the remaining bytes
 * straight into the target file at the slice's offset.
 *
 * @author hepin1989
 **/
class ServerFileSaveChannel(service: FileUploadingService) extends ChannelInboundHandlerAdapter {

  import ServerFileSaveChannel._

  private var fileChannel: FileChannel = _
  private var mappedFileBytes: MappedByteBuffer = _

  private var writtenBytesCount: Long = 0
  private var position: Long = 0
  private var protocolLength: Short = 0
  private var protocol: Protocol = _
  // Accumulates header bytes when they arrive fragmented; released as soon as
  // the header has been parsed (the original code leaked this buffer).
  private var cache: ByteBuf = _


  //First command sent right after the connection is established, layout:
  //protoLen-len-taskId-len-fileName-fileLen-totalSlices-sliceIndex-slicePos-sliceLen
  //Short-Byte-String-Byte-String-Long-Byte-Byte-Long-Long
  //--2-----1-----x-----1-----x-----8---1----1-----8----8-
  /**
   * Decodes the fixed header that follows the leading Short length field
   * (which the caller has already consumed).
   */
  def readProtocolBody(body: ByteBuf): Protocol = {
    val uniqTaskIdLength = body.readByte().toInt
    val uniqTaskId = body.readCharSequence(uniqTaskIdLength, Charset.defaultCharset()).toString
    val fileNameLength = body.readByte().toInt
    val fileName = body.readCharSequence(fileNameLength, Charset.defaultCharset()).toString
    val fileLength = body.readLong()
    val totalSliceCount = body.readByte().toInt
    val sliceIndex = body.readByte().toInt
    val startPosition = body.readLong()
    val sliceLength = body.readLong()
    Protocol(
      uniqTaskId = uniqTaskId,
      fileName = fileName,
      fileLength = fileLength,
      sliceIndex = sliceIndex,
      totalSliceCount = totalSliceCount,
      startPosition = startPosition,
      sliceLength = sliceLength
    )
  }

  override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = {
    val byteBuf = msg.asInstanceOf[ByteBuf]
    try {
      if (byteBuf.readableBytes() != 0) {
        if (protocol eq null) {
          handleProtocol(byteBuf, ctx)
        } else {
          write2File(byteBuf, ctx)
        }
      }
    } finally {
      // Release unconditionally: the original only released non-empty
      // buffers, leaking any that arrived with zero readable bytes.
      ReferenceCountUtil.release(msg)
    }
  }

  /**
   * Consumes the header, buffering in `cache` while it is still incomplete.
   * Once parsed, any leftover bytes already belong to the payload and are
   * forwarded to write2File; the accumulation buffer is then released.
   */
  def handleProtocol(byteBuf: ByteBuf, ctx: ChannelHandlerContext): Unit = {
    if (protocolLength == 0) {
      //happy path: length field readable directly from the incoming buffer
      if ((cache eq null) && byteBuf.readableBytes() >= 2) {
        protocolLength = byteBuf.readShort()
        if (byteBuf.readableBytes() >= protocolLength) {
          protocol = readProtocolBody(byteBuf)
          updatePositionAsProtocol(protocol)
          write2File(byteBuf, ctx)
        } else {
          // partial header body: stash it until more bytes arrive
          if (cache eq null) {
            cache = ctx.alloc().directBuffer(32)
          }
          cache.writeBytes(byteBuf)
        }
      } else {
        // length field itself may be fragmented: accumulate first
        if (cache eq null) {
          cache = ctx.alloc().buffer(64)
        }
        cache.writeBytes(byteBuf)
        if (cache.readableBytes() >= 2) {
          protocolLength = cache.readShort()
          if (cache.readableBytes() >= protocolLength) {
            protocol = readProtocolBody(cache)
            updatePositionAsProtocol(protocol)
            write2File(cache, ctx)
            releaseCache() // header fully consumed; fixes the original leak
          }
        }
      }
    } else {
      // length already known, still waiting for the full header body
      cache.writeBytes(byteBuf)
      if (cache.readableBytes() >= protocolLength) {
        protocol = readProtocolBody(cache)
        updatePositionAsProtocol(protocol)
        write2File(cache, ctx)
        releaseCache() // header fully consumed; fixes the original leak
      }
    }
  }

  // Frees the header accumulation buffer exactly once.
  private def releaseCache(): Unit = {
    if (cache ne null) {
      cache.release()
      cache = null
    }
  }

  private def updatePositionAsProtocol(protocol: Protocol): Unit = {
    position = protocol.startPosition
  }

  /**
   * Appends the readable bytes of `byteBuf` to the target file at the current
   * slice position, lazily resolving the shared FileChannel on first write.
   * When the whole slice has been written, acks the client with one 0 byte
   * and commits the slice to the monitor.
   */
  private def write2File(byteBuf: ByteBuf, ctx: ChannelHandlerContext): Unit = {
    if (byteBuf.readableBytes() != 0) {
      if (fileChannel eq null) {
        val future = service.getWritableFileChannel(
          uniqTaskId = protocol.uniqTaskId,
          fileName = protocol.fileName,
          fileLength = protocol.fileLength,
          sliceIndex = protocol.sliceIndex,
          totalSliceCount = protocol.totalSliceCount
        )
        //TODO(review): Await on the Netty event loop blocks every channel on
        //this loop; consider buffering until the future completes instead.
        val getFileChannelResult = Await.result(future, Duration.Inf)
        fileChannel = getFileChannelResult.fileChannel
      }

      var readableBytes = byteBuf.readableBytes()
      writtenBytesCount += readableBytes

      // readBytes may write fewer bytes than requested, so loop until the
      // incoming buffer is drained, advancing the file offset as we go
      while (readableBytes > 0) {
        position += byteBuf.readBytes(fileChannel, position, readableBytes)
        readableBytes = byteBuf.readableBytes()
      }

      //mapped — alternative memory-mapped write path, kept for reference
      // if (mappedFileBytes eq null){
      //   mappedFileBytes = fileChannel.map(MapMode.READ_WRITE,position,protocol.sliceLength)
      // }
      // while (readableBytes > 0) {
      //   byteBuf.readBytes(mappedFileBytes)
      //   readableBytes = byteBuf.readableBytes()
      // }
      //

      if (writtenBytesCount == protocol.sliceLength) {
        println(s"slice:${protocol.sliceIndex} of file:${protocol.fileName} done,commit to monitor.")
        //write finished: ack the client with a single 0 status byte
        val code = byteBuf.alloc().ioBuffer(1)
        code.writeByte(0)
        ctx.channel().writeAndFlush(code)
        //NOTE(review): this future is fire-and-forget; a failed commit is
        //never surfaced to the client — confirm this is acceptable.
        service.commitSlice(
          uniqTaskId = protocol.uniqTaskId,
          sliceIndex = protocol.sliceIndex,
          code = 0)
      }
      if (protocol.sliceLength - writtenBytesCount < 1000) {
        println(s"remaining :${protocol.sliceLength - writtenBytesCount}")
      }
    }
  }

  override def channelActive(ctx: ChannelHandlerContext): Unit = {
    println(s"channel active: ${ctx.channel()}")
    super.channelActive(ctx)
  }

  override def channelInactive(ctx: ChannelHandlerContext): Unit = {
    // do not leak the header buffer if the peer disconnects mid-header
    releaseCache()
    super.channelInactive(ctx)
  }

  override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit = {
    cause.printStackTrace()
    super.exceptionCaught(ctx, cause)
  }
}

object ServerFileSaveChannel {

  //First command sent right after the connection is established, layout:
  //protoLen-len-taskId-len-fileName-fileLen-totalSlices-sliceIndex-slicePos-sliceLen
  //Short-Byte-String-Byte-String-Long-Byte-Byte-Long-Long
  //--2-----1-----x-----1-----x-----8---1----1-----8----8-
  final case class Protocol(uniqTaskId: String,
                            fileName: String,
                            fileLength: Long,
                            sliceIndex: Int,
                            totalSliceCount: Int,
                            startPosition: Long,
                            sliceLength: Long)

}
--------------------------------------------------------------------------------
/src/main/scala/sync/server/UploadingManager.scala:
--------------------------------------------------------------------------------
package sync.server

import java.nio.channels.FileChannel
import java.nio.file.Path

import akka.actor.Status.Failure
import akka.actor.{Actor, Props}

/**
 * Routes upload commands to one [[UploadingMonitor]] child per task id,
 * creating the child lazily on the first GetFileChannel for that task.
 *
 * @author hepin1989
 **/
class UploadingManager(baseFolder: Path) extends Actor {
  override def receive: Receive = {
    case command@GetFileChannel(uniqTaskId, fileName, fileLength, _, _) =>
      // child name == task id, so lookup-or-create is race-free inside the actor
      val uploadingMonitor = context.child(uniqTaskId)
        .getOrElse(context.actorOf(UploadingMonitor.props(
          baseFolder,
          uniqTaskId,
          fileName,
          fileLength
        ), uniqTaskId))
      uploadingMonitor.forward(command)
    case msg@SliceWriteDone(uniqTaskId, _, _, _) =>
      context.child(uniqTaskId) match {
        case Some(ref) =>
          ref.forward(msg)
        case None =>
          // a commit for an unknown task fails the asker instead of timing out
          sender() ! Failure(new IllegalArgumentException(s"no monitor found for $uniqTaskId"))
      }
    case msg =>
      println(msg)
  }
}

object UploadingManager {
  def props(baseFolder: Path): Props = {
    Props(new UploadingManager(baseFolder))
  }
}

private[server] sealed trait Command

private[server] final case class GetFileChannel(uniqTaskId: String,
                                                fileName: String,
                                                fileLength: Long,
                                                sliceIndex: Int,
                                                totalSliceCount: Int) extends Command

private[server] sealed trait Result

private[server] final case class GetFileChannelResult(fileChannel: FileChannel,
                                                      filePath: Path) extends Result

private[server] sealed trait Event

private[server] final case class SliceWriteDone(uniqTaskId: String,
                                                sliceIndex: Int,
                                                code: Int,
                                                msg: String = "") extends Event
--------------------------------------------------------------------------------
/src/main/scala/sync/server/UploadingMonitor.scala:
--------------------------------------------------------------------------------
package sync.server

import java.io.RandomAccessFile
import java.nio.channels.FileChannel
import java.nio.file.{Files, Path}

import akka.actor.{Actor, Props}
import sync.server.UploadingMonitor.UploadingTaskDone

/**
 * Owns the target file for one upload task: pre-allocates it in preStart,
 * hands its channel to every slice connection, and closes/commits it once
 * every slice has reported completion.
 *
 * @author hepin1989
 **/
class UploadingMonitor(baseFolder: Path,
                       uniqTaskId: String,
                       fileName: String,
                       fileLength: Long) extends Actor {
  private var randomAccessFile: RandomAccessFile = _
  private var fileChannel: FileChannel = _
  private var filePath: Path = _
  private var sliceInTheFlyCount = 0
  private var allSliceCount = 0
  private var sliceCompletedCount = 0

  override def receive: Receive = {
    case GetFileChannel(_, _, _, _, totalSliceCount) =>
      sender() ! GetFileChannelResult(fileChannel, filePath)
      if (allSliceCount == 0) {
        //TODO check that totalSliceCount is consistent across slices
        allSliceCount = totalSliceCount
      }
      sliceInTheFlyCount += 1
    case SliceWriteDone(taskId, sliceIndex, code, msg) =>
      sliceInTheFlyCount -= 1
      // NOTE(review): a non-zero `code` still counts toward completion here —
      // confirm failed slices are meant to finish the task.
      sliceCompletedCount += 1

      println(s"slice: $sliceIndex of task:$taskId done " +
        s"with code: $code,msg: $msg,slice in the fly count:$sliceInTheFlyCount")
      if (sliceCompletedCount == allSliceCount) {
        //all done: flush to disk, release handles, notify the manager
        fileChannel.force(true)
        fileChannel.close()
        randomAccessFile.close()
        println(s"task:$taskId all slice count:$allSliceCount done,saved to $filePath,committed.")
        context.parent ! UploadingTaskDone(taskId, filePath, fileLength)
        context.stop(self)
      }
  }

  override def preStart(): Unit = {
    filePath = baseFolder.resolve(s"$uniqTaskId-$fileName").toAbsolutePath
    if (Files.notExists(filePath.getParent)) {
      Files.createDirectories(filePath.getParent)
    }
    randomAccessFile = new RandomAccessFile(
      filePath.toString, "rw")
    // pre-size the file so slices can write at arbitrary offsets immediately
    randomAccessFile.setLength(fileLength)
    fileChannel = randomAccessFile.getChannel
    super.preStart()
  }
}

object UploadingMonitor {

  def props(baseFolder: Path,
            uniqTaskId: String,
            fileName: String,
            fileLength: Long): Props = {
    Props(new UploadingMonitor(baseFolder, uniqTaskId, fileName, fileLength))
  }

  private[server] sealed trait Event

  private[server] final case class UploadingTaskDone(uniqTaskId: String,
                                                     filePath: Path,
                                                     fileLength: Long) extends Event

}
--------------------------------------------------------------------------------
/test.text:
--------------------------------------------------------------------------------
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
--------------------------------------------------------------------------------