├── .gitignore ├── README.md ├── agent ├── agent.go └── handler.go ├── cmd ├── agent.sh ├── main.go └── server.sh ├── common ├── base.go ├── config.go └── http.go ├── config ├── agent.yml └── server.yml ├── docs ├── create_task.md ├── images │ ├── create_task.svg │ └── trans_file.svg └── trans_file.md ├── flowctrl ├── flowctrl.go ├── io.go └── utils.go ├── go.mod ├── go.sum ├── misc ├── server.crt └── server.key ├── p2p ├── api.go ├── bitset.go ├── cache.go ├── file.go ├── listen.go ├── meta.go ├── osfile.go ├── peer.go ├── piece.go ├── report.go ├── session.go ├── sessionmgnt.go └── utils.go └── server ├── api.go ├── handler.go ├── server.go └── taskmgnt.go /.gitignore: -------------------------------------------------------------------------------- 1 | .log4g_tmp 2 | .DS_Store 3 | .idea 4 | vendor 5 | /**/*.tar.gz -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | GoFD 2 | ========== 3 | 4 | ## 简介 5 | 6 | GoFD是一个使用Go语言开发,集中控制的文件分发系统,用于在一个大规模业务系统文件分发。 7 | 系统采用C/S结构,S端负责接收分发任务,把任务下发给C端,C端之间采用P2P技术节点之间共享加速下载。 8 | P2P部分代码参考了[Taipei-Torrent](https://github.com/jackpal/Taipei-Torrent/)。 9 | P2P是一个简化版本的BT协议实现,只使用了其中四个消息(HAVE,REQUEST,PIECE, BITFIELD),并不与BT协议兼容。 10 | 11 | 与BT软件不一样的是,GoFD是一个P2SP系统,固定存在S端,用于做文件下载源,S端也是集中控制端。 12 | S端与BT的Tracker机制也不一样,它不会维护节点的已下载的文件信息。C端下载文件完成之后也不会再做为种子。 13 | 节点之间也没有BT的激励机制,所以没有CHOKE与UNCHOKE消息。 14 | 15 | 16 | ## 第三方依赖 17 | 18 | * Web框架:[echo](https://github.com/labstack/echo) 19 | * 日志:[log4g](https://github.com/xtfly/log4g) 20 | * 工具库(Cache,Crypto等):[gokits](https://github.com/xtfly/gokits) 21 | * 配置(YAML):[yaml.v2](https://gopkg.in/yaml.v2) 22 | 23 | ## 使用方式 24 | 25 | ### 下载依赖与GoFD 26 | 27 | 本工程采用`go mod`来管理第三方依赖 28 | 29 | ``` 30 | export GOPROXY=https://goproxy.cn 31 | go get github.com/xtfly/gofd 32 | ``` 33 | 34 | ### 修改配置 35 | 36 | #### 配置日志 37 | 38 | 日志是采用[log4g](https://github.com/xtfly/log4g) 。 39 | 40 | #### 配置Server与Agent 41 | 42 | GoFD的Server与Agent的配置采用Yaml格式,其中涉及到文件路径需要采用绝对路径,Server样例如下: 43 | 44 | ``` 45 | name: server #名称 46 | log: /Users/xiao/gofd/config/log4g.yaml #可选,日志配置文件绝对路径 47 | net: 48 | ip: 127.0.0.1 #监听的IP 49 | mgntPort: 45000 #管理端口,用于接收客户端的创建任务等Rest接口 50 | dataPort: 45001 #服务端数据下载端口 51 | agentMgntPort: 45010 #Agent端的管理端口,用于接收Server下载的管理Rest接口 52 | agentDataPort: 45011 #Agent端的数据下载端口 53 | tls: #管理端口的TLS配置,如果没有配置,则管理端口是采用HTTP 54 | cert: /Users/xiao/server.crt 55 | key: /Users/xiao/server.key 56 | auth: 57 | username: gofd #管理端口与数据端口用于认证的用户名 58 | password: yrsK+2iiwPqecImH7obTUm1vhnvvQzFmYYiOz5oqaoc= #管理端口与数据端口用于认证的密码 59 | factor: 9427e80d # passwd加密密钥因子 60 | control: 61 | speed: 10 # 流量控制,单位为MBps 62 | cacheSize: 50 # 文件下载的内存缓存大小,单位为MB 63 | maxActive: 10 # 并发的任务数 64 | ``` 65 | 66 | Agent配置样例如下,其中Agent需要配置`downdir`,用于存放下载的文件。`contorl.speed`不需要配置,由Server在创建任务时传给Agent。 67 | 68 | ``` 69 | name: agent 70 | downdir: /Users/xiao/download 71 | log: /Users/xiao/config/log4g.yaml 72 | net: 73 | ip: 127.0.0.1 74 | mgntPort: 45010 75 | dataPort: 45011 76 | tls: 77 | cert: /Users/xiao/server.crt 78 | key: /Users/xiao/server.key 79 | auth: 80 | username: gofd 81 | password: yrsK+2iiwPqecImH7obTUm1vhnvvQzFmYYiOz5oqaoc= 82 | factor: 9427e80d 83 | control: 84 | cacheSize: 50 # unit is MB 85 | maxActive: 10 86 | ``` 87 | 88 | 使用命令行`gofd -p `生成加密密钥因子,密码: 89 | 90 | $ gofd -p gofd 91 | factor = 28711f5d 92 | stxt = BkrjWALvWhXrLjVXQMUDzyEcX7UpAdDG+uoedDOfeVo= 93 | 94 | ### 启动Server 95 | 96 | $ gofd -s /Users/xiao/gofd/config/server.yml 97 | 98 | ### 启动Agent 99 | 100 | $ gofd -a /Users/xiao/gofd/config/agent.yml 101 | 102 | ## 基本流程 103 | 104 | ### 创建任务 105 | 106 | [点击查看图片](docs/images/create_task.svg) 107 | 108 | #### Agent之间文件分发 109 | 110 | [点击查看图片](docs/images/trans_file.svg) 111 | 112 | ## 测试 113 | 114 | * 创建分发任务 115 | 116 | curl -l --insecure --basic -u "gofd:gofd" -H "Content-type: application/json" -X POST -d '{"id":"1","dispatchFiles":["/Users/xiao/archlinux.tar.gz"],"destIPs":["127.0.0.1"]}' https://127.0.0.1:45000/api/v1/server/tasks 117 | 118 | * 查询分发任务 119 | 120 | curl -l --insecure --basic -u "gofd:gofd" -H "Content-type: application/json" -X GET https://127.0.0.1:45000/api/v1/server/tasks/1 121 | 122 | * 取消分发任务 123 | 124 | curl -l --insecure --basic -u "gofd:gofd" -H "Content-type: application/json" -X DELETE https://127.0.0.1:45000/api/v1/server/tasks/1 -------------------------------------------------------------------------------- /agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/labstack/echo/v4/middleware" 6 | "github.com/xtfly/gofd/common" 7 | "github.com/xtfly/gofd/p2p" 8 | ) 9 | 10 | // Agent is the p2p client 11 | type Agent struct { 12 | *common.BaseService 13 | sessionMgnt *p2p.TaskSessionMgnt 14 | } 15 | 16 | // NewAgent return created Agent instance 17 | func NewAgent(cfg *common.Config) (*Agent, error) { 18 | c := &Agent{ 19 | sessionMgnt: p2p.NewSessionMgnt(cfg), 20 | } 21 | c.BaseService = common.NewBaseService(cfg, cfg.Name, c) 22 | return c, nil 23 | } 24 | 25 | // OnStart implements the Service interface 26 | func (c *Agent) OnStart(cfg *common.Config, e *echo.Echo) error { 27 | go func() { c.sessionMgnt.Start() }() 28 | 29 | e.Use(middleware.BasicAuth(c.Auth)) 30 | e.POST("/api/v1/agent/tasks", c.CreateTask) 31 | e.POST("/api/v1/agent/tasks/start", c.StartTask) 32 | e.DELETE("/api/v1/agent/tasks/:id", c.CancelTask) 33 | 34 | return nil 35 | } 36 | 37 | // OnStop implements the Service interface 38 | func (c *Agent) OnStop(cfg *common.Config, e *echo.Echo) { 39 | c.sessionMgnt.Stop() 40 | } 41 | -------------------------------------------------------------------------------- /agent/handler.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "github.com/labstack/echo/v4" 5 | "github.com/labstack/gommon/log" 6 | "github.com/xtfly/gofd/common" 7 | "github.com/xtfly/gofd/p2p" 8 | ) 9 | 10 | // CreateTask POST /api/v1/agent/tasks 11 | func (svc *Agent) CreateTask(c echo.Context) (err error) { 12 | // 获取Body 13 | dt := new(p2p.DispatchTask) 14 | if err = c.Bind(dt); err != nil { 15 | log.Errorf("Recv '%s' request, decode body failed. %v", c.Request().URL, err) 16 | return 17 | } 18 | 19 | common.LOG.Infof("[%s] Recv create task request", dt.TaskID) 20 | // 暂不检查任务是否重复下发 21 | svc.sessionMgnt.CreateTask(dt) 22 | return nil 23 | } 24 | 25 | // StartTask POST /api/v1/agent/tasks/start 26 | func (svc *Agent) StartTask(c echo.Context) (err error) { 27 | // 获取Body 28 | st := new(p2p.StartTask) 29 | if err = c.Bind(st); err != nil { 30 | common.LOG.Errorf("Recv '%s' request, decode body failed. %v", c.Request().URL, err) 31 | return 32 | } 33 | 34 | common.LOG.Infof("[%s] Recv start task request", st.TaskID) 35 | // 暂不检查任务是否重复下发 36 | svc.sessionMgnt.StartTask(st) 37 | return nil 38 | } 39 | 40 | // CancelTask DELETE /api/v1/agent/tasks/:id 41 | func (svc *Agent) CancelTask(c echo.Context) error { 42 | id := c.Param("id") 43 | common.LOG.Infof("[%s] Recv cancel task request", id) 44 | svc.sessionMgnt.StopTask(id) 45 | return nil 46 | } 47 | -------------------------------------------------------------------------------- /cmd/agent.sh: -------------------------------------------------------------------------------- 1 | go run -race main.go -a ../config/agent.yml 2 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | 9 | "github.com/xtfly/gofd/agent" 10 | "github.com/xtfly/gofd/common" 11 | "github.com/xtfly/gofd/server" 12 | "github.com/xtfly/gokits/gcrypto" 13 | "github.com/xtfly/gokits/grand" 14 | ) 15 | 16 | var ( 17 | a = flag.Bool("a", false, "start as a agent") 18 | s = flag.Bool("s", false, "start as a server") 19 | p = flag.String("p", "", "create a password encrypted by AES128") 20 | ) 21 | 22 | func usage() { 23 | fmt.Println("gofd [<-a|-s> ] [-p ]") 24 | flag.PrintDefaults() 25 | os.Exit(2) 26 | } 27 | 28 | func main() { 29 | flag.Parse() 30 | if !*a && !*s && *p == "" { 31 | fmt.Println("miss option") 32 | usage() 33 | } 34 | 35 | if *p != "" { 36 | factor := grand.NewRand(8) 37 | crypto, _ := gcrypto.NewCrypto(factor) 38 | stxt, _ := crypto.EncryptStr(*p) 39 | fmt.Println("factor =", factor) 40 | fmt.Println("stxt =", stxt) 41 | return 42 | } 43 | 44 | if flag.NArg() < 1 { 45 | fmt.Println("miss config file") 46 | usage() 47 | } 48 | 49 | cfgfile := flag.Args()[0] 50 | var cfg *common.Config 51 | var err error 52 | if cfg, err = common.ParserConfig(cfgfile, *s); err != nil { 53 | fmt.Printf("parser config file %s error, %s.\n", cfgfile, err.Error()) 54 | os.Exit(3) 55 | } 56 | 57 | var svc common.Service 58 | if *s { 59 | if svc, err = server.NewServer(cfg); err != nil { 60 | fmt.Printf("start server error, %s.\n", err.Error()) 61 | os.Exit(4) 62 | } 63 | } 64 | 65 | if *a { 66 | if svc, err = agent.NewAgent(cfg); err != nil { 67 | fmt.Printf("start agent error, %s.\n", err.Error()) 68 | os.Exit(4) 69 | } 70 | } 71 | 72 | if err = svc.Start(); err != nil { 73 | fmt.Printf("Start service failed, %s.\n", err.Error()) 74 | os.Exit(4) 75 | } 76 | 77 | quitChan := listenSigInt() 78 | select { 79 | case <-quitChan: 80 | fmt.Printf("got control-C") 81 | svc.Stop() 82 | } 83 | } 84 | 85 | func listenSigInt() chan os.Signal { 86 | c := make(chan os.Signal, 1) 87 | signal.Notify(c, os.Interrupt, os.Kill) 88 | return c 89 | } 90 | -------------------------------------------------------------------------------- /cmd/server.sh: -------------------------------------------------------------------------------- 1 | go run -race main.go -s ../config/server.yml 2 | -------------------------------------------------------------------------------- /common/base.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/labstack/echo/v4" 11 | "github.com/xtfly/log4g" 12 | ) 13 | 14 | // Service is a common service interface 15 | type Service interface { 16 | Start() error 17 | Stop() bool 18 | OnStart(c *Config, e *echo.Echo) error 19 | OnStop(c *Config, e *echo.Echo) 20 | IsRunning() bool 21 | } 22 | 23 | // BaseService is the basic service struct with config and status 24 | type BaseService struct { 25 | name string 26 | running uint32 // atomic 27 | Cfg *Config 28 | echo *echo.Echo 29 | svc Service 30 | } 31 | 32 | // NewBaseService return created a basic service instance 33 | func NewBaseService(cfg *Config, name string, svc Service) *BaseService { 34 | return &BaseService{ 35 | name: name, 36 | running: 0, 37 | Cfg: cfg, 38 | echo: echo.New(), 39 | svc: svc, 40 | } 41 | } 42 | 43 | // init log by config 44 | func (s *BaseService) initlog() { 45 | if s.Cfg.Log != "" { 46 | if err := log4g.GetManager().LoadConfigFile(s.Cfg.Log); err != nil { 47 | println("load log4g config failed") 48 | } 49 | } 50 | } 51 | 52 | func (s *BaseService) runEcho() (err error) { 53 | net := s.Cfg.Net 54 | addr := fmt.Sprintf("%s:%v", net.IP, net.MgntPort) 55 | LOG.Infof("Starting http server %s", addr) 56 | if net.TLS != nil { 57 | err = s.echo.StartTLS(addr, net.TLS.Cert, net.TLS.Key) 58 | } else { 59 | err = s.echo.Start(addr) 60 | } 61 | 62 | if err != nil { 63 | LOG.Infof("Start http server %s failed, %v", addr, err) 64 | return err 65 | } 66 | return nil 67 | } 68 | 69 | // Start the service 70 | func (s *BaseService) Start() error { 71 | if atomic.CompareAndSwapUint32(&s.running, 0, 1) { 72 | s.initlog() 73 | LOG.Infof("Starting %s", s.name) 74 | if err := s.svc.OnStart(s.Cfg, s.echo); err != nil { 75 | return err 76 | } 77 | 78 | done := make(chan error) 79 | go func() { 80 | done <- s.runEcho() 81 | }() 82 | select { 83 | case err := <-done: 84 | return err 85 | case <-time.After(500 * time.Millisecond): 86 | return nil 87 | } 88 | } 89 | return errors.New("Started aleadry.") 90 | } 91 | 92 | // OnStart implements Service 93 | func (s *BaseService) OnStart(c *Config, e *echo.Echo) error { return nil } 94 | 95 | // Stop the service 96 | func (s *BaseService) Stop() bool { 97 | if atomic.CompareAndSwapUint32(&s.running, 1, 0) { 98 | LOG.Infof("Stopping %s", s.name) 99 | s.svc.OnStop(s.Cfg, s.echo) 100 | ctx, cancel := context.WithTimeout(context.Background(), time.Second) 101 | defer cancel() 102 | if err := s.echo.Shutdown(ctx); err != nil { 103 | LOG.Error("shutdown echo server failed ", err) 104 | } 105 | return true 106 | } 107 | return false 108 | } 109 | 110 | // OnStop implements Service 111 | func (s *BaseService) OnStop(c *Config, e *echo.Echo) {} 112 | 113 | // IsRunning implements Service 114 | func (s *BaseService) IsRunning() bool { 115 | return atomic.LoadUint32(&s.running) == 1 116 | } 117 | 118 | // Auth using basic authorization 119 | func (s *BaseService) Auth(u, p string, ctx echo.Context) (bool, error) { 120 | if u == s.Cfg.Auth.Username && p == s.Cfg.Auth.Password { 121 | return true, nil 122 | } 123 | return false, nil 124 | } 125 | -------------------------------------------------------------------------------- /common/config.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io/ioutil" 7 | "os" 8 | "path/filepath" 9 | 10 | "github.com/xtfly/gokits/gcrypto" 11 | "github.com/xtfly/log4g" 12 | 13 | "gopkg.in/yaml.v2" 14 | ) 15 | 16 | var ( 17 | LOG = log4g.GetLogger("gofd") 18 | ) 19 | 20 | // Config is struct maping the yaml configuration file 21 | type Config struct { 22 | Server bool //是否为服务端 23 | Crypto *gcrypto.Crypto 24 | 25 | Name string `yaml:"name"` 26 | DownDir string `yaml:"downdir,omitempty"` //只有客户端才配置 27 | Log string `yaml:"log"` 28 | 29 | Net struct { 30 | IP string `yaml:"ip"` 31 | MgntPort int `yaml:"mgntPort"` 32 | DataPort int `yaml:"dataPort"` 33 | 34 | AgentMgntPort int `yaml:"agentMgntPort,omitempty"` 35 | AgentDataPort int `yaml:"agentDataPort,omitempty"` 36 | 37 | TLS *struct { 38 | Cert string `yaml:"cert"` 39 | Key string `yaml:"key"` 40 | } `yaml:"tls,omitempty"` 41 | } `yaml:"net"` 42 | 43 | Auth struct { 44 | Username string `yaml:"username"` 45 | Password string `yaml:"password"` 46 | Factor string `yaml:"factor"` 47 | } `yaml:"auth"` 48 | 49 | Control *Control `yaml:"control"` 50 | } 51 | 52 | // Control is some config item for controling the p2p session 53 | type Control struct { 54 | Speed int `yaml:"speed"` // Unit: MiBps 55 | MaxActive int `yaml:"maxActive"` // 56 | CacheSize int `yaml:"cacheSize"` // Unit: MiB 57 | } 58 | 59 | func normalFile(dir string) string { 60 | if !filepath.IsAbs(dir) { 61 | pwd, _ := os.Getwd() 62 | dir = filepath.Join(pwd, dir) 63 | dir, _ = filepath.Abs(dir) 64 | dir = filepath.Clean(dir) 65 | return dir 66 | } 67 | return dir 68 | } 69 | 70 | func (c *Config) defaultValue() { 71 | c.DownDir = normalFile(c.DownDir) 72 | f, err := os.Stat(c.DownDir) 73 | if err == nil || !os.IsExist(err) { 74 | if err := os.MkdirAll(c.DownDir, os.ModePerm); err != nil { 75 | fmt.Printf("mkdir %s failed", c.DownDir) 76 | os.Exit(6) 77 | } 78 | } else { 79 | if !f.IsDir() { 80 | fmt.Printf("%s is not a directory", c.DownDir) 81 | os.Exit(6) 82 | } 83 | } 84 | 85 | if c.Log != "" { 86 | c.Log = normalFile(c.Log) 87 | } 88 | 89 | if c.Net.TLS != nil { 90 | c.Net.TLS.Cert = normalFile(c.Net.TLS.Cert) 91 | c.Net.TLS.Key = normalFile(c.Net.TLS.Key) 92 | } 93 | 94 | if c.Control == nil { 95 | c.Control = &Control{Speed: 10, MaxActive: 5, CacheSize: 25} 96 | } 97 | 98 | if c.Control.Speed == 0 { 99 | c.Control.Speed = 20 100 | } 101 | if c.Control.MaxActive == 0 { 102 | c.Control.MaxActive = 5 103 | } 104 | if c.Control.CacheSize == 0 { 105 | c.Control.CacheSize = 25 106 | } 107 | } 108 | 109 | func (c *Config) validate() error { 110 | if c.Server { 111 | if c.Net.AgentMgntPort == 0 { 112 | return errors.New("not set Net.AgentMgntPort in server config file") 113 | } 114 | if c.Net.AgentDataPort == 0 { 115 | return errors.New("not set Net.AgentDataPort in server config file") 116 | } 117 | } 118 | 119 | if !c.Server { 120 | if c.DownDir == "" { 121 | return errors.New("Not set DownDir in client config file") 122 | } 123 | } 124 | 125 | if c.Auth.Username == "" || c.Auth.Password == "" || c.Auth.Factor == "" { 126 | return errors.New("not set auth in config file") 127 | } 128 | 129 | var err error 130 | c.Crypto, err = gcrypto.NewCrypto(c.Auth.Factor) 131 | if err != nil { 132 | return err 133 | } 134 | c.Auth.Password, err = c.Crypto.DecryptStr(c.Auth.Password) 135 | if err != nil { 136 | return err 137 | } 138 | 139 | return nil 140 | } 141 | 142 | // ParserConfig return the Config instance when parse the configuration file 143 | func ParserConfig(cfgfile string, server bool) (*Config, error) { 144 | ncfg := normalFile(cfgfile) 145 | bs, err := ioutil.ReadFile(ncfg) 146 | if err != nil { 147 | return nil, err 148 | } 149 | cfg := new(Config) 150 | cfg.Server = server 151 | if err := yaml.Unmarshal(bs, cfg); err != nil { 152 | return nil, err 153 | } 154 | 155 | if err := cfg.validate(); err != nil { 156 | return nil, err 157 | } 158 | 159 | cfg.defaultValue() 160 | return cfg, nil 161 | } 162 | -------------------------------------------------------------------------------- /common/http.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "bytes" 5 | "crypto/tls" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | // HTTPGet return the body of the response when send http get method to the server 14 | func (s *BaseService) HTTPGet(addr, urlpath string) (rspBody []byte, err error) { 15 | return SendHTTPReq(s.Cfg, "GET", addr, urlpath, nil) 16 | } 17 | 18 | // HTTPPost return the body of the response when send http get method to the server 19 | func (s *BaseService) HTTPPost(addr, urlpath string, reqBody []byte) (rspBody []byte, err error) { 20 | return SendHTTPReq(s.Cfg, "POST", addr, urlpath, reqBody) 21 | } 22 | 23 | // HTTPDelete return the body of the response when send http get method to the server 24 | func (s *BaseService) HTTPDelete(addr, urlpath string) (err error) { 25 | _, err = SendHTTPReq(s.Cfg, "DELETE", addr, urlpath, nil) 26 | return 27 | } 28 | 29 | // CreateHTTPClient return a http client instannce 30 | func CreateHTTPClient(cfg *Config) *http.Client { 31 | var client *http.Client 32 | tr := &http.Transport{ 33 | TLSHandshakeTimeout: 10 * time.Second, 34 | ExpectContinueTimeout: 1 * time.Second, 35 | MaxIdleConnsPerHost: 1, 36 | DisableKeepAlives: true, 37 | } 38 | if cfg.Net.TLS != nil { 39 | tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} 40 | } 41 | client = &http.Client{Transport: tr} 42 | return client 43 | } 44 | 45 | // SendHTTPReqWithClient ... 46 | func SendHTTPReqWithClient(client *http.Client, cfg *Config, method, addr, urlpath string, reqBody []byte) (rspBody []byte, err error) { 47 | schema := "http" 48 | if cfg.Net.TLS != nil { 49 | schema = "https" 50 | } 51 | 52 | if cfg.Server && !strings.Contains(addr, ":") { 53 | addr = fmt.Sprintf("%s:%v", addr, cfg.Net.AgentMgntPort) 54 | } 55 | 56 | url := fmt.Sprintf("%s://%s%s", schema, addr, urlpath) 57 | req, err := http.NewRequest(method, url, bytes.NewReader(reqBody)) 58 | if err != nil { 59 | return nil, err 60 | } 61 | 62 | req.SetBasicAuth(cfg.Auth.Username, cfg.Auth.Password) 63 | req.Header.Set("Content-Type", "application/json") 64 | //log.Debugf("Sending http request %v", req) 65 | 66 | client.Timeout = 2 * time.Second 67 | resp, err := client.Do(req) 68 | if err != nil { 69 | return nil, err 70 | } 71 | defer resp.Body.Close() 72 | 73 | if resp.StatusCode > 300 { 74 | return nil, fmt.Errorf("Recv http status code %v", resp.StatusCode) 75 | } 76 | 77 | if resp.ContentLength > 0 { 78 | rspBody, err = ioutil.ReadAll(resp.Body) 79 | } 80 | return 81 | } 82 | 83 | // SendHTTPReq ... 84 | func SendHTTPReq(cfg *Config, method, addr, urlpath string, reqBody []byte) (rspBody []byte, err error) { 85 | client := CreateHTTPClient(cfg) 86 | return SendHTTPReqWithClient(client, cfg, method, addr, urlpath, reqBody) 87 | } 88 | -------------------------------------------------------------------------------- /config/agent.yml: -------------------------------------------------------------------------------- 1 | name: agent 2 | downdir: ../download 3 | net: 4 | ip: 127.0.0.1 5 | mgntPort: 45010 6 | dataPort: 45011 7 | tls: 8 | cert: ../misc/server.crt 9 | key: ../misc/server.key 10 | auth: 11 | username: gofd 12 | password: LFFex2mDcetFDnghcZqpz0z/zbzYhRLBj58fnAs45uc= 13 | factor: 802cbde8 14 | control: 15 | cacheSize: 50 # unit is MB 16 | maxActive: 10 -------------------------------------------------------------------------------- /config/server.yml: -------------------------------------------------------------------------------- 1 | name: server 2 | net: 3 | ip: 127.0.0.1 4 | mgntPort: 45000 5 | dataPort: 45001 6 | agentMgntPort: 45010 7 | agentDataPort: 45011 8 | tls: 9 | cert: ../misc/server.crt 10 | key: ../misc/server.key 11 | auth: 12 | username: gofd 13 | password: LFFex2mDcetFDnghcZqpz0z/zbzYhRLBj58fnAs45uc= 14 | factor: 802cbde8 15 | control: 16 | speed: 50 # unit is MB 17 | cacheSize: 50 # unit is MB 18 | maxActive: 10 19 | -------------------------------------------------------------------------------- /docs/create_task.md: -------------------------------------------------------------------------------- 1 | ```sequence 2 | Client->GoFD Server: POST /api/v1/server/tasks 3 | GoFD Server-->Client: 200 OK 4 | GoFD Server->GoFD Agent1: POST /api/v1/agent/tasks 5 | GoFD Agent1-->GoFD Server: 200 OK 6 | GoFD Server->GoFD Agent2: POST /api/v1/agent/tasks 7 | GoFD Agent2-->GoFD Server: 200 OK 8 | GoFD Server->GoFD Agent1: POST /api/v1/agent/tasks/start 9 | GoFD Agent1-->GoFD Server: 200 OK 10 | GoFD Server->GoFD Agent2: POST /api/v1/agent/tasks/start 11 | GoFD Agent2-->GoFD Server: 200 OK 12 | GoFD Agent1--GoFD Agent2: p2p translate file 13 | GoFD Agent1->GoFD Server: POST /api/v1/server/tasks/status 14 | GoFD Server-->GoFD Agent1: 200 OK 15 | GoFD Agent2->GoFD Server: POST /api/v1/server/tasks/status 16 | GoFD Server-->GoFD Agent2: 200 OK 17 | ``` -------------------------------------------------------------------------------- /docs/images/create_task.svg: -------------------------------------------------------------------------------- 1 | Client->GoFD Server: POST /api/v1/server/tasks 2 | GoFD Server-->Client: 200 OK 3 | GoFD Server->GoFD Agent1: POST /api/v1/agent/tasks 4 | GoFD Agent1-->GoFD Server: 200 OK 5 | GoFD Server->GoFD Agent2: POST /api/v1/agent/tasks 6 | GoFD Agent2-->GoFD Server: 200 OK 7 | GoFD Server->GoFD Agent1: POST /api/v1/agent/tasks/start 8 | GoFD Agent1-->GoFD Server: 200 OK 9 | GoFD Server->GoFD Agent2: POST /api/v1/agent/tasks/start 10 | GoFD Agent2-->GoFD Server: 200 OK 11 | GoFD Agent1--GoFD Agent2: p2p translate file 12 | GoFD Agent1->GoFD Server: POST /api/v1/server/tasks/status 13 | GoFD Server-->GoFD Agent1: 200 OK 14 | GoFD Agent2->GoFD Server: POST /api/v1/server/tasks/status 15 | GoFD Server-->GoFD Agent2: 200 OKCreated with Raphaël 2.1.2ClientClientGoFD ServerGoFD ServerGoFD Agent1GoFD Agent1GoFD Agent2GoFD Agent2POST /api/v1/server/tasks200 OKPOST /api/v1/agent/tasks200 OKPOST /api/v1/agent/tasks200 OKPOST /api/v1/agent/tasks/start200 OKPOST /api/v1/agent/tasks/start200 OKp2p translate filePOST /api/v1/server/tasks/status200 OKPOST /api/v1/server/tasks/status200 OK -------------------------------------------------------------------------------- /docs/images/trans_file.svg: -------------------------------------------------------------------------------- 1 | participant Agent1 2 | participant Agent2 3 | participant Agent3 4 | Agent2->Agent1: TCP Connect(with auth header) 5 | Agent1->Agent2: 0xFF 6 | Agent2->Agent1: send BITFIELD 7 | Agent1->Agent2: send BITFIELD 8 | Agent3->Agent2: TCP Connect(with auth header) 9 | Agent2->Agent3: 0xFF 10 | Agent3->Agent2: send BITFIELD 11 | Agent2->Agent3: send BITFIELD 12 | Agent2->Agent1: send REQUEST 13 | Agent1->Agent2: send PIECE 14 | Agent2->Agent3: send HAVE 15 | Agent3->Agent2: send REQUEST 16 | Agent2->Agent3: send PIECECreated with Raphaël 2.1.2Agent1Agent1Agent2Agent2Agent3Agent3TCP Connect(with auth header)0xFFsend BITFIELDsend BITFIELDTCP Connect(with auth header)0xFFsend BITFIELDsend BITFIELDsend REQUESTsend PIECEsend HAVEsend REQUESTsend PIECE -------------------------------------------------------------------------------- /docs/trans_file.md: -------------------------------------------------------------------------------- 1 | ```sequence 2 | participant Agent1 3 | participant Agent2 4 | participant Agent3 5 | Agent2->Agent1: TCP Connect(with auth header) 6 | Agent1->Agent2: 0xFF 7 | Agent2->Agent1: send BITFIELD 8 | Agent1->Agent2: send BITFIELD 9 | Agent3->Agent2: TCP Connect(with auth header) 10 | Agent2->Agent3: 0xFF 11 | Agent3->Agent2: send BITFIELD 12 | Agent2->Agent3: send BITFIELD 13 | Agent2->Agent1: send REQUEST 14 | Agent1->Agent2: send PIECE 15 | Agent2->Agent3: send HAVE 16 | Agent3->Agent2: send REQUEST 17 | Agent2->Agent3: send PIECE 18 | ``` -------------------------------------------------------------------------------- /flowctrl/flowctrl.go: -------------------------------------------------------------------------------- 1 | // 2 | // Written by Maxim Khitrov (November 2012) 3 | // 4 | 5 | // Package flowcontrol provides the tools for monitoring and limiting the 6 | // transfer rate of an arbitrary data stream. 7 | package flowctrl 8 | 9 | import ( 10 | "math" 11 | "sync" 12 | "time" 13 | ) 14 | 15 | // Monitor monitors and limits the transfer rate of a data stream. 16 | type Monitor struct { 17 | mu sync.Mutex // Mutex guarding access to all internal fields 18 | active bool // Flag indicating an active transfer 19 | start time.Duration // Transfer start time (clock() value) 20 | bytes int64 // Total number of bytes transferred 21 | samples int64 // Total number of samples taken 22 | 23 | rSample float64 // Most recent transfer rate sample (bytes per second) 24 | rEMA float64 // Exponential moving average of rSample 25 | rPeak float64 // Peak transfer rate (max of all rSamples) 26 | rWindow float64 // rEMA window (seconds) 27 | 28 | sBytes int64 // Number of bytes transferred since sLast 29 | sLast time.Duration // Most recent sample time (stop time when inactive) 30 | sRate time.Duration // Sampling rate 31 | 32 | tBytes int64 // Number of bytes expected in the current transfer 33 | tLast time.Duration // Time of the most recent transfer of at least 1 byte 34 | } 35 | 36 | // New creates a new flow control monitor. Instantaneous transfer rate is 37 | // measured and updated for each sampleRate interval. windowSize determines the 38 | // weight of each sample in the exponential moving average (EMA) calculation. 39 | // The exact formulas are: 40 | // 41 | // sampleTime = currentTime - prevSampleTime 42 | // sampleRate = byteCount / sampleTime 43 | // weight = 1 - exp(-sampleTime/windowSize) 44 | // newRate = weight*sampleRate + (1-weight)*oldRate 45 | // 46 | // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, 47 | // respectively. 48 | func New(sampleRate, windowSize time.Duration) *Monitor { 49 | if sampleRate = clockRound(sampleRate); sampleRate <= 0 { 50 | sampleRate = 5 * clockRate 51 | } 52 | if windowSize <= 0 { 53 | windowSize = 1 * time.Second 54 | } 55 | now := clock() 56 | return &Monitor{ 57 | active: true, 58 | start: now, 59 | rWindow: windowSize.Seconds(), 60 | sLast: now, 61 | sRate: sampleRate, 62 | tLast: now, 63 | } 64 | } 65 | 66 | // Update records the transfer of n bytes and returns n. It should be called 67 | // after each Read/Write operation, even if n is 0. 68 | func (m *Monitor) Update(n int) int { 69 | m.mu.Lock() 70 | m.update(n) 71 | m.mu.Unlock() 72 | return n 73 | } 74 | 75 | // Hack to set the current rEMA. 76 | func (m *Monitor) SetREMA(rEMA float64) { 77 | m.mu.Lock() 78 | m.rEMA = rEMA 79 | m.samples++ 80 | m.mu.Unlock() 81 | } 82 | 83 | // IO is a convenience method intended to wrap io.Reader and io.Writer method 84 | // execution. It calls m.Update(n) and then returns (n, err) unmodified. 85 | func (m *Monitor) IO(n int, err error) (int, error) { 86 | return m.Update(n), err 87 | } 88 | 89 | // Done marks the transfer as finished and prevents any further updates or 90 | // limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and 91 | // Limit methods become NOOPs. It returns the total number of bytes transferred. 92 | func (m *Monitor) Done() int64 { 93 | m.mu.Lock() 94 | if now := m.update(0); m.sBytes > 0 { 95 | m.reset(now) 96 | } 97 | m.active = false 98 | m.tLast = 0 99 | n := m.bytes 100 | m.mu.Unlock() 101 | return n 102 | } 103 | 104 | // timeRemLimit is the maximum Status.TimeRem value. 105 | const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second 106 | 107 | // Status represents the current Monitor status. All transfer rates are in bytes 108 | // per second rounded to the nearest byte. 109 | type Status struct { 110 | Active bool // Flag indicating an active transfer 111 | Start time.Time // Transfer start time 112 | Duration time.Duration // Time period covered by the statistics 113 | Idle time.Duration // Time since the last transfer of at least 1 byte 114 | Bytes int64 // Total number of bytes transferred 115 | Samples int64 // Total number of samples taken 116 | InstRate int64 // Instantaneous transfer rate 117 | CurRate int64 // Current transfer rate (EMA of InstRate) 118 | AvgRate int64 // Average transfer rate (Bytes / Duration) 119 | PeakRate int64 // Maximum instantaneous transfer rate 120 | BytesRem int64 // Number of bytes remaining in the transfer 121 | TimeRem time.Duration // Estimated time to completion 122 | Progress Percent // Overall transfer progress 123 | } 124 | 125 | // Status returns current transfer status information. The returned value 126 | // becomes static after a call to Done. 127 | func (m *Monitor) Status() Status { 128 | m.mu.Lock() 129 | now := m.update(0) 130 | s := Status{ 131 | Active: m.active, 132 | Start: clockToTime(m.start), 133 | Duration: m.sLast - m.start, 134 | Idle: now - m.tLast, 135 | Bytes: m.bytes, 136 | Samples: m.samples, 137 | PeakRate: round(m.rPeak), 138 | BytesRem: m.tBytes - m.bytes, 139 | Progress: percentOf(float64(m.bytes), float64(m.tBytes)), 140 | } 141 | if s.BytesRem < 0 { 142 | s.BytesRem = 0 143 | } 144 | if s.Duration > 0 { 145 | rAvg := float64(s.Bytes) / s.Duration.Seconds() 146 | s.AvgRate = round(rAvg) 147 | if s.Active { 148 | s.InstRate = round(m.rSample) 149 | s.CurRate = round(m.rEMA) 150 | if s.BytesRem > 0 { 151 | if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { 152 | ns := float64(s.BytesRem) / tRate * 1e9 153 | if ns > float64(timeRemLimit) { 154 | ns = float64(timeRemLimit) 155 | } 156 | s.TimeRem = clockRound(time.Duration(ns)) 157 | } 158 | } 159 | } 160 | } 161 | m.mu.Unlock() 162 | return s 163 | } 164 | 165 | // Limit restricts the instantaneous (per-sample) data flow to rate bytes per 166 | // second. It returns the maximum number of bytes (0 <= n <= want) that may be 167 | // transferred immediately without exceeding the limit. If block == true, the 168 | // call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, 169 | // or the transfer is inactive (after a call to Done). 170 | // 171 | // At least one byte is always allowed to be transferred in any given sampling 172 | // period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate 173 | // is 10 bytes per second. 174 | // 175 | // For usage examples, see the implementation of Reader and Writer in io.go. 176 | func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { 177 | if want < 1 || rate < 1 { 178 | return want 179 | } 180 | m.mu.Lock() 181 | 182 | // Determine the maximum number of bytes that can be sent in one sample 183 | limit := round(float64(rate) * m.sRate.Seconds()) 184 | if limit <= 0 { 185 | limit = 1 186 | } 187 | 188 | // If block == true, wait until m.sBytes < limit 189 | if now := m.update(0); block { 190 | for m.sBytes >= limit && m.active { 191 | now = m.waitNextSample(now) 192 | } 193 | } 194 | 195 | // Make limit <= want (unlimited if the transfer is no longer active) 196 | if limit -= m.sBytes; limit > int64(want) || !m.active { 197 | limit = int64(want) 198 | } 199 | m.mu.Unlock() 200 | 201 | if limit < 0 { 202 | limit = 0 203 | } 204 | return int(limit) 205 | } 206 | 207 | // SetTransferSize specifies the total size of the data transfer, which allows 208 | // the Monitor to calculate the overall progress and time to completion. 209 | func (m *Monitor) SetTransferSize(bytes int64) { 210 | if bytes < 0 { 211 | bytes = 0 212 | } 213 | m.mu.Lock() 214 | m.tBytes = bytes 215 | m.mu.Unlock() 216 | } 217 | 218 | // update accumulates the transferred byte count for the current sample until 219 | // clock() - m.sLast >= m.sRate. The monitor status is updated once the current 220 | // sample is done. 221 | func (m *Monitor) update(n int) (now time.Duration) { 222 | if !m.active { 223 | return 224 | } 225 | if now = clock(); n > 0 { 226 | m.tLast = now 227 | } 228 | m.sBytes += int64(n) 229 | if sTime := now - m.sLast; sTime >= m.sRate { 230 | t := sTime.Seconds() 231 | if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { 232 | m.rPeak = m.rSample 233 | } 234 | 235 | // Exponential moving average using a method similar to *nix load 236 | // average calculation. Longer sampling periods carry greater weight. 237 | if m.samples > 0 { 238 | w := math.Exp(-t / m.rWindow) 239 | m.rEMA = m.rSample + w*(m.rEMA-m.rSample) 240 | } else { 241 | m.rEMA = m.rSample 242 | } 243 | m.reset(now) 244 | } 245 | return 246 | } 247 | 248 | // reset clears the current sample state in preparation for the next sample. 249 | func (m *Monitor) reset(sampleTime time.Duration) { 250 | m.bytes += m.sBytes 251 | m.samples++ 252 | m.sBytes = 0 253 | m.sLast = sampleTime 254 | } 255 | 256 | // waitNextSample sleeps for the remainder of the current sample. The lock is 257 | // released and reacquired during the actual sleep period, so it's possible for 258 | // the transfer to be inactive when this method returns. 259 | func (m *Monitor) waitNextSample(now time.Duration) time.Duration { 260 | const minWait = 5 * time.Millisecond 261 | current := m.sLast 262 | 263 | // sleep until the last sample time changes (ideally, just one iteration) 264 | for m.sLast == current && m.active { 265 | d := current + m.sRate - now 266 | m.mu.Unlock() 267 | if d < minWait { 268 | d = minWait 269 | } 270 | time.Sleep(d) 271 | m.mu.Lock() 272 | now = m.update(0) 273 | } 274 | return now 275 | } 276 | -------------------------------------------------------------------------------- /flowctrl/io.go: -------------------------------------------------------------------------------- 1 | // 2 | // Written by Maxim Khitrov (November 2012) 3 | // 4 | 5 | package flowctrl 6 | 7 | import ( 8 | "errors" 9 | "io" 10 | ) 11 | 12 | // ErrLimit is returned by the Writer when a non-blocking write is short due to 13 | // the transfer rate limit. 14 | var ErrLimit = errors.New("flowcontrol: transfer rate limit exceeded") 15 | 16 | // Limiter is implemented by the Reader and Writer to provide a consistent 17 | // interface for monitoring and controlling data transfer. 18 | type Limiter interface { 19 | Done() int64 20 | Status() Status 21 | SetTransferSize(bytes int64) 22 | SetLimit(new int64) (old int64) 23 | SetBlocking(new bool) (old bool) 24 | } 25 | 26 | // Reader implements io.ReadCloser with a restriction on the rate of data 27 | // transfer. 28 | type Reader struct { 29 | io.Reader // Data source 30 | *Monitor // Flow control monitor 31 | 32 | limit int64 // Rate limit in bytes per second (unlimited when <= 0) 33 | block bool // What to do when no new bytes can be read due to the limit 34 | } 35 | 36 | // NewReader restricts all Read operations on r to limit bytes per second. 37 | func NewReader(r io.Reader, limit int64) *Reader { 38 | return &Reader{r, New(0, 0), limit, true} 39 | } 40 | 41 | // Read reads up to len(p) bytes into p without exceeding the current transfer 42 | // rate limit. It returns (0, nil) immediately if r is non-blocking and no new 43 | // bytes can be read at this time. 44 | func (r *Reader) Read(p []byte) (n int, err error) { 45 | p = p[:r.Limit(len(p), r.limit, r.block)] 46 | if len(p) > 0 { 47 | n, err = r.IO(r.Reader.Read(p)) 48 | } 49 | return 50 | } 51 | 52 | // SetLimit changes the transfer rate limit to new bytes per second and returns 53 | // the previous setting. 54 | func (r *Reader) SetLimit(new int64) (old int64) { 55 | old, r.limit = r.limit, new 56 | return 57 | } 58 | 59 | // SetBlocking changes the blocking behavior and returns the previous setting. A 60 | // Read call on a non-blocking reader returns immediately if no additional bytes 61 | // may be read at this time due to the rate limit. 62 | func (r *Reader) SetBlocking(new bool) (old bool) { 63 | old, r.block = r.block, new 64 | return 65 | } 66 | 67 | // Close closes the underlying reader if it implements the io.Closer interface. 68 | func (r *Reader) Close() error { 69 | defer r.Done() 70 | if c, ok := r.Reader.(io.Closer); ok { 71 | return c.Close() 72 | } 73 | return nil 74 | } 75 | 76 | // Writer implements io.WriteCloser with a restriction on the rate of data 77 | // transfer. 78 | type Writer struct { 79 | io.Writer // Data destination 80 | *Monitor // Flow control monitor 81 | 82 | limit int64 // Rate limit in bytes per second (unlimited when <= 0) 83 | block bool // What to do when no new bytes can be written due to the limit 84 | } 85 | 86 | // NewWriter restricts all Write operations on w to limit bytes per second. The 87 | // transfer rate and the default blocking behavior (true) can be changed 88 | // directly on the returned *Writer. 89 | func NewWriter(w io.Writer, limit int64) *Writer { 90 | return &Writer{w, New(0, 0), limit, true} 91 | } 92 | 93 | // Write writes len(p) bytes from p to the underlying data stream without 94 | // exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is 95 | // non-blocking and no additional bytes can be written at this time. 96 | func (w *Writer) Write(p []byte) (n int, err error) { 97 | var c int 98 | for len(p) > 0 && err == nil { 99 | s := p[:w.Limit(len(p), w.limit, w.block)] 100 | if len(s) > 0 { 101 | c, err = w.IO(w.Writer.Write(s)) 102 | } else { 103 | return n, ErrLimit 104 | } 105 | p = p[c:] 106 | n += c 107 | } 108 | return 109 | } 110 | 111 | // SetLimit changes the transfer rate limit to new bytes per second and returns 112 | // the previous setting. 113 | func (w *Writer) SetLimit(new int64) (old int64) { 114 | old, w.limit = w.limit, new 115 | return 116 | } 117 | 118 | // SetBlocking changes the blocking behavior and returns the previous setting. A 119 | // Write call on a non-blocking writer returns as soon as no additional bytes 120 | // may be written at this time due to the rate limit. 121 | func (w *Writer) SetBlocking(new bool) (old bool) { 122 | old, w.block = w.block, new 123 | return 124 | } 125 | 126 | // Close closes the underlying writer if it implements the io.Closer interface. 127 | func (w *Writer) Close() error { 128 | defer w.Done() 129 | if c, ok := w.Writer.(io.Closer); ok { 130 | return c.Close() 131 | } 132 | return nil 133 | } 134 | -------------------------------------------------------------------------------- /flowctrl/utils.go: -------------------------------------------------------------------------------- 1 | // 2 | // Written by Maxim Khitrov (November 2012) 3 | // 4 | 5 | package flowctrl 6 | 7 | import ( 8 | "math" 9 | "strconv" 10 | "time" 11 | ) 12 | 13 | // clockRate is the resolution and precision of clock(). 14 | const clockRate = 20 * time.Millisecond 15 | 16 | // czero is the process start time rounded down to the nearest clockRate 17 | // increment. 18 | var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate 19 | 20 | // clock returns a low resolution timestamp relative to the process start time. 21 | func clock() time.Duration { 22 | return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero 23 | } 24 | 25 | // clockToTime converts a clock() timestamp to an absolute time.Time value. 26 | func clockToTime(c time.Duration) time.Time { 27 | return time.Unix(0, int64(czero+c)) 28 | } 29 | 30 | // clockRound returns d rounded to the nearest clockRate increment. 31 | func clockRound(d time.Duration) time.Duration { 32 | return (d + clockRate>>1) / clockRate * clockRate 33 | } 34 | 35 | // round returns x rounded to the nearest int64 (non-negative values only). 36 | func round(x float64) int64 { 37 | if _, frac := math.Modf(x); frac >= 0.5 { 38 | return int64(math.Ceil(x)) 39 | } 40 | return int64(math.Floor(x)) 41 | } 42 | 43 | // Percent represents a percentage in increments of 1/1000th of a percent. 44 | type Percent uint32 45 | 46 | // percentOf calculates what percent of the total is x. 47 | func percentOf(x, total float64) Percent { 48 | if x < 0 || total <= 0 { 49 | return 0 50 | } else if p := round(x / total * 1e5); p <= math.MaxUint32 { 51 | return Percent(p) 52 | } 53 | return Percent(math.MaxUint32) 54 | } 55 | 56 | func (p Percent) Float() float64 { 57 | return float64(p) * 1e-3 58 | } 59 | 60 | func (p Percent) String() string { 61 | var buf [12]byte 62 | b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) 63 | n := len(b) 64 | b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) 65 | b[n] = '.' 66 | return string(append(b, '%')) 67 | } 68 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/xtfly/gofd 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/labstack/echo/v4 v4.1.6 7 | github.com/labstack/gommon v0.2.9 8 | github.com/xtfly/gokits v0.1.1 9 | github.com/xtfly/log4g v0.1.0 10 | gopkg.in/yaml.v2 v2.2.2 11 | ) 12 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 3 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 4 | github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= 5 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 6 | github.com/labstack/echo/v4 v4.1.6 h1:WOvLa4T1KzWCRpANwz0HGgWDelXSSGwIKtKBbFdHTv4= 7 | github.com/labstack/echo/v4 v4.1.6/go.mod h1:kU/7PwzgNxZH4das4XNsSpBSOD09XIF5YEPzjpkGnGE= 8 | github.com/labstack/gommon v0.2.9 h1:heVeuAYtevIQVYkGj6A41dtfT91LrvFG220lavpWhrU= 9 | github.com/labstack/gommon v0.2.9/go.mod h1:E8ZTmW9vw5az5/ZyHWCp0Lw4OH2ecsaBP1C/NKavGG4= 10 | github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= 11 | github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 12 | github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= 13 | github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 14 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 15 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 16 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 17 | github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 18 | github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 19 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 20 | github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= 21 | github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 22 | github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= 23 | github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= 24 | github.com/xtfly/gokits v0.1.1 h1:DGlLwM025A9NtduadoqdFsfhIH9KL6COwiJ6Ko/jpII= 25 | github.com/xtfly/gokits v0.1.1/go.mod h1:BoAPpQEE6ycTkhgqgIQZBaEHfn5l+U3CECoi43nErTM= 26 | github.com/xtfly/log4g v0.1.0 h1:K3UGlvFTooVrgrfZLSDSVBdLzgoZ+ieh7DsyiRkihEs= 27 | github.com/xtfly/log4g v0.1.0/go.mod h1:wZIHJolq4zBwrCVNsDm3cvKUZiOn8Jql3YxTlnAyADQ= 28 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 29 | golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= 30 | golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 31 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 32 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 33 | golang.org/x/net v0.0.0-20190607181551-461777fb6f67 h1:rJJxsykSlULwd2P2+pg/rtnwN2FrWp4IuCxOSyS0V00= 34 | golang.org/x/net v0.0.0-20190607181551-461777fb6f67/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 35 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 36 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 37 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 38 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 39 | golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 40 | golang.org/x/sys v0.0.0-20190609082536-301114b31cce h1:CQakrGkKbydnUmt7cFIlmQ4lNQiqdTPt6xzXij4nYCc= 41 | golang.org/x/sys v0.0.0-20190609082536-301114b31cce/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 42 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 43 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 44 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 45 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 46 | golang.org/x/tools v0.0.0-20190608022120-eacb66d2a7c3/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 47 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 48 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 49 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 50 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 51 | -------------------------------------------------------------------------------- /misc/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIGgzCCBGugAwIBAgIJAJ6fl+BNxVI3MA0GCSqGSIb3DQEBBQUAMIGHMQswCQYD 3 | VQQGEwJDTjESMBAGA1UECBMJR3Vhbmdkb25nMREwDwYDVQQHEwhTaGVuemhlbjEV 4 | MBMGA1UEChMMbGFubGluZ3ppLmNuMRUwEwYDVQQDEwxsYW5saW5nemkuY24xIzAh 5 | BgkqhkiG9w0BCQEWFHh0ZW5nZmVpQG91dGxvb2suY29tMB4XDTE2MDYyNDE0NDQx 6 | OVoXDTI2MDYyMjE0NDQxOVowgYcxCzAJBgNVBAYTAkNOMRIwEAYDVQQIEwlHdWFu 7 | Z2RvbmcxETAPBgNVBAcTCFNoZW56aGVuMRUwEwYDVQQKEwxsYW5saW5nemkuY24x 8 | FTATBgNVBAMTDGxhbmxpbmd6aS5jbjEjMCEGCSqGSIb3DQEJARYUeHRlbmdmZWlA 9 | b3V0bG9vay5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDOs3p2 10 | YpZJN7XG+dQ9T05QrPEM4Fb6makMeGIP0b6r0F131wJBP3KnjJAmE5ruTchAjLvP 11 | gkHWHUK/yZAQfS7/YTByjQzP5MHrNe83kbQY8YeYQaY4YD5iMc7OUoOjV8YDVHvH 12 | fI8BV+K+TczP1Ly+RwQEe1Qem1pYGpW0nfRXwJFMKBDllY74gOcib6+Obm7mcFkx 13 | 3Bp4vgaaZRJ/lPMOIHMS5f9DfB9dOysu08KDbOMCYwFTnnFlP4YiqpjSK0PtDN/K 14 | xWeoAUmo4g1oRgrNCuW2cC2+GCYqNU7Vq/9KeEUsW0nsQzUq+ltn11YQsGGACwLw 15 | FwUlUEFk/8F/HCiAT0YbD+O6LltfeudqROf6mRVKuJsA0a0F3pGBKveENEF7tEZ7 16 | n+2pAqK051o9LQOePO7ZOv4Sjl5LA08Z4xip8Pdfa28+MPFeyGfw9Ty8jhRgdwB9 17 | jpammkgnRSaakEbOO9jATyNXEF3BjnXR2tfGLYZg6x9KwuryiSLvlBgdRPCaEgAy 18 | L2J4/0BXS+CKF1SpGxf/ZQHqVi42DHJ4hWqJrxICRKYbsQN67DcVgt88fXdcBaNb 19 | 0T8rAR2t0EBcNOqBYbCF9wYmbSJygeYdZbLV+A3ZjorJys53D83WZ22cHRUohsML 20 | 9W3GG23zYoREPcDchVRd1TkjpzcaTjLrEYz+yQIDAQABo4HvMIHsMB0GA1UdDgQW 21 | BBTi7rbTQY7B9wByejOfnWmZ+FjfATCBvAYDVR0jBIG0MIGxgBTi7rbTQY7B9wBy 22 | ejOfnWmZ+FjfAaGBjaSBijCBhzELMAkGA1UEBhMCQ04xEjAQBgNVBAgTCUd1YW5n 23 | ZG9uZzERMA8GA1UEBxMIU2hlbnpoZW4xFTATBgNVBAoTDGxhbmxpbmd6aS5jbjEV 24 | MBMGA1UEAxMMbGFubGluZ3ppLmNuMSMwIQYJKoZIhvcNAQkBFhR4dGVuZ2ZlaUBv 25 | dXRsb29rLmNvbYIJAJ6fl+BNxVI3MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF 26 | BQADggIBAL1LWzfiTUHP686xOkHJ0z7n7eahC+8DUtkXAjl+uhqfcsPSYDVzQH5j 27 | 3hFLwUGHgUzoHn4ll6IIWyI3z8RTr4ekERCjYQsoamjIZw4Ruvtcb+1vXaKX/Pv4 28 | KTjmEyFJbRLvycWBA6aOn4B15gOxYoXifqQsedRgzIOUOoGhQczMbOmaWe++Rm73 29 | KpQSryFnmUqUwd2Vjc5dRIyrgPL49Ja/rVMDZ2Rv5h2F/P6RQpWzcF9N3JQBGESK 30 | UJWN/wmpj6jcP4THEbdFm8XpJ9nMPcw59zzN5DxMzXTkISYiNCTQB/rwIVVmiH/z 31 | +l18nNHHLNUVqrgvQwMwixu1AudT5upd1dXh78DSuFfc4JUOEh8NtO7ICaTRJTm0 32 | odNOG7KzLwMBiY+AZu/I/iecc1eB94+b15S/MPCwsuMAyARVCsuCq7jU5NsNR+M7 33 | 6SZOF3FVmnHOndbXtMnULSYQti3sChy0BHfEqT+XYRXdRzcqdUjJAhD3Vp5vRcwh 34 | hcpHjNso2s/tZxaWUfKGj9u2Pof5eKygnK8iRHWb6sJbTscswayq7rbHYumlBE8W 35 | 4Afs4fQzaaFAJFx18AC7asA6DCjcImo32ps7RURHUADlCjhDJLzkyPh6jsVuc7GB 36 | rqclaPayNwy0pVvKzOo1L/5uEyzUh7wta5RQnUMyU1BnjdHLSOuC 37 | -----END CERTIFICATE----- 38 | -------------------------------------------------------------------------------- /misc/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAzrN6dmKWSTe1xvnUPU9OUKzxDOBW+pmpDHhiD9G+q9Bdd9cC 3 | QT9yp4yQJhOa7k3IQIy7z4JB1h1Cv8mQEH0u/2Ewco0Mz+TB6zXvN5G0GPGHmEGm 4 | OGA+YjHOzlKDo1fGA1R7x3yPAVfivk3Mz9S8vkcEBHtUHptaWBqVtJ30V8CRTCgQ 5 | 5ZWO+IDnIm+vjm5u5nBZMdwaeL4GmmUSf5TzDiBzEuX/Q3wfXTsrLtPCg2zjAmMB 6 | U55xZT+GIqqY0itD7QzfysVnqAFJqOINaEYKzQrltnAtvhgmKjVO1av/SnhFLFtJ 7 | 7EM1KvpbZ9dWELBhgAsC8BcFJVBBZP/BfxwogE9GGw/jui5bX3rnakTn+pkVSrib 8 | ANGtBd6RgSr3hDRBe7RGe5/tqQKitOdaPS0Dnjzu2Tr+Eo5eSwNPGeMYqfD3X2tv 9 | PjDxXshn8PU8vI4UYHcAfY6WpppIJ0UmmpBGzjvYwE8jVxBdwY510drXxi2GYOsf 10 | SsLq8oki75QYHUTwmhIAMi9ieP9AV0vgihdUqRsX/2UB6lYuNgxyeIVqia8SAkSm 11 | G7EDeuw3FYLfPH13XAWjW9E/KwEdrdBAXDTqgWGwhfcGJm0icoHmHWWy1fgN2Y6K 12 | ycrOdw/N1mdtnB0VKIbDC/Vtxhtt82KERD3A3IVUXdU5I6c3Gk4y6xGM/skCAwEA 13 | AQKCAgEAqC6ImgKlXXWixf9InYoFosbUazGuE8ErNyZMQ6rZhVrIclXTU9uYIpPQ 14 | Rer1yKgpI7xeo/zMLLcqNHqAaaoxr0EY9lul7RGoZwU/WuuGqjT7qvrfkEde+u6j 15 | 4mRpD2K/jr4S6MGEagdkSotq9j+WQwvtTOg8DZtB6yXWYVuUEEJy9pAzD1qdNb1M 16 | U0ykfzODsKQBLvxz3YJgRQewAXbkPL5SYzIYfIRl0Knv/O33+o+CNdLwpWTpxwPk 17 | 1BgSuA7T+fDbtlTgfmZt1Kf5WLqE10jil4q27VEg3H77YgfMdH9faTPt9frJK+sA 18 | rIEdWf7plhFflpJkcXPXgaJHgnEa0bUOge09BrcfgsDwKa4Xd9dSo/9cwOpp0qVR 19 | eZCMVc1CxzPdphMfWCWP9ipD4hzGQ+8hWPPtnB/aZldIdoE1TAtVbGuXAIGGp1g6 20 | aLP/e6EMDmlEEq3vMzWXxodGGRmHwZxh7p0gXVxHS0hBLnTTGDOkdSG2Q1pit8p0 21 | KJHTrRHtwl1Husca6idlyqgkdetzyV8iH10rWTv5y7F+a9OQpDW0KWC2P168vITn 22 | EKGjHdjWFqY02+dBt8JS8h2+iBH35JP2NDPH2gkWv+4dbBkt5Bnm4YrQr90xDJYC 23 | SZnozINTJfQcRoKDui9ri3JUAQOzPYeE5h9KAT0JAQQeWBN2WAECggEBAO8766Qq 24 | K6uga8qfte06mALFxET8wL4CElS8vcQY4GqmrC1Pj7b7B3okq7eJybjWUS9y7rvE 25 | udDzEwxHyKXrMqOpiml3ZSRdVtpfsvlxnh5LABU+VNp4WD4ULNE2XpbQ0SohD5+m 26 | vWulRyqNKCUer1N6Frrsf2lP5jMyGuhsqgGuHEp6BbkHM7ckOJcaOaLtiyKR+Ajh 27 | dUs8WO2AvoaEwlHsXMQ7Lh2JSW19BPfxtx6qcxNG+ccM1dIERLMyCmvITlvELGfR 28 | bD9ioFNULHX0NKFMVPlH7rfc7pTiGEPCCKr0ih6+UqrX0idzRIdB+JPwegOKsAtp 29 | ph3IJOqxF8q+XkECggEBAN0v4uWJagEduZilXaq571CvwapJWdUg5K3bJOgjOREY 30 | lycT/HC6WXVnVr0Ljp+YDI+jZdCYdewIT47MaXwbo6igz5tPTKuHS4MFzH/VuI9v 31 | bjic550feNWNz6JK1XznXot4EwLfY7eh/jk3sOHdNLModRjtReca84nd640sccuq 32 | tODht0AoEYR8R8+uzKmWgcuIUjfM2npn3rfZt0w1QzbakNWpx72g0JswLVgnxGpI 33 | yxWGSzTb04q5hq7w1WcyVkznmxdH3UI8StUdRSZjSvt8u/AtqV3e4M0KM315ClBj 34 | hGOq9Ge03fM41J7ZC4tzx8DISdCRyZY2TH1gi4FFDokCggEASWXd7xkNGvP1/k2v 35 | 2F/QwZEBnZTaYbFrBFp/8kkbmT0XuDMfyakM6zmTAZWhsTb345HkkvFRfT0GjA7+ 36 | DYGyoKnkkGUUP+rvOcKAu61TMMIb/WiJShoAINO5yFpg/O6GLeHE1Fuo4/zqb7Ct 37 | L25Rpk/f52ulz5625uAph0PHiQptHaCAXvtJr5fKUPm30zgEJxAXxEm/3dxfAEcu 38 | rUDQCah18bKifGdhgtkHjDa50wvCkJiHC73xYCT1GvFwZw8mk1sFsLh8+Jl+zPIX 39 | Tf0lpJMxwuWB5SFAhac0gJ1oYWLzeWk1Hbtne7kOvlzMMj9mf+2uA2Jfl+WPBf5d 40 | aVOswQKCAQBcNU9Ys21+o56wtReUSckrkOmijMunXvwWQkpjf/OVXfP9dYaClB0h 41 | JYBydPPzMlbiNpMGX9ihGmHYPP0ooPkcHlYOAAQNaeyf5XX4QOec3C5N6ZNg3DLq 42 | jCDoHlxborS2XctHIkuWzTCHN+FEd4w1uAMA7kF687BdZCeDhei2qCaZrV9QnqOW 43 | 4JyrRsrJVBYt/FmSK5NU/iJJ8LhKS9YGcx14BTxgoD72wyHUI1ftmwUWpxKJKa9J 44 | oMg1++5rBAAYNWFhVjM2Hp5vs7EVLMWgi8JpgQ+fM3/x2nPWtfQHyUTyq90RssPo 45 | VmPFCOSYMR8wDg8/pNcuCNrgVbQyotBJAoIBAQCdsZDQ+SkUvov9wO01soUpr6Ja 46 | 85z0jaTAoYm/z4zpTLfwyKx5lhjom4SSZ1lcJfYiSo5TDtiKrveIpQAUGQsmU7BM 47 | 4vj2R2exyZBetKbaPu2ObAOKMpKopv8Ogx6vYTGvh5CyDHSLZfFs6w6wUwrypz4J 48 | RZ2CZvwuTrE2JRfpC31F+EglN4uZAZI3LXmK9gM/gtBqXmqZQ5vikjuAUD1zL084 49 | 7BW31cqCFc1ojUYTSCzaW9VrImvXbHuh78O7V/dn6W9BiP0imdCVOjNwZZYbCymv 50 | zJXtoRJRRbw5MN+ANWjsBsiya1qeKKmz91bHr51v8lMuJTaa1cWITpVEO6G6 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /p2p/api.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | // FileDict 一个文件的元数据信息 4 | type FileDict struct { 5 | Length int64 `json:"length"` 6 | Path string `json:"path"` 7 | Name string `json:"name"` 8 | Sum string `json:"sum" ` 9 | } 10 | 11 | // MetaInfo 一个任务内所有文件的元数据信息 12 | type MetaInfo struct { 13 | Length int64 `json:"length"` 14 | PieceLen int64 `json:"PieceLen"` 15 | Pieces []byte `json:"pieces"` 16 | Files []*FileDict `json:"files"` 17 | } 18 | 19 | // DispatchTask 下发给Agent的分发任务 20 | type DispatchTask struct { 21 | TaskID string `json:"taskId"` 22 | MetaInfo *MetaInfo `json:"metaInfo"` 23 | LinkChain *LinkChain `json:"linkChain"` 24 | Speed int64 `json:"speed"` 25 | } 26 | 27 | // StartTask 下发给Agent的分发任务 28 | type StartTask struct { 29 | TaskID string `json:"taskId"` 30 | LinkChain *LinkChain `json:"linkChain"` 31 | } 32 | 33 | // LinkChain 分发路径 34 | type LinkChain struct { 35 | // 软件分发的路径,要求服务端的地址排在第一个 36 | DispatchAddrs []string `json:"dispatchAddrs"` 37 | // 服务端管理接口,用于上报状态 38 | ServerAddr string `json:"serverAddr"` 39 | } 40 | 41 | // PHeader 连接认证消息头 42 | type PHeader struct { 43 | Len int32 44 | TaskID string 45 | Username string 46 | Password string 47 | Salt string 48 | } 49 | 50 | // StatusReport Agent分发状态上报 51 | type StatusReport struct { 52 | TaskID string `json:"taskId"` 53 | IP string `json:"ip"` 54 | PercentComplete float32 `json:"percentComplete"` 55 | } 56 | -------------------------------------------------------------------------------- /p2p/bitset.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "github.com/xtfly/gofd/common" 5 | ) 6 | 7 | // Bitset As defined by the bittorrent protocol, this bitset is big-endian, such that 8 | // the high bit of the first byte is block 0 9 | type Bitset struct { 10 | b []byte 11 | n int // size 12 | endIndex int 13 | endMask byte // Which bits of the last byte are valid 14 | } 15 | 16 | // NewBitset ... 17 | func NewBitset(n int) *Bitset { 18 | endIndex, endOffset := n>>3, n&7 19 | endMask := ^byte(255 >> byte(endOffset)) 20 | if endOffset == 0 { 21 | endIndex = -1 22 | } 23 | return &Bitset{make([]byte, (n+7)>>3), n, endIndex, endMask} 24 | } 25 | 26 | // NewBitsetFromBytes Creates a new bitset from a given byte stream. Returns nil if the 27 | // data is invalid in some way. 28 | func NewBitsetFromBytes(n int, data []byte) *Bitset { 29 | bitset := NewBitset(n) 30 | if len(bitset.b) != len(data) { 31 | return nil 32 | } 33 | copy(bitset.b, data) 34 | if bitset.endIndex >= 0 && bitset.b[bitset.endIndex]&(^bitset.endMask) != 0 { 35 | return nil 36 | } 37 | return bitset 38 | } 39 | 40 | // Set ... 41 | func (b *Bitset) Set(index int) { 42 | b.checkRange(index) 43 | b.b[index>>3] |= byte(128 >> byte(index&7)) 44 | } 45 | 46 | // Clear ... 47 | func (b *Bitset) Clear(index int) { 48 | b.checkRange(index) 49 | b.b[index>>3] &= ^byte(128 >> byte(index&7)) 50 | } 51 | 52 | // IsSet ... 53 | func (b *Bitset) IsSet(index int) bool { 54 | b.checkRange(index) 55 | return (b.b[index>>3] & byte(128>>byte(index&7))) != 0 56 | } 57 | 58 | // Len ... 59 | func (b *Bitset) Len() int { 60 | return b.n 61 | } 62 | 63 | // InRange ... 64 | func (b *Bitset) InRange(index int) bool { 65 | return 0 <= index && index < b.n 66 | } 67 | 68 | func (b *Bitset) checkRange(index int) { 69 | if !b.InRange(index) { 70 | common.LOG.Errorf("Index %d out of range 0..%d.", index, b.n) 71 | } 72 | } 73 | 74 | // IsEndValid ... 75 | func (b *Bitset) IsEndValid() bool { 76 | if b.endIndex >= 0 { 77 | return (b.b[b.endIndex] & b.endMask) == 0 78 | } 79 | return true 80 | } 81 | 82 | // FindNextSet ... 83 | // TODO: Make this fast 84 | func (b *Bitset) FindNextSet(index int) int { 85 | for i := index; i < b.n; i++ { 86 | if (b.b[i>>3] & byte(128>>byte(i&7))) != 0 { 87 | return i 88 | } 89 | } 90 | return -1 91 | } 92 | 93 | // FindNextClear ... 94 | // TODO: Make this fast 95 | func (b *Bitset) FindNextClear(index int) int { 96 | for i := index; i < b.n; i++ { 97 | if (b.b[i>>3] & byte(128>>byte(i&7))) == 0 { 98 | return i 99 | } 100 | } 101 | return -1 102 | } 103 | 104 | // Bytes ... 105 | func (b *Bitset) Bytes() []byte { 106 | return b.b 107 | } 108 | -------------------------------------------------------------------------------- /p2p/cache.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "github.com/labstack/gommon/log" 5 | "github.com/xtfly/gofd/common" 6 | "math" 7 | "sort" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | ) 12 | 13 | // CacheProvider ... 14 | type CacheProvider interface { 15 | NewCache(infohash string, numPieces int, pieceLength int, totalSize int64) FileCache 16 | } 17 | 18 | // FileCache ... 19 | type FileCache interface { 20 | //Read what's cached, returns parts that weren't available to read. 21 | readAt(p []byte, offset int64) []chunk 22 | //Writes to cache, returns uncommitted data that has been trimmed. 23 | writeAt(p []byte, offset int64) []chunk 24 | //Marks a piece as committed to permanent storage. 25 | MarkCommitted(piece int) 26 | //Close the cache and free all the things 27 | Close() 28 | } 29 | 30 | type inttuple struct { 31 | a, b int 32 | } 33 | 34 | type accessTime struct { 35 | index int 36 | atime time.Time 37 | } 38 | type byTime []accessTime 39 | 40 | func (a byTime) Len() int { return len(a) } 41 | func (a byTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] } 42 | func (a byTime) Less(i, j int) bool { return a[i].atime.Before(a[j].atime) } 43 | 44 | //RAMCacheProvider provider creates a ram cache for each torrent. 45 | //Each time a cache is created or closed, all cache 46 | //are recalculated so they total <= capacity (in MiB). 47 | type RAMCacheProvider struct { 48 | capacity int 49 | caches map[string]*RAMCache 50 | m *sync.Mutex 51 | } 52 | 53 | // NewRAMCacheProvider ... 54 | func NewRAMCacheProvider(capacity int) CacheProvider { 55 | rc := &RAMCacheProvider{capacity, make(map[string]*RAMCache), new(sync.Mutex)} 56 | return rc 57 | } 58 | 59 | // NewCache ... 60 | func (r *RAMCacheProvider) NewCache(infohash string, numPieces int, pieceSize int, torrentLength int64) FileCache { 61 | i := uint32(1) 62 | rc := &RAMCache{ 63 | pieceSize: pieceSize, 64 | atimes: make([]time.Time, numPieces), 65 | store: make([][]byte, numPieces), 66 | isBoxFull: *NewBitset(numPieces), 67 | isBoxCommit: *NewBitset(numPieces), 68 | isByteSet: make([]Bitset, numPieces), 69 | torrentLength: torrentLength, 70 | cacheProvider: r, 71 | capacity: &i, 72 | infohash: infohash} 73 | 74 | r.m.Lock() 75 | r.caches[infohash] = rc 76 | r.rebalance(true) 77 | r.m.Unlock() 78 | return rc 79 | } 80 | 81 | //rebalance the cache capacity allocations; has to be called on each cache creation or deletion. 82 | //'shouldTrim', if true, causes trimCommitted() to be called on all the caches. Recommended if a new cache was created 83 | //because otherwise the old caches would stay over the new capacity until their next WriteAt happens. 84 | func (r *RAMCacheProvider) rebalance(shouldTrim bool) { 85 | //Cache size is a diminishing return thing: 86 | //The more of it a torrent has, the less of a difference additional cache makes. 87 | //Thus, instead of scaling the distribution lineraly with torrent size, we'll do it by square-root 88 | common.LOG.Debug("Rebalancing caches...") 89 | var scalingTotal float64 90 | sqrts := make(map[string]float64) 91 | for i, cache := range r.caches { 92 | sqrts[i] = math.Sqrt(float64(cache.torrentLength)) 93 | scalingTotal += sqrts[i] 94 | } 95 | 96 | scalingFactor := float64(r.capacity*1024*1024) / scalingTotal 97 | for i, cache := range r.caches { 98 | newCap := int(math.Floor(scalingFactor * sqrts[i] / float64(cache.pieceSize))) 99 | if newCap == 0 { 100 | newCap = 1 //Something's better than nothing! 101 | } 102 | log.Debugf("Setting cache '%s' to new capacity %v (%v MiB)", cache.infohash, newCap, float32(newCap*cache.pieceSize)/float32(1024*1024)) 103 | cache.setCapacity(newCap) 104 | } 105 | 106 | if shouldTrim { 107 | for _, cache := range r.caches { 108 | cache.trimCommitted() 109 | } 110 | } 111 | } 112 | 113 | func (r *RAMCacheProvider) cacheClosed(infohash string) { 114 | r.m.Lock() 115 | delete(r.caches, infohash) 116 | r.rebalance(false) 117 | r.m.Unlock() 118 | } 119 | 120 | // RAMCache ... 121 | //'pieceSize' is the size of the average piece 122 | //'capacity' is how many pieces the cache can hold 123 | //'actualUsage' is how many pieces the cache has at the moment 124 | //'atime' is an array of access times for each stored box 125 | //'store' is an array of "boxes" ([]byte of 1 piece each) 126 | //'isBoxFull' indicates if a box entirely contains written data 127 | //'isBoxCommit' indicates if a box has been committed to storage 128 | //'isByteSet' for [i] indicates for box 'i' if a byte has been written to 129 | //'torrentLength' is the number of bytes in the torrent 130 | //'cacheProvider' is a pointer to the cacheProvider that created this cache 131 | //'infohash' is the infohash of the torrent 132 | type RAMCache struct { 133 | pieceSize int 134 | capacity *uint32 //Access only through getter/setter 135 | actualUsage int 136 | atimes []time.Time 137 | store [][]byte 138 | isBoxFull Bitset 139 | isBoxCommit Bitset 140 | isByteSet []Bitset 141 | torrentLength int64 142 | cacheProvider *RAMCacheProvider 143 | infohash string 144 | m sync.RWMutex 145 | } 146 | 147 | // Close ... 148 | func (r *RAMCache) Close() { 149 | r.cacheProvider.cacheClosed(r.infohash) 150 | //We don't need to do anything else. The garbage collector will take care of it. 151 | } 152 | 153 | func (r *RAMCache) readAt(p []byte, off int64) []chunk { 154 | r.m.RLock() 155 | defer r.m.RUnlock() 156 | var unfulfilled []chunk 157 | 158 | boxI := int(off / int64(r.pieceSize)) 159 | boxOff := int(off % int64(r.pieceSize)) 160 | 161 | for i := 0; i < len(p); { 162 | if r.store[boxI] == nil { //definitely not in cache 163 | end := len(p[i:]) 164 | if end > r.pieceSize-boxOff { 165 | end = r.pieceSize - boxOff 166 | } 167 | if len(unfulfilled) > 0 { 168 | last := unfulfilled[len(unfulfilled)-1] 169 | if last.i+int64(len(last.data)) == off+int64(i) { 170 | unfulfilled = unfulfilled[:len(unfulfilled)-1] 171 | i = int(last.i - off) 172 | end += len(last.data) 173 | } 174 | } 175 | unfulfilled = append(unfulfilled, chunk{off + int64(i), p[i : i+end]}) 176 | i += end 177 | } else if r.isBoxFull.IsSet(boxI) { //definitely in cache 178 | i += copy(p[i:], r.store[boxI][boxOff:]) 179 | } else { //Bah, do it byte by byte. 180 | missing := []*inttuple{&inttuple{-1, -1}} 181 | end := len(p[i:]) + boxOff 182 | if end > r.pieceSize { 183 | end = r.pieceSize 184 | } 185 | for j := boxOff; j < end; j++ { 186 | if r.isByteSet[boxI].IsSet(j) { 187 | p[i] = r.store[boxI][j] 188 | } else { 189 | lastIT := missing[len(missing)-1] 190 | if lastIT.b == i { 191 | lastIT.b = i + 1 192 | } else { 193 | missing = append(missing, &inttuple{i, i + 1}) 194 | } 195 | } 196 | i++ 197 | } 198 | for _, intt := range missing[1:] { 199 | unfulfilled = append(unfulfilled, chunk{off + int64(intt.a), p[intt.a:intt.b]}) 200 | } 201 | } 202 | boxI++ 203 | boxOff = 0 204 | } 205 | return unfulfilled 206 | } 207 | 208 | // writeAt 209 | func (r *RAMCache) writeAt(p []byte, off int64) []chunk { 210 | r.m.Lock() 211 | defer r.m.Unlock() 212 | boxI := int(off / int64(r.pieceSize)) 213 | boxOff := int(off % int64(r.pieceSize)) 214 | 215 | for i := 0; i < len(p); { 216 | if r.store[boxI] == nil { 217 | r.store[boxI] = make([]byte, r.pieceSize) 218 | r.actualUsage++ 219 | } 220 | copied := copy(r.store[boxI][boxOff:], p[i:]) 221 | i += copied 222 | r.atimes[boxI] = time.Now() 223 | if copied == r.pieceSize { 224 | r.isBoxFull.Set(boxI) 225 | } else { 226 | if r.isByteSet[boxI].n == 0 { 227 | r.isByteSet[boxI] = *NewBitset(r.pieceSize) 228 | } 229 | for j := boxOff; j < boxOff+copied; j++ { 230 | r.isByteSet[boxI].Set(j) 231 | } 232 | } 233 | boxI++ 234 | boxOff = 0 235 | } 236 | if r.actualUsage > r.getCapacity() { 237 | return r.trim() 238 | } 239 | return nil 240 | } 241 | 242 | // MarkCommitted ... 243 | func (r *RAMCache) MarkCommitted(piece int) { 244 | r.m.Lock() 245 | defer r.m.Unlock() 246 | if r.store[piece] != nil { 247 | r.isBoxFull.Set(piece) 248 | r.isBoxCommit.Set(piece) 249 | r.isByteSet[piece] = *NewBitset(0) 250 | } 251 | } 252 | 253 | func (r *RAMCache) removeBox(boxI int) { 254 | r.isBoxFull.Clear(boxI) 255 | r.isBoxCommit.Clear(boxI) 256 | r.isByteSet[boxI] = *NewBitset(0) 257 | r.store[boxI] = nil 258 | r.actualUsage-- 259 | } 260 | 261 | func (r *RAMCache) getCapacity() int { 262 | return int(atomic.LoadUint32(r.capacity)) 263 | } 264 | 265 | func (r *RAMCache) setCapacity(capacity int) { 266 | atomic.StoreUint32(r.capacity, uint32(capacity)) 267 | } 268 | 269 | //Trim stuff that's already been committed 270 | //Return true if we got underneath capacity, false if not. 271 | func (r *RAMCache) trimCommitted() bool { 272 | r.m.Lock() 273 | defer r.m.Unlock() 274 | for i := 0; i < r.isBoxCommit.Len(); i++ { 275 | if r.isBoxCommit.IsSet(i) { 276 | r.removeBox(i) 277 | } 278 | if r.actualUsage <= r.getCapacity() { 279 | return true 280 | } 281 | } 282 | return false 283 | } 284 | 285 | //Trim excess data. Returns any uncommitted chunks that were trimmed 286 | func (r *RAMCache) trim() []chunk { 287 | if r.trimCommitted() { 288 | return nil 289 | } 290 | 291 | var retVal []chunk 292 | 293 | //Still need more space? figure out what's oldest 294 | //RawWrite it to storage, and clear that then 295 | tATA := make([]accessTime, 0, r.actualUsage) 296 | 297 | for i, atime := range r.atimes { 298 | if r.store[i] != nil { 299 | tATA = append(tATA, accessTime{i, atime}) 300 | } 301 | } 302 | 303 | sort.Sort(byTime(tATA)) 304 | 305 | deficit := r.actualUsage - r.getCapacity() 306 | for i := 0; i < deficit; i++ { 307 | deadBox := tATA[i].index 308 | data := r.store[deadBox] 309 | if r.isBoxFull.IsSet(deadBox) { //Easy, the whole box has to go 310 | retVal = append(retVal, chunk{int64(deadBox) * int64(r.pieceSize), data}) 311 | } else { //Ugh, we'll just trim anything unused from the start and the end, and send that. 312 | off := int64(0) 313 | endData := r.pieceSize 314 | //Trim out any unset bytes at the beginning 315 | for j := 0; j < r.pieceSize; j++ { 316 | if !r.isByteSet[deadBox].IsSet(j) { 317 | off++ 318 | } else { 319 | break 320 | } 321 | } 322 | 323 | //Trim out any unset bytes at the end 324 | for j := r.pieceSize - 1; j > 0; j-- { 325 | if !r.isByteSet[deadBox].IsSet(j) { 326 | endData-- 327 | } else { 328 | break 329 | } 330 | } 331 | retVal = append(retVal, chunk{int64(deadBox)*int64(r.pieceSize) + off, data[off:endData]}) 332 | } 333 | r.removeBox(deadBox) 334 | } 335 | return retVal 336 | } 337 | -------------------------------------------------------------------------------- /p2p/file.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "errors" 5 | "github.com/labstack/gommon/log" 6 | "github.com/xtfly/gofd/common" 7 | "io" 8 | ) 9 | 10 | // File Interface for a file. 11 | // Multiple goroutines may access a File at the same time. 12 | type File interface { 13 | io.ReaderAt 14 | io.WriterAt 15 | io.Closer 16 | } 17 | 18 | // FsProvider Interface for a provider of filesystems. 19 | type FsProvider interface { 20 | NewFS() (FileSystem, error) 21 | } 22 | 23 | // FileSystem Interface for a file system. A file system contains files. 24 | type FileSystem interface { 25 | Open(name []string, length int64) (file File, err error) 26 | io.Closer 27 | } 28 | 29 | // FileStore a file store. 30 | type FileStore interface { 31 | io.ReaderAt 32 | io.WriterAt 33 | io.Closer 34 | SetCache(FileCache) 35 | Commit(int, []byte, int64) 36 | } 37 | 38 | type fileStore struct { 39 | fileSystem FileSystem 40 | offsets []int64 41 | files []fileEntry // Stored in increasing globalOffset order 42 | cache FileCache 43 | } 44 | 45 | type fileEntry struct { 46 | length int64 47 | file File 48 | } 49 | 50 | // NewFileStore 根据元数据信息打开所有文件 51 | func NewFileStore(info *MetaInfo, fileSystem FileSystem) (f FileStore, totalSize int64, err error) { 52 | fs := &fileStore{} 53 | fs.fileSystem = fileSystem 54 | 55 | numFiles := len(info.Files) 56 | fs.files = make([]fileEntry, numFiles) 57 | fs.offsets = make([]int64, numFiles) 58 | 59 | for i, src := range info.Files { 60 | var file File 61 | file, err = fs.fileSystem.Open([]string{src.Path, src.Name}, src.Length) 62 | if err != nil { 63 | common.LOG.Errorf("Open file failed, file=%v/%v, error=%v", src.Path, src.Name, err) 64 | // Close all files opened up to now. 65 | for i2 := 0; i2 < i; i2++ { 66 | fs.files[i2].file.Close() 67 | } 68 | return 69 | } 70 | fs.files[i].file = file 71 | fs.files[i].length = src.Length 72 | fs.offsets[i] = totalSize 73 | totalSize += src.Length 74 | } 75 | f = fs 76 | return 77 | } 78 | 79 | // SetCache ... 80 | func (f *fileStore) SetCache(cache FileCache) { 81 | f.cache = cache 82 | } 83 | 84 | func (f *fileStore) find(offset int64) int { 85 | // Binary search 86 | offsets := f.offsets 87 | low := 0 88 | high := len(offsets) 89 | for low < high-1 { 90 | probe := (low + high) / 2 91 | entry := offsets[probe] 92 | if offset < entry { 93 | high = probe 94 | } else { 95 | low = probe 96 | } 97 | } 98 | return low 99 | } 100 | 101 | // ReadAt ... 102 | func (f *fileStore) ReadAt(p []byte, off int64) (int, error) { 103 | if f.cache == nil { 104 | return f.RawReadAt(p, off) 105 | } 106 | 107 | unfullfilled := f.cache.readAt(p, off) 108 | 109 | var retErr error 110 | for _, unf := range unfullfilled { 111 | _, err := f.RawReadAt(unf.data, unf.i) 112 | if err != nil { 113 | log.Error("Got an error on read (off=", unf.i, "len=", len(unf.data), ") from filestore:", err) 114 | retErr = err 115 | } 116 | } 117 | return len(p), retErr 118 | } 119 | 120 | // RawReadAt ... 121 | func (f *fileStore) RawReadAt(p []byte, off int64) (n int, err error) { 122 | index := f.find(off) 123 | for len(p) > 0 && index < len(f.offsets) { 124 | chunk := int64(len(p)) 125 | entry := &f.files[index] 126 | itemOffset := off - f.offsets[index] 127 | if itemOffset < entry.length { 128 | space := entry.length - itemOffset 129 | if space < chunk { 130 | chunk = space 131 | } 132 | var nThisTime int 133 | nThisTime, err = entry.file.ReadAt(p[0:chunk], itemOffset) 134 | n = n + nThisTime 135 | if err != nil { 136 | return 137 | } 138 | p = p[nThisTime:] 139 | off += int64(nThisTime) 140 | } 141 | index++ 142 | } 143 | // At this point if there's anything left to read it means we've run off the 144 | // end of the file store. Read zeros. This is defined by the bittorrent protocol. 145 | for i := range p { 146 | p[i] = 0 147 | } 148 | return 149 | } 150 | 151 | // WriteAt ... 152 | func (f *fileStore) WriteAt(p []byte, off int64) (int, error) { 153 | if f.cache != nil { 154 | needRawWrite := f.cache.writeAt(p, off) 155 | if needRawWrite != nil { 156 | for _, nc := range needRawWrite { 157 | f.RawWriteAt(nc.data, nc.i) 158 | } 159 | } 160 | return len(p), nil 161 | } 162 | return f.RawWriteAt(p, off) 163 | } 164 | 165 | // Commit ... 166 | func (f *fileStore) Commit(pieceNum int, piece []byte, off int64) { 167 | if f.cache != nil { 168 | _, err := f.RawWriteAt(piece, off) 169 | if err != nil { 170 | log.Error("Error committing to storage:", err) 171 | return 172 | } 173 | f.cache.MarkCommitted(pieceNum) 174 | } 175 | } 176 | 177 | // RawWriteAt ... 178 | func (f *fileStore) RawWriteAt(p []byte, off int64) (n int, err error) { 179 | index := f.find(off) 180 | for len(p) > 0 && index < len(f.offsets) { 181 | chunk := int64(len(p)) 182 | entry := &f.files[index] 183 | itemOffset := off - f.offsets[index] 184 | if itemOffset < entry.length { 185 | space := entry.length - itemOffset 186 | if space < chunk { 187 | chunk = space 188 | } 189 | var nThisTime int 190 | nThisTime, err = entry.file.WriteAt(p[0:chunk], itemOffset) 191 | n += nThisTime 192 | if err != nil { 193 | return 194 | } 195 | p = p[nThisTime:] 196 | off += int64(nThisTime) 197 | } 198 | index++ 199 | } 200 | // At this point if there's anything left to write it means we've run off the 201 | // end of the file store. Check that the data is zeros. 202 | // This is defined by the bittorrent protocol. 203 | for i := range p { 204 | if p[i] != 0 { 205 | err = errors.New("Unexpected non-zero data at end of store.") 206 | n = n + i 207 | return 208 | } 209 | } 210 | n = n + len(p) 211 | return 212 | } 213 | 214 | // Close ... 215 | func (f *fileStore) Close() (err error) { 216 | for i := range f.files { 217 | f.files[i].file.Close() 218 | } 219 | if f.cache != nil { 220 | f.cache.Close() 221 | f.cache = nil 222 | } 223 | if f.fileSystem != nil { 224 | err = f.fileSystem.Close() 225 | } 226 | return 227 | } 228 | -------------------------------------------------------------------------------- /p2p/listen.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "fmt" 7 | "net" 8 | "time" 9 | 10 | "github.com/xtfly/gofd/common" 11 | "github.com/xtfly/gokits/gcrypto" 12 | ) 13 | 14 | // PeerConn wraps an incoming network connection and contains metadata that helps 15 | // identify which active p2pSession it's relevant for. 16 | type PeerConn struct { 17 | conn net.Conn 18 | client bool // 对端是否为客户端 19 | remoteAddr net.Addr 20 | taskID string 21 | } 22 | 23 | // StartListen listens on a TCP port for incoming connections and 24 | // demuxes them to the appropriate active p2pSession based on the taskId 25 | // in the header. 26 | func StartListen(cfg *common.Config) (conChan chan *PeerConn, listener net.Listener, err error) { 27 | listener, err = CreateListener(cfg) 28 | if err != nil { 29 | return 30 | } 31 | 32 | conChan = make(chan *PeerConn) 33 | go func(cfg *common.Config, conChan chan *PeerConn) { 34 | var tempDelay time.Duration 35 | for { 36 | conn, e := listener.Accept() 37 | if e != nil { 38 | if ne, ok := e.(net.Error); ok && ne.Temporary() { 39 | if tempDelay == 0 { 40 | tempDelay = 5 * time.Millisecond 41 | } else { 42 | tempDelay *= 2 43 | } 44 | if max := 1 * time.Second; tempDelay > max { 45 | tempDelay = max 46 | } 47 | common.LOG.Infof("Accept error: %v; retrying in %v", e, tempDelay) 48 | time.Sleep(tempDelay) 49 | continue 50 | } 51 | return 52 | } 53 | tempDelay = 0 54 | 55 | h, err := readPHeader(conn) 56 | if err != nil { 57 | common.LOG.Error("Error reading header: ", err) 58 | continue 59 | } 60 | 61 | if err := h.validate(cfg); err != nil { 62 | common.LOG.Error("header auth failed:", err) 63 | continue 64 | } 65 | 66 | conChan <- &PeerConn{ 67 | conn: conn, 68 | client: true, 69 | remoteAddr: conn.RemoteAddr(), 70 | taskID: h.TaskID, 71 | } 72 | } 73 | }(cfg, conChan) 74 | 75 | return 76 | } 77 | 78 | // CreateListener ... 79 | func CreateListener(cfg *common.Config) (listener net.Listener, err error) { 80 | listener, err = net.ListenTCP("tcp", 81 | &net.TCPAddr{ 82 | IP: net.ParseIP(cfg.Net.IP), 83 | Port: cfg.Net.DataPort, 84 | }) 85 | 86 | if err != nil { 87 | common.LOG.Error("Listen failed:", err) 88 | return 89 | } 90 | 91 | common.LOG.Infof("Listening for peers on %s:%v", cfg.Net.IP, cfg.Net.DataPort) 92 | return 93 | } 94 | 95 | // reading header info 96 | func readPHeader(conn net.Conn) (h *PHeader, err error) { 97 | h = &PHeader{} 98 | 99 | var bslen int32 100 | err = binary.Read(conn, binary.BigEndian, &bslen) 101 | if err != nil { 102 | err = fmt.Errorf("read length error: %v", err) 103 | return 104 | } 105 | 106 | if bslen <= 0 || bslen > 200 { 107 | err = fmt.Errorf("read length is invalid: %v", bslen) 108 | return 109 | } 110 | 111 | bs := make([]byte, bslen) 112 | _, err = conn.Read(bs) 113 | if err != nil { 114 | err = fmt.Errorf("Couldn't read auth info: %v", err) 115 | return 116 | } 117 | 118 | h.Len = bslen 119 | buf := bytes.NewBuffer(bs) 120 | 121 | if h.TaskID, err = readString(buf); err != nil { 122 | return 123 | } 124 | 125 | if h.Username, err = readString(buf); err != nil { 126 | return 127 | } 128 | 129 | if h.Password, err = readString(buf); err != nil { 130 | return 131 | } 132 | 133 | if h.Salt, err = readString(buf); err != nil { 134 | return 135 | } 136 | 137 | return 138 | } 139 | 140 | func readString(buf *bytes.Buffer) (str string, err error) { 141 | if str, err = buf.ReadString(byte(0x00)); err != nil { 142 | err = fmt.Errorf("Read string error: %v", err) 143 | return 144 | } 145 | str = str[:len(str)-1] 146 | return 147 | } 148 | 149 | func writePHeader(conn net.Conn, taskID string, cfg *common.Config) (err error) { 150 | pwd, salt := gcrypto.GenPbkdf2Passwd(cfg.Auth.Password, 8, 10000, 40) 151 | all := [][]byte{[]byte(taskID), 152 | []byte(cfg.Auth.Username), 153 | []byte(pwd), 154 | []byte(salt)} 155 | 156 | buf := bytes.NewBuffer(make([]byte, 0)) 157 | blen := 0 158 | for _, v := range all { 159 | blen += len(v) + 1 160 | } 161 | 162 | err = binary.Write(buf, binary.BigEndian, int32(blen)) 163 | if err != nil { 164 | return 165 | } 166 | for _, v := range all { 167 | buf.Write(v) 168 | buf.WriteByte(0) 169 | } 170 | 171 | _, err = conn.Write(buf.Bytes()) 172 | return 173 | } 174 | 175 | func (h *PHeader) validate(cfg *common.Config) error { 176 | if h.Username != cfg.Auth.Username { 177 | return fmt.Errorf("username or password is incorrect") 178 | } 179 | 180 | if !gcrypto.CmpPbkdf2Passwd(cfg.Auth.Password, h.Salt, h.Password, 10000, 40) { 181 | return fmt.Errorf("username or password is incorrect") 182 | } 183 | 184 | return nil 185 | } 186 | -------------------------------------------------------------------------------- /p2p/meta.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "crypto/sha1" 5 | "fmt" 6 | "github.com/labstack/gommon/log" 7 | "io" 8 | "os" 9 | "path" 10 | ) 11 | 12 | // fileSystemAdapter FileSystem接口适配 13 | type fileSystemAdapter struct { 14 | } 15 | 16 | // Open ... 17 | func (f *fileSystemAdapter) Open(name []string, length int64) (file File, err error) { 18 | var ff *os.File 19 | ff, err = os.Open(path.Clean(path.Join(name...))) 20 | if err != nil { 21 | return 22 | } 23 | stat, err := ff.Stat() 24 | if err != nil { 25 | return 26 | } 27 | actualSize := stat.Size() 28 | if actualSize != length { 29 | err = fmt.Errorf("Unexpected file size %v. Expected %v", actualSize, length) 30 | return 31 | } 32 | file = ff 33 | return 34 | } 35 | 36 | // Close ... 37 | func (f *fileSystemAdapter) Close() error { 38 | return nil 39 | } 40 | 41 | func (m *MetaInfo) addFiles(fileInfo os.FileInfo, file string, idx int) (err error) { 42 | fileDict := FileDict{Length: fileInfo.Size()} 43 | cleanFile := path.Clean(file) 44 | fileDict.Path, fileDict.Name = path.Split(cleanFile) 45 | fileDict.Sum, err = sha1Sum(file) 46 | if err != nil { 47 | return err 48 | } 49 | m.Files[idx] = &fileDict 50 | return 51 | } 52 | 53 | // CreateFileMeta ... 54 | func CreateFileMeta(roots []string, pieceLen int64) (mi *MetaInfo, err error) { 55 | mi = &MetaInfo{Files: make([]*FileDict, len(roots))} 56 | for idx, f := range roots { 57 | var fileInfo os.FileInfo 58 | fileInfo, err = os.Stat(f) 59 | if err != nil { 60 | log.Errorf("File not exist file=%s, error=%v", f, err) 61 | return 62 | } 63 | 64 | if fileInfo.IsDir() { 65 | return nil, fmt.Errorf("Not support dir") 66 | } 67 | 68 | err = mi.addFiles(fileInfo, f, idx) 69 | if err != nil { 70 | return nil, err 71 | } 72 | mi.Length += fileInfo.Size() 73 | } 74 | 75 | if pieceLen == 0 { 76 | pieceLen = choosePieceLength(mi.Length) 77 | } 78 | mi.PieceLen = pieceLen 79 | 80 | fileStore, fileStoreLength, err := NewFileStore(mi, &fileSystemAdapter{}) 81 | if err != nil { 82 | return nil, err 83 | } 84 | defer fileStore.Close() 85 | if fileStoreLength != mi.Length { 86 | return nil, fmt.Errorf("Filestore total length %v, expected %v", fileStoreLength, mi.Length) 87 | } 88 | 89 | var sums []byte 90 | sums, err = computeSums(fileStore, mi.Length, mi.PieceLen) 91 | if err != nil { 92 | return nil, err 93 | } 94 | mi.Pieces = sums 95 | log.Debugf("File totallength=%v, piecelength=%v", mi.Length, pieceLen) 96 | return mi, nil 97 | } 98 | 99 | func sha1Sum(file string) (sum string, err error) { 100 | var f *os.File 101 | f, err = os.Open(file) 102 | if err != nil { 103 | log.Errorf("Open file failed, file=%s, error=%v", file, err) 104 | return 105 | } 106 | defer f.Close() 107 | hash := sha1.New() 108 | _, err = io.Copy(hash, f) 109 | if err != nil { 110 | log.Errorf("Summary file by sha1 failed, file=%s, error=%v", file, err) 111 | return 112 | } 113 | sum = string(hash.Sum(nil)) 114 | return 115 | } 116 | 117 | const ( 118 | minimumPieceLength = 16 * 1024 119 | targetPieceCountLog2 = 10 120 | targetPieceCountMin = 1 << targetPieceCountLog2 121 | 122 | // Target piece count should be < targetPieceCountMax 123 | targetPieceCountMax = targetPieceCountMin << 1 124 | ) 125 | 126 | // Choose a good piecelength. 127 | func choosePieceLength(totalLength int64) (pieceLength int64) { 128 | // Must be a power of 2. 129 | // Must be a multiple of 16KB 130 | // Prefer to provide around 1024..2048 pieces. 131 | pieceLength = minimumPieceLength 132 | pieces := totalLength / pieceLength 133 | for pieces >= targetPieceCountMax { 134 | pieceLength <<= 1 135 | pieces >>= 1 136 | } 137 | return 138 | } 139 | -------------------------------------------------------------------------------- /p2p/osfile.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "errors" 5 | "os" 6 | "path" 7 | "strings" 8 | ) 9 | 10 | // a FileSystem that is backed by real OS files 11 | type osFileSystem struct { 12 | } 13 | 14 | // A File that is backed by an OS file 15 | type osFile struct { 16 | filePath string 17 | } 18 | 19 | // OsFsProvider ... 20 | type OsFsProvider struct{} 21 | 22 | // NewFS ... 23 | func (o OsFsProvider) NewFS() (fs FileSystem, err error) { 24 | return &osFileSystem{}, nil 25 | } 26 | 27 | // Open ... 28 | func (o *osFileSystem) Open(name []string, length int64) (file File, err error) { 29 | cleanSrcPath := path.Clean(path.Join(name...)) 30 | fullPath := path.Join(cleanSrcPath) 31 | err = ensureDirectory(fullPath) 32 | if err != nil { 33 | return 34 | } 35 | osfile := &osFile{fullPath} 36 | file = osfile 37 | err = osfile.ensureExists(length) 38 | return 39 | } 40 | 41 | // Close ... 42 | func (o *osFileSystem) Close() error { 43 | return nil 44 | } 45 | 46 | // Close ... 47 | func (o *osFile) Close() (err error) { 48 | return 49 | } 50 | 51 | func ensureDirectory(fullPath string) (err error) { 52 | fullPath = path.Clean(fullPath) 53 | if !strings.HasPrefix(fullPath, "/") { 54 | // Transform into absolute path. 55 | var cwd string 56 | if cwd, err = os.Getwd(); err != nil { 57 | return 58 | } 59 | fullPath = cwd + "/" + fullPath 60 | } 61 | base, _ := path.Split(fullPath) 62 | if base == "" { 63 | panic("Programming error: could not find base directory for absolute path " + fullPath) 64 | } 65 | err = os.MkdirAll(base, 0755) 66 | return 67 | } 68 | 69 | func (o *osFile) ensureExists(length int64) (err error) { 70 | name := o.filePath 71 | st, err := os.Stat(name) 72 | if err != nil && os.IsNotExist(err) { 73 | f, err := os.Create(name) 74 | defer f.Close() 75 | if err != nil { 76 | return err 77 | } 78 | } else { 79 | if st.Size() == length { 80 | return 81 | } 82 | } 83 | err = os.Truncate(name, length) 84 | if err != nil { 85 | err = errors.New("Could not truncate file.") 86 | return 87 | } 88 | return 89 | } 90 | 91 | // ReadAt ... 92 | func (o *osFile) ReadAt(p []byte, off int64) (n int, err error) { 93 | file, err := os.OpenFile(o.filePath, os.O_RDWR, 0600) 94 | if err != nil { 95 | return 96 | } 97 | defer file.Close() 98 | return file.ReadAt(p, off) 99 | } 100 | 101 | // WriteAt ... 102 | func (o *osFile) WriteAt(p []byte, off int64) (n int, err error) { 103 | file, err := os.OpenFile(o.filePath, os.O_RDWR, 0600) 104 | if err != nil { 105 | return 106 | } 107 | defer file.Close() 108 | return file.WriteAt(p, off) 109 | } 110 | -------------------------------------------------------------------------------- /p2p/peer.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "io" 5 | "net" 6 | "time" 7 | 8 | "github.com/xtfly/gofd/common" 9 | "github.com/xtfly/gofd/flowctrl" 10 | ) 11 | 12 | const ( 13 | // 最多从其它Peer发送请求 14 | maxOurRequests = 5 15 | ) 16 | 17 | const ( 18 | // HAVE 每当客户端下载了一个piece,即将该piece的下标作为have消息的负载构造have消息,并把该消息发送给所有建立连接的peer 19 | HAVE = iota 20 | 21 | // BITFIELD 交换位图 22 | BITFIELD 23 | 24 | // REQUEST 向该peer发送数据请求 25 | REQUEST 26 | 27 | // PIECE 当客户端收到某个peer的request消息后,则发送piece消息将文件数据传给该peer。 28 | PIECE 29 | ) 30 | 31 | // 下载连接端 32 | type peer struct { 33 | taskID string // 任务标识 34 | address string // 对端地址 35 | conn net.Conn // 物理连接 36 | client bool // 对端是否为客户端 37 | 38 | writeChan chan []byte // 连接的写Chan 39 | flowctrlWriter *flowctrl.Writer // 基于流控的写 40 | 41 | lastReadTime time.Time // 42 | have *Bitset // 已有的Piece 43 | 44 | ourRequests map[uint64]time.Time // What we requested, when we requested it 45 | } 46 | 47 | type peerMessage struct { 48 | peer *peer 49 | message []byte // nil means an error occurred 50 | } 51 | 52 | // newPeer ... 53 | func newPeer(c *PeerConn, speed int64) *peer { 54 | writeChan := make(chan []byte) 55 | return &peer{ 56 | taskID: c.taskID, 57 | conn: c.conn, 58 | address: c.remoteAddr.String(), 59 | client: c.client, 60 | writeChan: writeChan, 61 | flowctrlWriter: flowctrl.NewWriter(c.conn, speed), 62 | ourRequests: make(map[uint64]time.Time, maxOurRequests), 63 | } 64 | } 65 | 66 | // Close ... 67 | func (p *peer) Close() { 68 | common.LOG.Infof("[%s] Closing connection to %s", p.taskID, p.address) 69 | p.conn.Close() 70 | //close(p.writeChan) 71 | } 72 | 73 | func (p *peer) sendMessage(b []byte) { 74 | p.writeChan <- b 75 | } 76 | 77 | func (p *peer) keepAlive() { 78 | p.sendMessage([]byte{}) 79 | } 80 | 81 | // This func is designed to be run as a goroutine. It 82 | // listens for messages on a channel and sends them to a peer. 83 | func (p *peer) peerWriter(errorChan chan peerMessage) { 84 | common.LOG.Infof("[%s] Writing messages to peer[%s]", p.taskID, p.address) 85 | var lastWriteTime time.Time 86 | 87 | for msg := range p.writeChan { 88 | now := time.Now() 89 | if len(msg) == 0 { 90 | // This is a keep-alive message. 91 | if now.Sub(lastWriteTime) < 2*time.Minute { 92 | continue 93 | } 94 | common.LOG.Tracef("[%s] Sending keep alive to peer[%s]", p.taskID, p.address) 95 | } 96 | lastWriteTime = now 97 | 98 | //common.LOG.Debugf("[%s] Sending message to peer[%s], length=%v", p.taskID, p.address, uint32(len(msg))) 99 | err := writeNBOUint32(p.flowctrlWriter, uint32(len(msg))) 100 | if err != nil { 101 | common.LOG.Error(err) 102 | break 103 | } 104 | _, err = p.flowctrlWriter.Write(msg) 105 | if err != nil { 106 | common.LOG.Errorf("[%s] Failed to write a message to peer[%s], length=%v, err=%v", p.taskID, p.address, len(msg), err) 107 | break 108 | } 109 | } 110 | 111 | common.LOG.Infof("[%s] Exiting Writing messages to peer[%s]", p.taskID, p.address) 112 | errorChan <- peerMessage{p, nil} 113 | } 114 | 115 | // This func is designed to be run as a goroutine. It 116 | // listens for messages from the peer and forwards them to a channel. 117 | func (p *peer) peerReader(msgChan chan peerMessage) { 118 | common.LOG.Infof("[%s] Reading messages from peer[%s]", p.taskID, p.address) 119 | for { 120 | var n uint32 121 | n, err := readNBOUint32(p.conn) 122 | if err != nil { 123 | break 124 | } 125 | if n > maxBlockLen { 126 | common.LOG.Error("[", p.taskID, "] Message size too large: ", n) 127 | break 128 | } 129 | 130 | var buf []byte 131 | if n == 0 { 132 | // keep-alive - we want an empty message 133 | buf = make([]byte, 1) 134 | } else { 135 | buf = make([]byte, n) 136 | } 137 | 138 | _, err = io.ReadFull(p.conn, buf) 139 | if err != nil { 140 | break 141 | } 142 | msgChan <- peerMessage{p, buf} 143 | } 144 | 145 | msgChan <- peerMessage{p, nil} 146 | common.LOG.Infof("[%s] Exiting reading messages from peer[%s]", p.taskID, p.address) 147 | } 148 | 149 | // SendBitfield 发送位图 150 | func (p *peer) SendBitfield(bs *Bitset) { 151 | msg := make([]byte, len(bs.Bytes())+1) 152 | msg[0] = BITFIELD 153 | copy(msg[1:], bs.Bytes()) 154 | common.LOG.Tracef("[%s] send BITFIELD to peer[%s]", p.taskID, p.address) 155 | p.sendMessage(msg) 156 | } 157 | 158 | // SendHave ... 159 | func (p *peer) SendHave(piece uint32) { 160 | haveMsg := make([]byte, 5) 161 | haveMsg[0] = HAVE 162 | uint32ToBytes(haveMsg[1:5], piece) 163 | common.LOG.Tracef("[%s] send HAVE to peer[%s], piece=%v", p.taskID, p.address, piece) 164 | p.sendMessage(haveMsg) 165 | } 166 | 167 | // SendRequest ... 168 | func (p *peer) SendRequest(piece, begin, length int) { 169 | req := make([]byte, 13) 170 | req[0] = byte(REQUEST) 171 | uint32ToBytes(req[1:5], uint32(piece)) 172 | uint32ToBytes(req[5:9], uint32(begin)) 173 | uint32ToBytes(req[9:13], uint32(length)) 174 | requestIndex := (uint64(piece) << 32) | uint64(begin) 175 | 176 | p.ourRequests[requestIndex] = time.Now() 177 | common.LOG.Tracef("[%s] send REQUEST to peer[%s], piece=%v, begin=%v, length=%v", 178 | p.taskID, p.address, piece, begin, length) 179 | p.sendMessage(req) 180 | } 181 | -------------------------------------------------------------------------------- /p2p/piece.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "crypto/sha1" 5 | "errors" 6 | "fmt" 7 | "runtime" 8 | ) 9 | 10 | const ( 11 | // 每个Piece分成多个Block,每次下载块的大小 12 | standardBlockLen = 32 * 1024 13 | 14 | // 最大块的长度 15 | maxBlockLen = 128 * 1024 16 | ) 17 | 18 | type chunk struct { 19 | i int64 20 | data []byte 21 | } 22 | 23 | func countPieces(totalSize, pieceLen int64) (totalPieces, lastPieceLength int) { 24 | totalPieces = int(totalSize / pieceLen) 25 | lastPieceLength = int(totalSize % pieceLen) 26 | if lastPieceLength == 0 { // last piece is a full piece 27 | lastPieceLength = int(pieceLen) 28 | } else { 29 | totalPieces++ 30 | } 31 | return 32 | } 33 | 34 | // 根据元数据信息,在文件中检查已下载的位图信息,有多少好的Piece,有多少块的Piece 35 | func checkPieces(fs FileStore, totalLength int64, m *MetaInfo) (good, bad int, goodBits *Bitset, err error) { 36 | pieceLen := m.PieceLen 37 | totalPieces, _ := countPieces(totalLength, pieceLen) 38 | goodBits = NewBitset(int(totalPieces)) 39 | ref := m.Pieces 40 | refLen := len(ref) 41 | if refLen != totalPieces*sha1.Size { 42 | err = errors.New(fmt.Sprint("Incorrect MetaInfo.Pieces length ", totalPieces*sha1.Size, "actual length ", refLen)) 43 | return 44 | } 45 | currentSums, err := computeSums(fs, totalLength, pieceLen) 46 | if err != nil { 47 | return 48 | } 49 | for i := 0; i < totalPieces; i++ { 50 | base := i * sha1.Size 51 | end := base + sha1.Size 52 | if checkEqual([]byte(ref[base:end]), currentSums[base:end]) { 53 | good++ 54 | goodBits.Set(int(i)) 55 | } else { 56 | bad++ 57 | } 58 | } 59 | return 60 | } 61 | 62 | // computeSums reads the file content and computes the SHA1 hash for each 63 | // piece. Spawns parallel goroutines to compute the hashes, since each 64 | // computation takes ~30ms. 65 | func computeSums(fs FileStore, totalLength int64, pieceLength int64) (sums []byte, err error) { 66 | // Calculate the SHA1 hash for each piece in parallel goroutines. 67 | hashes := make(chan chunk) 68 | results := make(chan chunk, 3) 69 | for i := 0; i < runtime.GOMAXPROCS(0); i++ { 70 | go hashPiece(hashes, results) 71 | } 72 | 73 | // Read file content and send to "pieces", keeping order. 74 | numPieces := (totalLength + pieceLength - 1) / pieceLength 75 | go func() { 76 | for i := int64(0); i < numPieces; i++ { 77 | piece := make([]byte, pieceLength, pieceLength) 78 | if i == numPieces-1 { 79 | piece = piece[0 : totalLength-i*pieceLength] 80 | } 81 | // Ignore errors. 82 | _, _ = fs.ReadAt(piece, i*pieceLength) 83 | hashes <- chunk{i: i, data: piece} 84 | } 85 | close(hashes) 86 | }() 87 | 88 | // Merge back the results. 89 | sums = make([]byte, sha1.Size*numPieces) 90 | for i := int64(0); i < numPieces; i++ { 91 | h := <-results 92 | copy(sums[h.i*sha1.Size:], h.data) 93 | } 94 | return 95 | } 96 | 97 | func hashPiece(h chan chunk, result chan chunk) { 98 | hasher := sha1.New() 99 | for piece := range h { 100 | hasher.Reset() 101 | _, err := hasher.Write(piece.data) 102 | if err != nil { 103 | result <- chunk{piece.i, nil} 104 | } else { 105 | result <- chunk{piece.i, hasher.Sum(nil)} 106 | } 107 | } 108 | } 109 | 110 | func computePieceSum(fs FileStore, totalLength int64, pieceLength int64, pieceIndex int) (sum []byte, piece []byte, err error) { 111 | numPieces := (totalLength + pieceLength - 1) / pieceLength 112 | hasher := sha1.New() 113 | piece = make([]byte, pieceLength) 114 | if int64(pieceIndex) == numPieces-1 { 115 | piece = piece[0 : totalLength-int64(pieceIndex)*pieceLength] 116 | } 117 | _, err = fs.ReadAt(piece, int64(pieceIndex)*pieceLength) 118 | if err != nil { 119 | return 120 | } 121 | _, err = hasher.Write(piece) 122 | if err != nil { 123 | return 124 | } 125 | sum = hasher.Sum(nil) 126 | return 127 | } 128 | 129 | func checkPiece(fs FileStore, totalLength int64, m *MetaInfo, pieceIndex int) (good bool, piece []byte, err error) { 130 | ref := m.Pieces 131 | var currentSum []byte 132 | currentSum, piece, err = computePieceSum(fs, totalLength, m.PieceLen, pieceIndex) 133 | if err != nil { 134 | return 135 | } 136 | base := pieceIndex * sha1.Size 137 | end := base + sha1.Size 138 | refSha1 := []byte(ref[base:end]) 139 | good = checkEqual(refSha1, currentSum) 140 | if !good { 141 | err = fmt.Errorf("reference sha1: %v != piece sha1: %v", refSha1, currentSum) 142 | } 143 | return 144 | } 145 | 146 | // ActivePiece 正在下载的Piece 147 | type ActivePiece struct { 148 | downloaderCount []int // -1 means piece is already downloaded 149 | pieceLength int 150 | } 151 | 152 | // NewActivePiece ... 153 | func NewActivePiece(pieceLength int) *ActivePiece { 154 | pieceCount := (pieceLength + standardBlockLen - 1) / standardBlockLen 155 | return &ActivePiece{make([]int, pieceCount), pieceLength} 156 | } 157 | 158 | func (a *ActivePiece) chooseBlockToDownload(endgame bool) (index int) { 159 | if endgame { 160 | return a.chooseBlockToDownloadEndgame() 161 | } 162 | return a.chooseBlockToDownloadNormal() 163 | } 164 | 165 | func (a *ActivePiece) chooseBlockToDownloadNormal() (index int) { 166 | for i, v := range a.downloaderCount { 167 | if v == 0 { 168 | a.downloaderCount[i]++ 169 | return i 170 | } 171 | } 172 | return -1 173 | } 174 | 175 | func (a *ActivePiece) chooseBlockToDownloadEndgame() (index int) { 176 | index, minCount := -1, -1 177 | for i, v := range a.downloaderCount { 178 | if v >= 0 && (minCount == -1 || minCount > v) { 179 | index, minCount = i, v 180 | } 181 | } 182 | if index > -1 { 183 | a.downloaderCount[index]++ 184 | } 185 | return 186 | } 187 | 188 | func (a *ActivePiece) recordBlock(index int) (requestCount int) { 189 | requestCount = a.downloaderCount[index] 190 | a.downloaderCount[index] = -1 191 | return 192 | } 193 | 194 | func (a *ActivePiece) isComplete() bool { 195 | for _, v := range a.downloaderCount { 196 | if v != -1 { 197 | return false 198 | } 199 | } 200 | return true 201 | } 202 | -------------------------------------------------------------------------------- /p2p/report.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | 7 | "github.com/xtfly/gofd/common" 8 | ) 9 | 10 | type reportInfo struct { 11 | serverAddr string 12 | percentComplete float32 13 | } 14 | 15 | type reporter struct { 16 | taskID string 17 | cfg *common.Config 18 | client *http.Client 19 | 20 | reportChan chan *reportInfo 21 | } 22 | 23 | func newReporter(taskID string, cfg *common.Config) *reporter { 24 | r := &reporter{ 25 | taskID: taskID, 26 | cfg: cfg, 27 | client: common.CreateHTTPClient(cfg), 28 | reportChan: make(chan *reportInfo, 20), 29 | } 30 | 31 | go r.run() 32 | return r 33 | } 34 | 35 | func (r *reporter) run() { 36 | for rc := range r.reportChan { 37 | r.reportImp(rc) 38 | } 39 | } 40 | 41 | func (r *reporter) DoReport(serverAddr string, pecent float32) { 42 | r.reportChan <- &reportInfo{serverAddr: serverAddr, percentComplete: pecent} 43 | } 44 | 45 | func (r *reporter) Close() { 46 | close(r.reportChan) 47 | } 48 | 49 | func (r *reporter) reportImp(ri *reportInfo) { 50 | if int(ri.percentComplete) == 100 { 51 | common.LOG.Infof("[%s] Report session status... completed", r.taskID) 52 | } 53 | csr := &StatusReport{ 54 | TaskID: r.taskID, 55 | IP: r.cfg.Net.IP, 56 | PercentComplete: ri.percentComplete, 57 | } 58 | bs, err := json.Marshal(csr) 59 | if err != nil { 60 | common.LOG.Errorf("[%s] Report session status failed. error=%v", r.taskID, err) 61 | return 62 | } 63 | 64 | _, err = common.SendHTTPReq(r.cfg, "POST", 65 | ri.serverAddr, "/api/v1/server/tasks/status", bs) 66 | if err != nil { 67 | common.LOG.Errorf("[%s] Report session status failed. error=%v", r.taskID, err) 68 | } 69 | return 70 | } 71 | -------------------------------------------------------------------------------- /p2p/session.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "math/rand" 8 | "net" 9 | "path/filepath" 10 | "time" 11 | 12 | "github.com/xtfly/gofd/common" 13 | "github.com/xtfly/gokits/gfile" 14 | ) 15 | 16 | const ( 17 | // 同一地址最大连接次数 18 | maxRetryConnectTimes = 10 19 | ) 20 | 21 | // TaskSession ... 22 | type TaskSession struct { 23 | // 全局信息 24 | g *global 25 | 26 | // 任务信息 27 | taskID string 28 | task *DispatchTask 29 | fileStore FileStore 30 | 31 | // 下载过程中的Pieces信息 32 | pieceSet *Bitset // 本节点已存在Piece 33 | totalPieces int // 整个Piece个数 34 | totalSize int64 // 所有文件大小 35 | lastPieceLength int // 最一块Piece的长度 36 | goodPieces int // 已下载的Piece个数 37 | downloaded uint64 // 已下载的字节数 38 | checkPieceTime float64 // 检查Piece所花费的时间累计 39 | 40 | // 正在下载的Piece 41 | activePieces map[int]*ActivePiece 42 | 43 | // Peer信息 44 | addPeerChan chan *PeerConn 45 | startChan chan *StartTask 46 | peers map[string]*peer 47 | peerMessageChan chan peerMessage 48 | 49 | // 重新连接定时器 50 | retryConnTimeChan <-chan time.Time 51 | indexInChain int 52 | connFailCount int 53 | 54 | // 55 | quitChan chan struct{} 56 | endedChan chan struct{} 57 | stopSessChan chan string // sessionmgnt 58 | 59 | // 60 | reportor *reporter 61 | reportStep int 62 | 63 | // 64 | initedAt time.Time 65 | startAt time.Time 66 | finishedAt time.Time 67 | } 68 | 69 | // NewTaskSession ... 70 | func NewTaskSession(g *global, dt *DispatchTask, stopSessChan chan string) (s *TaskSession, err error) { 71 | s = &TaskSession{ 72 | g: g, 73 | taskID: dt.TaskID, 74 | task: dt, 75 | 76 | activePieces: make(map[int]*ActivePiece), 77 | peers: make(map[string]*peer), 78 | 79 | addPeerChan: make(chan *PeerConn, 5), // 不要阻塞 80 | startChan: make(chan *StartTask), 81 | peerMessageChan: make(chan peerMessage, 5), 82 | 83 | quitChan: make(chan struct{}), 84 | endedChan: make(chan struct{}), 85 | 86 | stopSessChan: stopSessChan, 87 | reportor: newReporter(dt.TaskID, g.cfg), 88 | } 89 | return 90 | } 91 | 92 | func (s *TaskSession) init() error { 93 | common.LOG.Infof("[%s] Initing p2p session...", s.taskID) 94 | fileSystem, err := s.g.fsProvider.NewFS() 95 | if err != nil { 96 | return err 97 | } 98 | 99 | // 初始化存储 100 | m := s.task.MetaInfo 101 | s.fileStore, s.totalSize, err = NewFileStore(m, fileSystem) 102 | if err != nil { 103 | return err 104 | } 105 | 106 | s.totalPieces, s.lastPieceLength = countPieces(s.totalSize, m.PieceLen) 107 | return nil 108 | } 109 | 110 | func (s *TaskSession) initInServer() error { 111 | if err := s.init(); err != nil { 112 | return err 113 | } 114 | 115 | s.goodPieces = int(s.totalPieces) 116 | // 标识服务端都是下载完成的 117 | s.pieceSet = NewBitset(s.goodPieces) 118 | for index := 0; index < s.goodPieces; index++ { 119 | s.pieceSet.Set(index) 120 | } 121 | 122 | common.LOG.Infof("[%s] Inited p2p server session", s.taskID) 123 | s.initedAt = time.Now() 124 | return nil 125 | } 126 | 127 | func (s *TaskSession) initInClient() error { 128 | // 客户端与服务端的下载路径不同,修改路径 129 | exsited := false 130 | for _, fd := range s.task.MetaInfo.Files { 131 | fd.Path = s.g.cfg.DownDir 132 | exsited = gfile.FileExist(filepath.Join(s.g.cfg.DownDir, fd.Name)) 133 | } 134 | 135 | if err := s.init(); err != nil { 136 | return err 137 | } 138 | 139 | //计算已经下载的块信息 140 | if exsited { 141 | var err error 142 | start := time.Now() 143 | s.goodPieces, _, s.pieceSet, err = checkPieces(s.fileStore, s.totalSize, s.task.MetaInfo) 144 | end := time.Now() 145 | s.checkPieceTime += end.Sub(start).Seconds() 146 | common.LOG.Infof("[%s] Computed missing pieces: total(%v), good(%v) (%.2f seconds)", s.taskID, 147 | s.totalPieces, s.goodPieces, s.checkPieceTime) 148 | if err != nil { 149 | return err 150 | } 151 | } else { 152 | s.pieceSet = NewBitset(s.totalPieces) 153 | s.goodPieces = 0 154 | } 155 | 156 | common.LOG.Infof("[%s] Inited p2p client session", s.taskID) 157 | s.initedAt = time.Now() 158 | return nil 159 | } 160 | 161 | func (s *TaskSession) initPeersBitset() { 162 | // Enlarge any existing peers piece maps 163 | for _, p := range s.peers { 164 | if p.have.n != s.totalPieces { 165 | if p.have.n != 0 { 166 | common.LOG.Error("Expected p.have.n == 0") 167 | panic("Expected p.have.n == 0") 168 | } 169 | p.have = NewBitset(s.totalPieces) 170 | } 171 | } 172 | } 173 | 174 | // Start ... 175 | func (s *TaskSession) Start(st *StartTask) { 176 | s.startChan <- st 177 | } 178 | 179 | func (s *TaskSession) startImp(st *StartTask) { 180 | if s.g.cfg.Server { 181 | s.startAt = time.Now() 182 | return 183 | } 184 | 185 | if s.totalPieces == s.goodPieces { 186 | // 本地文件的Piece与Block都下载完成,不再需要下载 187 | common.LOG.Infof("[%s] All piece has already download.", s.taskID) 188 | go s.reportStatus(float32(100)) 189 | return 190 | } 191 | 192 | common.LOG.Infof("[%s] Starting p2p session...", s.taskID) 193 | // 更新路径 194 | s.task.LinkChain = st.LinkChain 195 | 196 | // 找到分发路径中位置 197 | net := s.g.cfg.Net 198 | self := fmt.Sprintf("%s:%v", net.IP, net.DataPort) 199 | addrs := s.task.LinkChain.DispatchAddrs 200 | count := len(addrs) 201 | for idx := count - 1; idx > 0; idx-- { 202 | if self == addrs[idx] { 203 | s.indexInChain = idx - 1 204 | break 205 | } 206 | } 207 | 208 | // 尝试与上一个节点建立连接 209 | s.tryNewPeer() 210 | s.initPeersBitset() 211 | s.startAt = time.Now() 212 | common.LOG.Infof("[%s] Started p2p client session", s.taskID) 213 | } 214 | 215 | // 寻找可用的地址并连接 216 | func (s *TaskSession) tryNewPeer() { 217 | addrs := s.task.LinkChain.DispatchAddrs 218 | if s.connFailCount >= maxRetryConnectTimes { 219 | s.indexInChain-- 220 | } 221 | if s.indexInChain < 0 { 222 | s.indexInChain = 0 223 | } 224 | peer := addrs[s.indexInChain] 225 | s.connectToPeer(peer) 226 | } 227 | 228 | // 连接其它的Peer 229 | func (s *TaskSession) connectToPeer(peer string) error { 230 | common.LOG.Debugf("[%s] Try connect to peer[%s]", s.taskID, peer) 231 | conn, err := net.DialTimeout("tcp", peer, 1*time.Second) 232 | if err != nil { 233 | common.LOG.Errorf("[%s] Failed to connect to peer[%s], error=%v", s.taskID, peer, err) 234 | conn.Close() 235 | s.connFailCount++ 236 | s.retryConnTimeChan = time.After(50 * time.Microsecond) 237 | return err 238 | } 239 | 240 | // 发送消息头,用于认证 241 | err = writePHeader(conn, s.taskID, s.g.cfg) 242 | if err != nil { 243 | common.LOG.Errorf("[%s] Failed to send header to peer[%s], error=%v", s.taskID, peer, err) 244 | conn.Close() 245 | s.indexInChain-- //连接下一个 246 | s.retryConnTimeChan = time.After(50 * time.Microsecond) 247 | return err 248 | } 249 | 250 | // 阻塞接收响应 251 | bs := make([]byte, 1) 252 | _, err = conn.Read(bs) 253 | if err != nil { 254 | // 认证通过了,但没有返回正确的响应,Peer还没创建对应Task的Session 255 | common.LOG.Errorf("[%s] Failed to reading header from peer[%s], error=%v", s.taskID, peer, err) 256 | conn.Close() 257 | s.retryConnTimeChan = time.After(50 * time.Microsecond) 258 | return err 259 | } 260 | 261 | s.connFailCount = 0 262 | common.LOG.Infof("[%s] Success to connect to peer[%s]", s.taskID, peer) 263 | p2pconn := &PeerConn{ 264 | conn: conn, 265 | client: false, // 对端是Server 266 | remoteAddr: conn.RemoteAddr(), 267 | taskID: s.taskID, 268 | } 269 | 270 | s.addPeerImp(p2pconn) 271 | return nil 272 | } 273 | 274 | // AcceptNewPeer 接入其它的Peer连接 275 | func (s *TaskSession) AcceptNewPeer(c *PeerConn) { 276 | // 先回一个连接响应 277 | _, err := c.conn.Write([]byte{byte(0xFF)}) 278 | if err != nil { 279 | common.LOG.Errorf("[%s] Write connection init response to peer[%s] failed", s.taskID, c.remoteAddr.String()) 280 | return 281 | } 282 | s.addPeerChan <- c 283 | } 284 | 285 | // 处理连接到其它成功的Peer,或者是其它Peer的接入 286 | func (s *TaskSession) addPeerImp(c *PeerConn) { 287 | peerAddr := c.remoteAddr.String() 288 | common.LOG.Infof("[%s] Add new peer, peer[%s]", c.taskID, peerAddr) 289 | // 创建一个Peer对象 290 | ps := newPeer(c, s.task.Speed) 291 | 292 | // 位图 293 | ps.have = NewBitset(s.totalPieces) 294 | s.peers[peerAddr] = ps 295 | 296 | // 一个从连接上写消息,或读消息 297 | go ps.peerWriter(s.peerMessageChan) 298 | go ps.peerReader(s.peerMessageChan) 299 | 300 | // 连接建立之后, 把自己的位置信息给对端 301 | if s.pieceSet != nil { 302 | ps.SendBitfield(s.pieceSet) 303 | } 304 | } 305 | 306 | // 关闭Peer 307 | func (s *TaskSession) closePeerAndTryReconn(peer *peer) { 308 | s.ClosePeer(peer) 309 | if !peer.client { 310 | s.tryNewPeer() 311 | } 312 | } 313 | 314 | // ClosePeer 关闭Peer 315 | func (s *TaskSession) ClosePeer(peer *peer) { 316 | peer.Close() 317 | s.removeRequests(peer) 318 | delete(s.peers, peer.address) 319 | } 320 | 321 | // 删除REQUEST信息 322 | func (s *TaskSession) removeRequests(p *peer) (err error) { 323 | for k := range p.ourRequests { 324 | piece := int(k >> 32) 325 | begin := int(k & 0xffffffff) 326 | block := begin / standardBlockLen 327 | common.LOG.Infof("[%s] Forgetting we requested block %v.%v", s.taskID, piece, block) 328 | s.removeRequest(piece, block) 329 | } 330 | p.ourRequests = make(map[uint64]time.Time, maxOurRequests) 331 | return 332 | } 333 | 334 | // 删除REQUEST信息 335 | func (s *TaskSession) removeRequest(piece, block int) { 336 | v, ok := s.activePieces[piece] 337 | if ok && v.downloaderCount[block] > 0 { 338 | v.downloaderCount[block]-- 339 | } 340 | } 341 | 342 | // 接收Peer消息并发送消息 343 | func (s *TaskSession) doMessage(p *peer, message []byte) (err error) { 344 | if message == nil { 345 | return io.EOF // The reader or writer goroutine has exited 346 | } 347 | 348 | if len(message) == 0 { // keep alive 349 | return 350 | } 351 | 352 | err = s.generalMessage(message, p) 353 | return 354 | } 355 | 356 | func (s *TaskSession) generalMessage(message []byte, p *peer) (err error) { 357 | messageID := message[0] 358 | 359 | switch messageID { 360 | case HAVE: // 处理Peer发送过来的HAVE消息 361 | common.LOG.Tracef("[%s] Recv HAVE from peer[%s] ", p.taskID, p.address) 362 | if len(message) != 5 { 363 | return errors.New("Unexpected length") 364 | } 365 | n := bytesToUint32(message[1:]) 366 | if n >= uint32(p.have.n) { 367 | return errors.New("have index is out of range") 368 | } 369 | p.have.Set(int(n)) 370 | if !p.client { 371 | for i := 0; i < maxOurRequests; i++ { 372 | s.requestBlock(p) // 向请此Peer上请求发送块 373 | } 374 | } 375 | case BITFIELD: // 处理Peer发送过来的BITFIELD消息 376 | common.LOG.Tracef("[%s] Recv BITFIELD from peer[%s] isclient=%v", p.taskID, p.address, p.client) 377 | p.have = NewBitsetFromBytes(s.totalPieces, message[1:]) 378 | if p.have == nil { 379 | return errors.New("Invalid bitfield data") 380 | } 381 | if !p.client { 382 | s.requestBlock(p) // 向Server Peer请求发送块 383 | } 384 | case REQUEST: // 处理Peer发送过来的REQUEST消息 385 | common.LOG.Tracef("[%s] Recv REQUEST from peer[%s] ", p.taskID, p.address) 386 | index, begin, length, err := s.decodeRequest(message, p) 387 | if err != nil { 388 | return err 389 | } 390 | return s.sendPiece(p, index, begin, length) 391 | case PIECE: // 处理Peer发送过来的PIECE消息 392 | common.LOG.Tracef("[%s] Recv PIECE from peer[%s]", p.taskID, p.address) 393 | index, begin, length, err := s.decodePiece(message, p) 394 | if err != nil { 395 | return err 396 | } 397 | 398 | if s.pieceSet.IsSet(int(index)) { 399 | common.LOG.Debugf("[%s] Recv PIECE from peer[%s] is already", p.taskID, p.address) 400 | err = s.requestBlock(p) 401 | break // 本Peer已存在此Piece,则继续 402 | } 403 | 404 | globalOffset := int64(index)*s.task.MetaInfo.PieceLen + int64(begin) 405 | _, err = s.fileStore.WriteAt(message[9:], globalOffset) 406 | if err != nil { 407 | return err 408 | } 409 | 410 | // 存储块的信息 411 | s.recordBlock(p, index, begin, uint32(length)) 412 | err = s.requestBlock(p) // 继续向此Peer请求发送块信息 413 | default: 414 | return fmt.Errorf("Uknown message id: %d\n", messageID) 415 | } 416 | 417 | return 418 | } 419 | 420 | func (s *TaskSession) decodeRequest(message []byte, p *peer) (index, begin, length uint32, err error) { 421 | if len(message) != 13 { 422 | err = errors.New("Unexpected message length") 423 | return 424 | } 425 | index = bytesToUint32(message[1:5]) 426 | begin = bytesToUint32(message[5:9]) 427 | length = bytesToUint32(message[9:13]) 428 | if index >= uint32(p.have.n) { 429 | err = errors.New("piece out of range") 430 | return 431 | } 432 | if !s.pieceSet.IsSet(int(index)) { 433 | err = errors.New("we don't have that piece") 434 | return 435 | } 436 | if int64(begin) >= s.task.MetaInfo.PieceLen { 437 | err = errors.New("begin out of range") 438 | return 439 | } 440 | if int64(begin)+int64(length) > s.task.MetaInfo.PieceLen { 441 | err = errors.New("begin + length out of range") 442 | return 443 | } 444 | return 445 | } 446 | 447 | // 给Peer发送块消息 448 | func (s *TaskSession) sendPiece(p *peer, index, begin, length uint32) (err error) { 449 | common.LOG.Debugf("[%s] Sending block to peer[%s], index=%v, begin=%v, length=%v", 450 | s.taskID, p.address, index, begin, length) 451 | buf := make([]byte, length+9) 452 | buf[0] = PIECE 453 | uint32ToBytes(buf[1:5], index) 454 | uint32ToBytes(buf[5:9], begin) 455 | _, err = s.fileStore.ReadAt(buf[9:], 456 | int64(index)*s.task.MetaInfo.PieceLen+int64(begin)) 457 | if err != nil { 458 | common.LOG.Errorf("[%s] Read file failed, error=%v", s.taskID, err) 459 | return 460 | } 461 | p.sendMessage(buf) 462 | 463 | return 464 | } 465 | 466 | // 接收块消息 467 | func (s *TaskSession) recordBlock(p *peer, piece, begin, length uint32) (err error) { 468 | block := begin / standardBlockLen 469 | common.LOG.Debugf("[%s] Received block from peer[%s] %v.%v", s.taskID, p.address, piece, block) 470 | 471 | requestIndex := (uint64(piece) << 32) | uint64(begin) 472 | delete(p.ourRequests, requestIndex) 473 | v, ok := s.activePieces[int(piece)] 474 | if !ok { 475 | common.LOG.Debugf("[%s] Received a block we already have from peer[%s], piece=%v.%v", s.taskID, p.address, piece, block) 476 | return 477 | } 478 | 479 | v.recordBlock(int(block)) 480 | s.downloaded += uint64(length) 481 | if !v.isComplete() { 482 | return 483 | } 484 | 485 | // Piece完成下载,清理资源,提交文件 486 | delete(s.activePieces, int(piece)) 487 | start := time.Now() 488 | good, pieceBytes, err := checkPiece(s.fileStore, s.totalSize, s.task.MetaInfo, int(piece)) 489 | s.checkPieceTime += time.Now().Sub(start).Seconds() 490 | if !good || err != nil { 491 | common.LOG.Errorf("[%s] Closing peer[%s] that sent a bad piece=%v, error=%v", s.taskID, p.address, piece, err) 492 | go s.reportStatus(float32(-1)) 493 | p.Close() 494 | return 495 | } 496 | 497 | // 提交文件存储 498 | s.fileStore.Commit(int(piece), pieceBytes, s.task.MetaInfo.PieceLen*int64(piece)) 499 | s.pieceSet.Set(int(piece)) 500 | s.goodPieces++ 501 | 502 | var percentComplete float32 503 | if s.totalPieces > 0 { 504 | percentComplete = float32(s.goodPieces*100) / float32(s.totalPieces) 505 | } 506 | common.LOG.Debugf("[%s] Have %v of %v pieces %v%% complete", s.taskID, s.goodPieces, s.totalPieces, 507 | percentComplete) 508 | if s.goodPieces == s.totalPieces { 509 | s.finishedAt = time.Now() // 下载完成 510 | go s.reportStatus(percentComplete) 511 | } else { 512 | // 减少上报次数,减轻Server的压力 513 | if int(percentComplete) > s.reportStep { 514 | s.reportStep += 10 515 | go s.reportStatus(percentComplete) 516 | } 517 | } 518 | 519 | // 每当客户端下载了一个piece,即将该piece的下标作为have消息的负载构造have消息, 520 | // 并把该消息发送给所有建立连接的Peer。 521 | for _, p := range s.peers { 522 | if p.have != nil && 523 | (int(piece) >= p.have.n || !p.have.IsSet(int(piece))) { 524 | p.SendHave(piece) 525 | } 526 | } 527 | 528 | return 529 | } 530 | 531 | func (s *TaskSession) decodePiece(message []byte, p *peer) (index, begin, length uint32, err error) { 532 | if len(message) < 9 { 533 | err = errors.New("unexpected message length") 534 | return 535 | } 536 | index = bytesToUint32(message[1:5]) 537 | begin = bytesToUint32(message[5:9]) 538 | length = uint32(len(message) - 9) 539 | 540 | if index >= uint32(p.have.n) { 541 | err = errors.New("piece out of range") 542 | return 543 | } 544 | 545 | if int64(begin) >= s.task.MetaInfo.PieceLen { 546 | err = errors.New("begin out of range") 547 | return 548 | } 549 | if int64(begin)+int64(length) > s.task.MetaInfo.PieceLen { 550 | err = errors.New("begin + length out of range") 551 | return 552 | } 553 | if length > maxBlockLen { 554 | err = errors.New("Block length too large") 555 | return 556 | } 557 | return 558 | } 559 | 560 | // 请求下载时,选择一个可用的Piece 561 | func (s *TaskSession) choosePiece(p *peer) (piece int) { 562 | n := s.totalPieces 563 | start := rand.Intn(n) 564 | piece = s.checkRange(p, start, n) 565 | if piece == -1 { 566 | piece = s.checkRange(p, 0, start) 567 | } 568 | return 569 | } 570 | 571 | func (s *TaskSession) checkRange(p *peer, start, end int) (piece int) { 572 | clampedEnd := min(end, min(p.have.n, s.pieceSet.n)) 573 | for i := start; i < clampedEnd; i++ { 574 | // 本Peer没有,但其它Peer存在时 575 | if (!s.pieceSet.IsSet(i)) && p.have.IsSet(i) { 576 | if _, ok := s.activePieces[i]; !ok { 577 | return i 578 | } 579 | } 580 | } 581 | return -1 582 | } 583 | 584 | // 构建请求块(本Peer缺失)信息 585 | func (s *TaskSession) requestBlock(p *peer) (err error) { 586 | for k := range s.activePieces { 587 | if p.have.IsSet(k) { 588 | err = s.requestBlock2(p, k, false) 589 | if err != io.EOF { 590 | return 591 | } 592 | } 593 | } 594 | 595 | // No active pieces. (Or no suitable active pieces.) Pick one 596 | piece := s.choosePiece(p) 597 | if piece < 0 { 598 | for k := range s.activePieces { 599 | if p.have.IsSet(k) { 600 | err = s.requestBlock2(p, k, true) 601 | if err != io.EOF { 602 | return 603 | } 604 | } 605 | } 606 | } 607 | 608 | // 所有piece与block都下载完成了 609 | if piece < 0 { 610 | return 611 | } 612 | 613 | s.activePieces[piece] = NewActivePiece(s.pieceLength(piece)) 614 | return s.requestBlock2(p, piece, false) 615 | 616 | } 617 | 618 | func (s *TaskSession) requestBlock2(p *peer, piece int, endGame bool) (err error) { 619 | v := s.activePieces[piece] 620 | block := v.chooseBlockToDownload(endGame) 621 | if block >= 0 { 622 | s.requestBlockImp(p, piece, block) 623 | } else { 624 | //common.LOG.Debugf("[%s] Request block from peer[%s], EOF", s.taskID, p.address) 625 | return io.EOF 626 | } 627 | return 628 | } 629 | 630 | // Request a block 631 | func (s *TaskSession) requestBlockImp(p *peer, piece int, block int) { 632 | begin := block * standardBlockLen 633 | length := standardBlockLen 634 | if piece == s.totalPieces-1 { 635 | left := s.lastPieceLength - begin 636 | if left < length { 637 | length = left 638 | } 639 | } 640 | 641 | //common.LOG.Tracef("[%s] Requesting block from peer[%s], piece=%v.%v, length=%v", s.taskID, p.address, piece, block, length) 642 | p.SendRequest(piece, begin, length) 643 | return 644 | } 645 | 646 | func (s *TaskSession) pieceLength(piece int) int { 647 | if piece < s.totalPieces-1 { 648 | return int(s.task.MetaInfo.PieceLen) 649 | } 650 | return s.lastPieceLength 651 | } 652 | 653 | // Quit ... 654 | func (s *TaskSession) Quit() { 655 | select { 656 | case s.quitChan <- struct{}{}: 657 | case <-s.endedChan: // 防quit阻塞 658 | } 659 | return 660 | } 661 | 662 | func (s *TaskSession) shutdown() { 663 | for _, peer := range s.peers { 664 | s.ClosePeer(peer) 665 | } 666 | 667 | if s.fileStore != nil { 668 | if err := s.fileStore.Close(); err != nil { 669 | common.LOG.Errorf("[%s] Error closing filestore : %v", s.taskID, err) 670 | } 671 | } 672 | 673 | if s.reportor != nil { 674 | s.reportor.Close() 675 | } 676 | 677 | close(s.endedChan) 678 | return 679 | } 680 | 681 | // Init 初始化 682 | func (s *TaskSession) Init() { 683 | // 开启缓存 684 | if s.fileStore != nil { 685 | cache := s.g.cache.NewCache(s.taskID, s.totalPieces, int(s.task.MetaInfo.PieceLen), s.totalSize) 686 | s.fileStore.SetCache(cache) 687 | } 688 | 689 | if s.g.cfg.Server { 690 | if err := s.initInServer(); err != nil { 691 | common.LOG.Errorf("[%s] Init p2p server session failed, %v", s.taskID, err) 692 | } 693 | } else { 694 | if err := s.initInClient(); err != nil { 695 | common.LOG.Errorf("[%s] Init p2p client session failed, %v", s.taskID, err) 696 | } 697 | } 698 | 699 | keepAliveChan := time.Tick(60 * time.Second) 700 | tickDuration := 2 * time.Second 701 | tickChan := time.Tick(tickDuration) 702 | lastDownloaded := s.downloaded 703 | 704 | for { 705 | select { 706 | case conn := <-s.addPeerChan: 707 | s.addPeerImp(conn) 708 | case st := <-s.startChan: 709 | s.startImp(st) 710 | case pm := <-s.peerMessageChan: 711 | peer, message := pm.peer, pm.message 712 | peer.lastReadTime = time.Now() 713 | err2 := s.doMessage(peer, message) 714 | if err2 != nil { 715 | if err2 != io.EOF { 716 | common.LOG.Error("[", s.taskID, "] Closing peer[", peer.address, "] because ", err2) 717 | s.closePeerAndTryReconn(peer) 718 | } else { 719 | s.ClosePeer(peer) 720 | } 721 | } 722 | case <-keepAliveChan: 723 | if s.timeout() { 724 | // Session超时没有启动,需要stop 725 | s.stopSessChan <- s.taskID 726 | common.LOG.Info("[", s.taskID, "] P2p session is timeout") 727 | } 728 | s.peersKeepAlive() 729 | case <-tickChan: 730 | if !s.g.cfg.Server && s.totalPieces != s.goodPieces { 731 | speed := humanSize(float64(s.downloaded-lastDownloaded) / tickDuration.Seconds()) 732 | lastDownloaded = s.downloaded 733 | common.LOG.Infof("[%s] downloaded: %d(%s/s), pieces: %d/%d, check pieces: (%.2f seconds)", 734 | s.taskID, s.downloaded, speed, s.goodPieces, s.totalPieces, s.checkPieceTime) 735 | } 736 | case <-s.retryConnTimeChan: 737 | s.tryNewPeer() 738 | case <-s.quitChan: 739 | common.LOG.Info("[", s.taskID, "] Quit p2p session") 740 | s.shutdown() 741 | return 742 | } 743 | } 744 | } 745 | 746 | func (s *TaskSession) doCheckRequests(p *peer) (err error) { 747 | now := time.Now() 748 | for k, v := range p.ourRequests { 749 | if now.Sub(v).Seconds() > 30 { 750 | piece := int(k >> 32) 751 | block := int(k&0xffffffff) / standardBlockLen 752 | common.LOG.Error("[", s.taskID, "] Timing out request of ", piece, ".", block) 753 | s.removeRequest(piece, block) 754 | } 755 | } 756 | return 757 | } 758 | 759 | func (s *TaskSession) peersKeepAlive() { 760 | now := time.Now() 761 | for _, peer := range s.peers { 762 | if peer.lastReadTime.Second() != 0 && now.Sub(peer.lastReadTime) > 3*time.Minute { 763 | common.LOG.Error("[", s.taskID, "] Closing peer [", peer.address, "] because timed out") 764 | s.ClosePeer(peer) 765 | continue 766 | } 767 | err2 := s.doCheckRequests(peer) 768 | if err2 != nil { 769 | if err2 != io.EOF { 770 | common.LOG.Error("[", s.taskID, "] Closing peer[", peer.address, "] because", err2) 771 | } 772 | s.ClosePeer(peer) 773 | continue 774 | } 775 | peer.keepAlive() 776 | } 777 | } 778 | 779 | // 检查是否超时了 780 | func (s *TaskSession) timeout() bool { 781 | now := time.Now() 782 | if s.startAt.IsZero() && now.Sub(s.initedAt) >= 3*time.Minute { 783 | return true 784 | } 785 | 786 | if !s.finishedAt.IsZero() && now.Sub(s.finishedAt) >= 3*time.Minute { 787 | return true 788 | } 789 | return false 790 | } 791 | 792 | func (s *TaskSession) reportStatus(pecent float32) { 793 | s.reportor.DoReport(s.task.LinkChain.ServerAddr, pecent) 794 | } 795 | -------------------------------------------------------------------------------- /p2p/sessionmgnt.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "github.com/xtfly/gofd/common" 5 | ) 6 | 7 | type global struct { 8 | cfg *common.Config // 全局配置 9 | 10 | fsProvider FsProvider // 读取文件 11 | cache CacheProvider // 用于缓存块信息 12 | } 13 | 14 | // TaskSessionMgnt ... 15 | type TaskSessionMgnt struct { 16 | g *global // 17 | 18 | quitChan chan struct{} // 退出 19 | 20 | createSessChan chan *DispatchTask // 要创建的Task 21 | startSessChan chan *StartTask // 22 | stopSessChan chan string // 要关闭的Task 23 | sessions map[string]*TaskSession // 24 | } 25 | 26 | // NewSessionMgnt ... 27 | func NewSessionMgnt(cfg *common.Config) *TaskSessionMgnt { 28 | return &TaskSessionMgnt{ 29 | g: &global{ 30 | cfg: cfg, 31 | fsProvider: OsFsProvider{}, 32 | cache: NewRAMCacheProvider(cfg.Control.CacheSize), 33 | }, 34 | quitChan: make(chan struct{}, 1), 35 | createSessChan: make(chan *DispatchTask, cfg.Control.MaxActive), 36 | startSessChan: make(chan *StartTask, cfg.Control.MaxActive), 37 | stopSessChan: make(chan string, 1), 38 | sessions: make(map[string]*TaskSession, 10), 39 | } 40 | } 41 | 42 | // Start 启动监控 43 | func (sm *TaskSessionMgnt) Start() error { 44 | conChan, listener, err := StartListen(sm.g.cfg) 45 | if err != nil { 46 | common.LOG.Error("Couldn't listen for peers connection: ", err) 47 | return err 48 | } 49 | defer listener.Close() 50 | 51 | for { 52 | select { 53 | case task := <-sm.createSessChan: 54 | if ts, err := NewTaskSession(sm.g, task, sm.stopSessChan); err != nil { 55 | common.LOG.Error("Could not create p2p task session.", err) 56 | } else { 57 | common.LOG.Infof("[%s] Created p2p task session", task.TaskID) 58 | sm.sessions[ts.taskID] = ts 59 | go func(s *TaskSession) { 60 | s.Init() 61 | }(ts) 62 | } 63 | case task := <-sm.startSessChan: 64 | if ts, ok := sm.sessions[task.TaskID]; ok { 65 | ts.Start(task) 66 | } else { 67 | common.LOG.Errorf("[%s] Not find p2p task session", task.TaskID) 68 | } 69 | case taskID := <-sm.stopSessChan: 70 | common.LOG.Infof("[%s] Stop p2p task session", taskID) 71 | if ts, ok := sm.sessions[taskID]; ok { 72 | delete(sm.sessions, taskID) 73 | ts.Quit() 74 | } 75 | case <-sm.quitChan: 76 | for _, ts := range sm.sessions { 77 | go ts.Quit() 78 | } 79 | common.LOG.Info("Closed all sessions") 80 | return nil 81 | case c := <-conChan: 82 | common.LOG.Infof("[%s] New p2p connection, peer addr %s", c.taskID, c.remoteAddr.String()) 83 | if ts, ok := sm.sessions[c.taskID]; ok { 84 | ts.AcceptNewPeer(c) 85 | } else { 86 | common.LOG.Errorf("[%s] Not find p2p task session", c.taskID) 87 | if err := c.conn.Close(); err != nil { // TODO让客户端重连 88 | common.LOG.Errorf("[%s] Close connection failed, connect %v", c.taskID, c.conn.RemoteAddr()) 89 | } 90 | } 91 | } 92 | } 93 | } 94 | 95 | // Stop 停止所有的任务,并退出监控 96 | func (sm *TaskSessionMgnt) Stop() { 97 | sm.quitChan <- struct{}{} 98 | } 99 | 100 | // CreateTask 创建一个任务 101 | func (sm *TaskSessionMgnt) CreateTask(dt *DispatchTask) { 102 | go func(dt *DispatchTask) { 103 | sm.createSessChan <- dt 104 | }(dt) 105 | } 106 | 107 | // StartTask 启动一个任务 108 | func (sm *TaskSessionMgnt) StartTask(st *StartTask) { 109 | go func(st *StartTask) { 110 | sm.startSessChan <- st 111 | }(st) 112 | } 113 | 114 | // StopTask 停止一下任务 115 | func (sm *TaskSessionMgnt) StopTask(taskID string) { 116 | go func(taskID string) { 117 | sm.stopSessChan <- taskID 118 | }(taskID) 119 | } 120 | -------------------------------------------------------------------------------- /p2p/utils.go: -------------------------------------------------------------------------------- 1 | package p2p 2 | 3 | import ( 4 | "encoding/binary" 5 | "fmt" 6 | "io" 7 | ) 8 | 9 | func checkEqual(ref, current []byte) bool { 10 | for i := 0; i < len(current); i++ { 11 | if ref[i] != current[i] { 12 | return false 13 | } 14 | } 15 | return true 16 | } 17 | 18 | func min(a, b int) int { 19 | if a < b { 20 | return a 21 | } 22 | return b 23 | } 24 | 25 | func uint32ToBytes(buf []byte, n uint32) { 26 | binary.BigEndian.PutUint32(buf, n) 27 | } 28 | 29 | func bytesToUint32(buf []byte) uint32 { 30 | return binary.BigEndian.Uint32(buf) 31 | } 32 | 33 | func writeNBOUint32(w io.Writer, n uint32) (err error) { 34 | buf := make([]byte, 4) 35 | uint32ToBytes(buf, n) 36 | _, err = w.Write(buf[0:]) 37 | return 38 | } 39 | 40 | func readNBOUint32(r io.Reader) (n uint32, err error) { 41 | var buf [4]byte 42 | _, err = io.ReadFull(r, buf[0:]) 43 | if err != nil { 44 | return 45 | } 46 | n = bytesToUint32(buf[0:]) 47 | return 48 | } 49 | 50 | func humanSize(value float64) string { 51 | switch { 52 | case value > 1<<30: 53 | return fmt.Sprintf("%.2f GB", value/(1<<30)) 54 | case value > 1<<20: 55 | return fmt.Sprintf("%.2f MB", value/(1<<20)) 56 | case value > 1<<10: 57 | return fmt.Sprintf("%.2f kB", value/(1<<10)) 58 | } 59 | return fmt.Sprintf("%.2f B", value) 60 | } 61 | -------------------------------------------------------------------------------- /server/api.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | //---------------------------------------- 4 | import "time" 5 | 6 | // CreateTask 创建分发任务 7 | type CreateTask struct { 8 | ID string `json:"id"` 9 | DispatchFiles []string `json:"dispatchFiles"` 10 | DestIPs []string `json:"destIPs"` 11 | } 12 | 13 | // TaskInfo 查询分发任务 14 | type TaskInfo struct { 15 | ID string `json:"id"` 16 | Status string `json:"status"` 17 | 18 | StartedAt time.Time `json:"startedAt"` 19 | FinishedAt time.Time `json:"finishedAt"` 20 | 21 | DispatchInfos map[string]*DispatchInfo `json:"dispatchInfos,omitempty"` 22 | } 23 | 24 | // DispatchInfo 单个IP的分发信息 25 | type DispatchInfo struct { 26 | Status string `json:"status"` 27 | PercentComplete float32 `json:"percentComplete"` 28 | 29 | StartedAt time.Time `json:"startedAt"` 30 | FinishedAt time.Time `json:"finishedAt"` 31 | 32 | DispatchFiles []*DispatchFile `json:"dispatchFiles"` 33 | } 34 | 35 | // DispatchFile 单个文件分发状态 36 | type DispatchFile struct { 37 | FileName string `json:"filename"` 38 | PercentComplete float32 `json:"-"` 39 | } 40 | 41 | // TaskStatus 任务状态 42 | type TaskStatus int 43 | 44 | // the enum of TaskStatus 45 | const ( 46 | TaskNotExist TaskStatus = iota 47 | TaskExist 48 | TaskInit 49 | TaskFailed 50 | TaskCompleted 51 | TaskInProgress 52 | TaskFileNotExist 53 | ) 54 | 55 | // convert task status to a string 56 | func (ts TaskStatus) String() string { 57 | switch ts { 58 | case TaskNotExist: 59 | return "TASK_NOT_EXISTED" 60 | case TaskExist: 61 | return "TASK_EXISTED" 62 | case TaskInit: 63 | return "INIT" 64 | case TaskFailed: 65 | return "FAILED" 66 | case TaskCompleted: 67 | return "COMPLETED" 68 | case TaskInProgress: 69 | return "INPROGESS" 70 | case TaskFileNotExist: 71 | return "FILE_NOT_EXISTED" 72 | default: 73 | return "TASK_NOT_EXISTED" 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /server/handler.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/labstack/gommon/log" 5 | "net/http" 6 | 7 | "github.com/labstack/echo/v4" 8 | "github.com/xtfly/gofd/common" 9 | "github.com/xtfly/gofd/p2p" 10 | "github.com/xtfly/gokits/gcache" 11 | ) 12 | 13 | // CreateTask POST /api/v1/server/tasks 14 | func (s *Server) CreateTask(c echo.Context) (err error) { 15 | // 获取Body 16 | t := new(CreateTask) 17 | if err = c.Bind(t); err != nil { 18 | common.LOG.Errorf("Recv [%s] request, decode body failed. %v", c.Request().URL, err) 19 | return 20 | } 21 | 22 | // 检查任务是否存在 23 | v, ok := s.cache.Get(t.ID) 24 | if ok { 25 | cti := v.(*CachedTaskInfo) 26 | if cti.EqualCmp(t) { 27 | return c.String(http.StatusAccepted, "") 28 | } 29 | common.LOG.Debugf("[%s] Recv task, task is existed", t.ID) 30 | return c.String(http.StatusBadRequest, TaskExist.String()) 31 | } 32 | 33 | common.LOG.Infof("[%s] Recv task, file=%v, ips=%v", t.ID, t.DispatchFiles, t.DestIPs) 34 | 35 | cti := NewCachedTaskInfo(s, t) 36 | s.cache.Set(t.ID, cti, gcache.NoExpiration) 37 | s.cache.OnEvicted(func(id string, v interface{}) { 38 | common.LOG.Infof("[%s] Remove task cache", t.ID) 39 | cti := v.(*CachedTaskInfo) 40 | cti.quitChan <- struct{}{} 41 | }) 42 | go cti.Start() 43 | 44 | return c.String(http.StatusAccepted, "") 45 | } 46 | 47 | // CancelTask DELETE /api/v1/server/tasks/:id 48 | func (s *Server) CancelTask(c echo.Context) error { 49 | id := c.Param("id") 50 | common.LOG.Infof("[%s] Recv cancel task", id) 51 | v, ok := s.cache.Get(id) 52 | if !ok { 53 | return c.String(http.StatusBadRequest, TaskNotExist.String()) 54 | } 55 | cti := v.(*CachedTaskInfo) 56 | cti.stopChan <- struct{}{} 57 | return c.JSON(http.StatusAccepted, "") 58 | } 59 | 60 | // QueryTask GET /api/v1/server/tasks/:id 61 | func (s *Server) QueryTask(c echo.Context) error { 62 | id := c.Param("id") 63 | log.Infof("[%s] Recv query task", id) 64 | v, ok := s.cache.Get(id) 65 | if !ok { 66 | return c.String(http.StatusBadRequest, TaskNotExist.String()) 67 | } 68 | cti := v.(*CachedTaskInfo) 69 | return c.JSON(http.StatusOK, cti.Query()) 70 | 71 | } 72 | 73 | // ReportTask POST /api/v1/server/tasks/status 74 | func (s *Server) ReportTask(c echo.Context) (err error) { 75 | // 获取Body 76 | csr := new(p2p.StatusReport) 77 | if err = c.Bind(csr); err != nil { 78 | common.LOG.Errorf("Recv [%s] request, decode body failed. %v", c.Request().URL, err) 79 | return 80 | } 81 | 82 | common.LOG.Debugf("[%s] Recv task report, ip=%v, percent=%v", csr.TaskID, csr.IP, csr.PercentComplete) 83 | if v, ok := s.cache.Get(csr.TaskID); ok { 84 | cti := v.(*CachedTaskInfo) 85 | cti.reportChan <- csr 86 | } 87 | 88 | return c.String(http.StatusOK, "") 89 | } 90 | -------------------------------------------------------------------------------- /server/server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/labstack/echo/v4" 7 | "github.com/labstack/echo/v4/middleware" 8 | "github.com/xtfly/gofd/common" 9 | "github.com/xtfly/gofd/p2p" 10 | "github.com/xtfly/gokits/gcache" 11 | ) 12 | 13 | // Server .. 14 | type Server struct { 15 | *common.BaseService 16 | // 用于缓存当前接收到任务 17 | cache *gcache.Cache 18 | // Session管理 19 | sessionMgnt *p2p.TaskSessionMgnt 20 | } 21 | 22 | // NewServer .. 23 | func NewServer(cfg *common.Config) (*Server, error) { 24 | s := &Server{ 25 | cache: gcache.NewCache(5 * time.Minute), 26 | sessionMgnt: p2p.NewSessionMgnt(cfg), 27 | } 28 | s.BaseService = common.NewBaseService(cfg, cfg.Name, s) 29 | return s, nil 30 | } 31 | 32 | // OnStart ... 33 | func (s *Server) OnStart(c *common.Config, e *echo.Echo) error { 34 | go func() { s.sessionMgnt.Start() }() 35 | 36 | e.Use(middleware.BasicAuth(s.Auth)) 37 | e.POST("/api/v1/server/tasks", s.CreateTask) 38 | e.DELETE("/api/v1/server/tasks/:id", s.CancelTask) 39 | e.GET("/api/v1/server/tasks/:id", s.QueryTask) 40 | e.POST("/api/v1/server/tasks/status", s.ReportTask) 41 | 42 | return nil 43 | } 44 | 45 | // OnStop ... 46 | func (s *Server) OnStop(c *common.Config, e *echo.Echo) { 47 | s.sessionMgnt.Stop() 48 | } 49 | -------------------------------------------------------------------------------- /server/taskmgnt.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/labstack/gommon/log" 10 | "github.com/xtfly/gofd/common" 11 | "github.com/xtfly/gofd/p2p" 12 | "github.com/xtfly/gokits/gcache" 13 | ) 14 | 15 | type clientRsp struct { 16 | IP string 17 | Success bool 18 | } 19 | 20 | type cmpTask struct { 21 | t *CreateTask 22 | out chan bool 23 | } 24 | 25 | type queryTask struct { 26 | out chan *TaskInfo 27 | } 28 | 29 | // CachedTaskInfo 每一个Task,对应一个缓存对象,所有与它关联的操作都由一个Goroutine来处理 30 | type CachedTaskInfo struct { 31 | s *Server 32 | 33 | id string 34 | dispatchFiles []string 35 | destIPs []string 36 | ti *TaskInfo 37 | 38 | succCount int 39 | failCount int 40 | allCount int 41 | 42 | stopChan chan struct{} 43 | quitChan chan struct{} 44 | reportChan chan *p2p.StatusReport 45 | agentRspChan chan *clientRsp 46 | cmpChan chan *cmpTask 47 | queryChan chan *queryTask 48 | } 49 | 50 | // NewCachedTaskInfo ... 51 | func NewCachedTaskInfo(s *Server, t *CreateTask) *CachedTaskInfo { 52 | return &CachedTaskInfo{ 53 | s: s, 54 | id: t.ID, 55 | dispatchFiles: t.DispatchFiles, 56 | destIPs: t.DestIPs, 57 | ti: newTaskInfo(t), 58 | 59 | stopChan: make(chan struct{}), 60 | quitChan: make(chan struct{}), 61 | reportChan: make(chan *p2p.StatusReport, 10), 62 | agentRspChan: make(chan *clientRsp, 10), 63 | cmpChan: make(chan *cmpTask, 2), 64 | queryChan: make(chan *queryTask, 2), 65 | } 66 | } 67 | 68 | func newTaskInfo(t *CreateTask) *TaskInfo { 69 | init := TaskInit.String() 70 | ti := &TaskInfo{ID: t.ID, Status: init, StartedAt: time.Now()} 71 | ti.DispatchInfos = make(map[string]*DispatchInfo, len(t.DestIPs)) 72 | for _, ip := range t.DestIPs { 73 | di := &DispatchInfo{Status: init, StartedAt: time.Now()} 74 | di.DispatchFiles = make([]*DispatchFile, len(t.DispatchFiles)) 75 | ti.DispatchInfos[ip] = di 76 | for j, fn := range t.DispatchFiles { 77 | di.DispatchFiles[j] = &DispatchFile{FileName: fn} 78 | } 79 | } 80 | return ti 81 | } 82 | 83 | func createLinkChain(cfg *common.Config, ips []string, ti *TaskInfo) *p2p.LinkChain { 84 | lc := new(p2p.LinkChain) 85 | lc.ServerAddr = fmt.Sprintf("%s:%v", cfg.Net.IP, cfg.Net.MgntPort) 86 | lc.DispatchAddrs = make([]string, 1+len(ips)) 87 | // 第一个节点为服务端 88 | lc.DispatchAddrs[0] = fmt.Sprintf("%s:%v", cfg.Net.IP, cfg.Net.DataPort) 89 | 90 | idx := 1 91 | for _, ip := range ips { 92 | if di, ok := ti.DispatchInfos[ip]; ok && di.Status == TaskInProgress.String() { 93 | lc.DispatchAddrs[idx] = fmt.Sprintf("%s:%v", ip, cfg.Net.AgentDataPort) 94 | idx++ 95 | } 96 | } 97 | lc.DispatchAddrs = lc.DispatchAddrs[:idx] 98 | 99 | return lc 100 | } 101 | 102 | // Start 使用一个Goroutine来启动任务操作 103 | func (ct *CachedTaskInfo) Start() { 104 | if ts := ct.createTask(); ts != TaskInProgress { 105 | ct.endTask(ts) 106 | } 107 | 108 | for { 109 | select { 110 | case <-ct.quitChan: 111 | log.Infof("[%s] Quit task goroutine", ct.id) 112 | return 113 | case <-ct.stopChan: 114 | ct.endTask(TaskFailed) 115 | ct.stopAllClientTask() 116 | case c := <-ct.cmpChan: 117 | // 内容不相同 118 | if !equalSlice(c.t.DestIPs, ct.destIPs) || !equalSlice(c.t.DispatchFiles, ct.dispatchFiles) { 119 | c.out <- false 120 | } 121 | // 内容相同,如果失败了,则重新启动 122 | c.out <- true 123 | if ct.ti.Status == TaskFailed.String() { 124 | ct.s.cache.Replace(ct.id, ct, gcache.NoExpiration) 125 | log.Infof("[%s] Task status is FAILED, will start task try again", ct.id) 126 | if ts := ct.createTask(); ts != TaskInProgress { 127 | ct.endTask(ts) 128 | } 129 | } 130 | case q := <-ct.queryChan: 131 | q.out <- ct.ti 132 | case csr := <-ct.reportChan: 133 | ct.reportStatus(csr) 134 | if ts, ok := checkFinished(ct.ti); ok { 135 | ct.endTask(ts) 136 | ct.stopAllClientTask() 137 | } 138 | } 139 | } 140 | } 141 | 142 | func (ct *CachedTaskInfo) endTask(ts TaskStatus) { 143 | log.Errorf("[%s] Task status changed, status=%v", ct.id, ts) 144 | ct.ti.Status = ts.String() 145 | ct.ti.FinishedAt = time.Now() 146 | log.Infof("[%s] Task elapsed time: (%.2f seconds)", ct.id, ct.ti.FinishedAt.Sub(ct.ti.StartedAt).Seconds()) 147 | ct.s.cache.Replace(ct.id, ct, 5*time.Minute) 148 | ct.s.sessionMgnt.StopTask(ct.id) 149 | } 150 | 151 | func (ct *CachedTaskInfo) createTask() TaskStatus { 152 | // 先产生任务元数据信息 153 | start := time.Now() 154 | mi, err := p2p.CreateFileMeta(ct.dispatchFiles, 1024*1024) 155 | end := time.Now() 156 | if err != nil { 157 | log.Errorf("[%s] Create file meta failed, error=%v", ct.id, err) 158 | return TaskFileNotExist 159 | } 160 | log.Infof("[%s] Create metainfo: (%.2f seconds)", ct.id, end.Sub(start).Seconds()) 161 | 162 | dt := &p2p.DispatchTask{ 163 | TaskID: ct.id, 164 | MetaInfo: mi, 165 | Speed: int64(ct.s.Cfg.Control.Speed * 1024 * 1024), 166 | } 167 | dt.LinkChain = createLinkChain(ct.s.Cfg, []string{}, ct.ti) // 168 | 169 | dtbytes, err1 := json.Marshal(dt) 170 | if err1 != nil { 171 | return TaskFailed 172 | } 173 | log.Debugf("[%s] Create dispatch task, task=%v", ct.id, string(dtbytes)) 174 | 175 | ct.allCount = len(ct.destIPs) 176 | ct.succCount, ct.failCount = 0, 0 177 | ct.ti.Status = TaskInProgress.String() 178 | // 提交到session管理中运行 179 | ct.s.sessionMgnt.CreateTask(dt) 180 | // 给各节点发送创建分发任务的Rest消息 181 | ct.sendReqToClients(ct.destIPs, "/api/v1/agent/tasks", dtbytes) 182 | 183 | for { 184 | select { 185 | case tdr := <-ct.agentRspChan: 186 | ct.checkAgentRsp(tdr) 187 | if ct.failCount == ct.allCount { 188 | return TaskFailed 189 | } 190 | if ct.succCount+ct.failCount == ct.allCount { 191 | if ts := ct.startTask(); ts != TaskInProgress { 192 | return ts 193 | } 194 | // 部分节点响应,则也继续 195 | return TaskInProgress 196 | } 197 | case <-time.After(5 * time.Second): // 等超时 198 | if ct.succCount == 0 { 199 | common.LOG.Errorf("[%s] Wait client response timeout.", ct.id) 200 | return TaskFailed 201 | } 202 | } 203 | } 204 | } 205 | 206 | func (ct *CachedTaskInfo) checkAgentRsp(tcr *clientRsp) { 207 | if di, ok := ct.ti.DispatchInfos[tcr.IP]; ok { 208 | di.StartedAt = time.Now() 209 | if tcr.Success { 210 | di.Status = TaskInProgress.String() 211 | ct.succCount++ 212 | } else { 213 | di.Status = TaskFailed.String() 214 | di.FinishedAt = time.Now() 215 | ct.failCount++ 216 | } 217 | } 218 | } 219 | 220 | func (ct *CachedTaskInfo) startTask() TaskStatus { 221 | log.Infof("[%s] Recv all client response, will send start command to clients", ct.id) 222 | st := &p2p.StartTask{TaskID: ct.id} 223 | st.LinkChain = createLinkChain(ct.s.Cfg, ct.destIPs, ct.ti) 224 | 225 | stbytes, err1 := json.Marshal(st) 226 | if err1 != nil { 227 | return TaskFailed 228 | } 229 | log.Debugf("[%s] Create start task, task=%v", ct.id, string(stbytes)) 230 | 231 | // 第一个是Server,不用发送启动 232 | ct.allCount = len(st.LinkChain.DispatchAddrs) - 1 233 | ct.succCount, ct.failCount = 0, 0 234 | ct.s.sessionMgnt.StartTask(st) 235 | 236 | // 给其它各节点发送启支分发任务的Rest消息 237 | ct.sendReqToClients(st.LinkChain.DispatchAddrs[1:], "/api/v1/agent/tasks/start", stbytes) 238 | for { 239 | select { 240 | case tdr := <-ct.agentRspChan: 241 | ct.checkAgentRsp(tdr) 242 | if ct.failCount == ct.allCount { 243 | return TaskFailed 244 | } 245 | if ct.succCount+ct.failCount == ct.allCount { 246 | return TaskInProgress 247 | } 248 | case <-time.After(5 * time.Second): // 等超时 249 | if ct.succCount == 0 { 250 | log.Errorf("[%s] Wait client response timeout.", ct.id) 251 | return TaskFailed 252 | } 253 | } 254 | } 255 | } 256 | 257 | func (ct *CachedTaskInfo) sendReqToClients(ips []string, url string, body []byte) { 258 | for _, ip := range ips { 259 | if idx := strings.Index(ip, ":"); idx > 0 { 260 | ip = ip[:idx] 261 | } 262 | 263 | go func(ip string) { 264 | if _, err2 := ct.s.HTTPPost(ip, url, body); err2 != nil { 265 | log.Errorf("[%s] Send http request failed. POST, ip=%s, url=%s, error=%v", ct.id, ip, url, err2) 266 | ct.agentRspChan <- &clientRsp{IP: ip, Success: false} 267 | } else { 268 | log.Debugf("[%s] Send http request success. POST, ip=%s, url=%s", ct.id, ip, url) 269 | ct.agentRspChan <- &clientRsp{IP: ip, Success: true} 270 | } 271 | }(ip) 272 | } 273 | } 274 | 275 | // 给所有客户端发送停止命令 276 | func (ct *CachedTaskInfo) stopAllClientTask() { 277 | url := "/api/v1/agent/tasks/" + ct.id 278 | ct.s.sessionMgnt.StopTask(ct.id) 279 | for _, ip := range ct.destIPs { 280 | go func(ip string) { 281 | if err2 := ct.s.HTTPDelete(ip, url); err2 != nil { 282 | log.Errorf("[%s] Send http request failed. DELETE, ip=%s, url=%s, error=%v", ct.id, ip, url, err2) 283 | } else { 284 | log.Debugf("[%s] Send http request success. DELETE, ip=%s, url=%s", ct.id, ip, url) 285 | } 286 | }(ip) 287 | } 288 | } 289 | 290 | func (ct *CachedTaskInfo) reportStatus(csr *p2p.StatusReport) { 291 | if di, ok := ct.ti.DispatchInfos[csr.IP]; ok { 292 | if int(csr.PercentComplete) == 100 { 293 | di.Status = TaskCompleted.String() 294 | di.FinishedAt = time.Now() 295 | log.Infof("[%s] Recv report task status is completed, ip=%s", ct.id, csr.IP) 296 | } else if int(csr.PercentComplete) == -1 { 297 | di.Status = TaskFailed.String() 298 | di.FinishedAt = time.Now() 299 | log.Infof("[%s] Recv report task status is failed, ip=%s", ct.id, csr.IP) 300 | } 301 | di.PercentComplete = csr.PercentComplete 302 | } 303 | } 304 | 305 | // Query ... 306 | func (ct *CachedTaskInfo) Query() *TaskInfo { 307 | qchan := make(chan *TaskInfo, 2) 308 | ct.queryChan <- &queryTask{out: qchan} 309 | defer close(qchan) 310 | return <-qchan 311 | } 312 | 313 | // EqualCmp ... 314 | func (ct *CachedTaskInfo) EqualCmp(t *CreateTask) bool { 315 | cchan := make(chan bool, 2) 316 | ct.cmpChan <- &cmpTask{t: t, out: cchan} 317 | defer close(cchan) 318 | return <-cchan 319 | } 320 | 321 | func checkFinished(ti *TaskInfo) (TaskStatus, bool) { 322 | completed := 0 323 | failed := 0 324 | for _, v := range ti.DispatchInfos { 325 | if v.Status == TaskCompleted.String() { 326 | completed++ 327 | } 328 | if v.Status == TaskFailed.String() { 329 | failed++ 330 | } 331 | } 332 | 333 | count := len(ti.DispatchInfos) 334 | if completed == count { 335 | return TaskCompleted, true 336 | } 337 | 338 | if completed+failed == count { 339 | return TaskCompleted, true 340 | } 341 | 342 | return TaskInProgress, false 343 | } 344 | 345 | func equalSlice(a, b []string) bool { 346 | if len(a) != len(b) { 347 | return false 348 | } 349 | for _, i := range a { 350 | for _, j := range b { 351 | if i != j { 352 | return false 353 | } 354 | } 355 | } 356 | return true 357 | } 358 | --------------------------------------------------------------------------------