├── .gitignore ├── LICENSE ├── README.md ├── conn.go ├── control.go ├── dirwatch.go ├── dirwatch_test.go ├── files.go ├── files_test.go ├── generate.go ├── listen.go ├── lpd.go ├── main.go ├── metadata.go ├── metainfo.go ├── nat.go ├── natpmp.go ├── peer.go ├── peers.go ├── pex.go ├── pieces.go ├── pieces_test.go ├── pkg ├── bitset │ └── bitset.go ├── id │ ├── id.go │ └── id_test.go └── sharesession │ └── sharesession.go ├── proxy.go ├── test.bash ├── testData ├── 1Mrandom ├── 1Mrandom.torrent ├── a.torrent ├── dsl-1M.torrent └── testFile ├── testdht.bash ├── torrent.go ├── trackerClient.go ├── upnp.go ├── uri.go └── uri_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.[56ao] 3 | Taipei-Torrent 4 | rakoshare 5 | *.exe 6 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2010 Jack Palevich. All rights reserved. 2 | // 3 | // Redistribution and use in source and binary forms, with or without 4 | // modification, are permitted provided that the following conditions are 5 | // met: 6 | // 7 | // * Redistributions of source code must retain the above copyright 8 | // notice, this list of conditions and the following disclaimer. 9 | // * Redistributions in binary form must reproduce the above 10 | // copyright notice, this list of conditions and the following disclaimer 11 | // in the documentation and/or other materials provided with the 12 | // distribution. 13 | // * Neither the name of Google Inc. nor the names of its 14 | // contributors may be used to endorse or promote products derived from 15 | // this software without specific prior written permission. 
16 | // 17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 21 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Rakoshare [![Gobuild Download](http://gobuild.io/badge/github.com/rakoo/rakoshare/download.png)](http://gobuild.io/github.com/rakoo/rakoshare) 2 | ========= 3 | 4 | Rakoshare is a sharing tool to synchronize a folder on multiple 5 | computers with no configuration. 6 | 7 | It was inspired by Bittorrent Sync and aims to be an Open Source 8 | alternative. Internally it reuses the Bittorrent protocol to build a 9 | more dynamic protocol. 10 | 11 | Rakoshare is forked off of 12 | [Taipei-Torrent](https://github.com/jackpal/Taipei-Torrent), a pure go 13 | Bittorrent application. 14 | 15 | Current Status 16 | -------------- 17 | 18 | Working but unstable. Anything, both in the implementation or the 19 | protocol, can change at any time (although the general idea should 20 | remain the same) 21 | 22 | Rakoshare might currently eat your data if you're not cautious. 23 | 24 | Tested on Linux. 
25 | 26 | - [x] On-the-fly encryption with [spiped](https://github.com/dchest/spipe) 27 | - [ ] Capabilities on the model of Tahoe-LAFS 28 | - [x] ReadWrite capability 29 | - [x] Read capability 30 | - [ ] Store capability (ie keep data on disk without being able to read it) 31 | 32 | Development Roadmap 33 | ------------------- 34 | 35 | * Encryption 36 | 37 | * Encryption of the data at-rest is desirable to send data to 38 | untrusted servers. 39 | 40 | * Speed 41 | * Rakoshare is currently naive in how the folders are checked, there 42 | is room for improvement on this side 43 | 44 | Download, Install, and Build Instructions 45 | ----------------------------------------- 46 | 47 | 1. Download and install the Go tools from http://golang.org 48 | 49 | 2. Use the "go" command to download, install, and build the Rakoshare 50 | app: 51 | 52 | go get github.com/rakoo/rakoshare 53 | 54 | Usage Instructions 55 | ------------------ 56 | 57 | 1. Create a share session: 58 | 59 | `$ ./rakoshare gen -dir ` 60 | 61 | The result will be a list of different ids, each with a different 62 | capability: 63 | 64 | ``` 65 | WriteReadStore: AgpUdzGDo14K7hmkce3pLUXWq3nf1sJfXmmyzeN9vmrNpfRxwZUQjuvhPaZvnysJtB6K2M8tT6f1vvriTko2hP38 66 | ReadStore: ovWviooStXvVpgY7Vrd2FryM8pwDHNL9TyUWdsxAUBUv 67 | Store: 2CSNWUTbN9arXsF37Eu9HmYbUeD5VukpRsgQnCwRAnMyg 68 | ``` 69 | 70 | It will also create a session file in your ~/.local/share/rakoshare 71 | folder. You shouldn't need to look at it. 72 | 73 | 2. Start sharing content: 74 | 75 | `$ ./rakoshare share -id ` 76 | 77 | If you use the WriteReadStore id, any change you make will be 78 | propagated. If you use the ReadStore id, no local changes will be 79 | propagated. The Store id currently works as a ReadStore id. 80 | 81 | Send an Id to someone else, and start sharing ! 82 | 83 | 2. 
If you receive content from someone else, start receiving and sharing content: 84 | 85 | `$ ./rakoshare share -id -dir ` 86 | 87 | For more info: 88 | 89 | rakoshare help 90 | 91 | Demo ! 92 | ------ 93 | 94 | I'm fetching a slew of pics from /r/EarthPorn with 95 | [redditEarthPorn](https://github.com/rakoo/redditEarthPorn) that are 96 | then shared with a rakoshare instance that I run on my server. Use id 97 | evK5HghADU2GrgM2NWFyKsRUj4ymkhoagz9DXgpW6EcN to receive a bunch of 98 | gorgeous images, courtesy of reddit users ! The folder is updated 99 | approximately every hour, and I will do my best to cap the size of the 100 | folder to a reasonable amount (expect ~ 70 MB). 101 | 102 | To test locally: 103 | 104 | `$ ./rakoshare share -id evK5HghADU2GrgM2NWFyKsRUj4ymkhoagz9DXgpW6EcN -dir /some/dir/` 105 | 106 | Note that this id is read-only, meaning you can't modify what is 107 | shared by everyone. 108 | 109 | Third-party Packages 110 | -------------------- 111 | 112 | http://code.google.com/p/bencode-go - Bencode encoder/decoder 113 | 114 | https://github.com/zeebo/bencode - Another Bencode encoder/decoder 115 | 116 | http://code.google.com/p/go-nat-pmp - NAT-PMP firewall client 117 | 118 | https://github.com/hailiang/gosocks - SOCKS5 proxy support 119 | 120 | https://github.com/nictuku/dht - Distributed Hash Table 121 | 122 | https://github.com/nictuku/nettools - Network utilities 123 | 124 | https://github.com/dchest/spipe - pure-go [spiped](https://www.tarsnap.com/spiped.html) implementation 125 | 126 | https://github.com/codegangsta/cli - CLI application builder package 127 | 128 | Related Projects 129 | ---------------- 130 | 131 | https://github.com/jackpal/Taipei-Torrent is the base of rakoshare 132 | 133 | Discussion 134 | ---------- 135 | 136 | Please open issues on the github tracker 137 | (https://github.com/rakoo/rakoshare/issues), or discuss over the mailing 138 | list: rakoshare served-by googlegroups.com 139 | 
-------------------------------------------------------------------------------- /conn.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net" 5 | "time" 6 | 7 | "github.com/dchest/spipe" 8 | ) 9 | 10 | type BufferedSpipeConn struct { 11 | net.Conn 12 | packets chan []byte 13 | quit chan struct{} 14 | } 15 | 16 | func newBufferedSpipeConn(conn net.Conn) BufferedSpipeConn { 17 | bsc := BufferedSpipeConn{conn, make(chan []byte), make(chan struct{}, 1)} 18 | 19 | go func() { 20 | var buf [1024]byte 21 | buflen := 0 22 | 23 | // experimentally, the callers don't batch faster than every 24 | // 10ms, so 50ms is a safe amount of time to wait 25 | tick := time.Tick(50 * time.Millisecond) 26 | 27 | for { 28 | select { 29 | case packet := <-bsc.packets: 30 | if buflen+len(packet) < len(buf) { 31 | n := copy(buf[buflen:], packet) 32 | buflen += n 33 | break 34 | } 35 | 36 | copy(buf[buflen:], packet[:1024-buflen]) 37 | conn.Write(buf[:]) 38 | 39 | packet = packet[1024-buflen:] 40 | for len(packet) > 1024 { 41 | conn.Write(packet[:1024]) 42 | packet = packet[1024:] 43 | } 44 | 45 | buflen = copy(buf[:], packet) 46 | case <-tick: 47 | if buflen == 0 { 48 | break 49 | } 50 | 51 | conn.Write(buf[:buflen]) 52 | buflen = 0 53 | case <-bsc.quit: 54 | return 55 | } 56 | } 57 | }() 58 | 59 | return bsc 60 | } 61 | 62 | func (bsc BufferedSpipeConn) Write(p []byte) (n int, err error) { 63 | bsc.packets <- p 64 | 65 | // MEH 66 | return len(p), nil 67 | } 68 | 69 | func (bsc BufferedSpipeConn) Close() error { 70 | bsc.quit <- struct{}{} 71 | return bsc.Conn.Close() 72 | } 73 | 74 | func NewTCPConn(key []byte, peer string) (conn net.Conn, err error) { 75 | sconn, err := spipe.Dial(key, "tcp", peer) 76 | if err != nil { 77 | return 78 | } 79 | 80 | return newBufferedSpipeConn(sconn), nil 81 | } 82 | -------------------------------------------------------------------------------- /control.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "errors" 7 | "flag" 8 | "fmt" 9 | "io" 10 | "log" 11 | "math" 12 | "math/rand" 13 | "net" 14 | "os" 15 | "strconv" 16 | "strings" 17 | "time" 18 | 19 | "github.com/rakoo/rakoshare/pkg/id" 20 | "github.com/rakoo/rakoshare/pkg/sharesession" 21 | 22 | ed "github.com/agl/ed25519" 23 | "github.com/nictuku/dht" 24 | "github.com/zeebo/bencode" 25 | ) 26 | 27 | var ( 28 | // This error is returned when the incoming message is not of correct 29 | // type, ie EXTENSION (which is 20) 30 | errInvalidType = errors.New("invalid message type") 31 | errMetadataMessage = errors.New("Couldn't create metadata message") 32 | ) 33 | 34 | var useDHT = flag.Bool("useDHT", true, "Use DHT to get peers") 35 | 36 | type ControlSession struct { 37 | ID id.Id 38 | Port int 39 | PeerID string 40 | 41 | // A channel of all announces we get from peers. 42 | // If the announce is for the same torrent as the current one, then it 43 | // is not broadcasted in this channel. 44 | Torrents chan Announce 45 | 46 | // A channel of all new peers we acknowledge, in a ip:port format 47 | // The port is the one advertised 48 | NewPeers chan string 49 | 50 | // The current data torrent 51 | currentIH string 52 | rev string 53 | 54 | ourExtensions map[int]string 55 | header []byte 56 | quit chan struct{} 57 | dht *dht.DHT 58 | peers *Peers 59 | peerMessageChan chan peerMessage 60 | 61 | trackers []string 62 | 63 | session *sharesession.Session 64 | } 65 | 66 | func NewControlSession(shareid id.Id, listenPort int, session *sharesession.Session, trackers []string) (*ControlSession, error) { 67 | sid := "-tt" + strconv.Itoa(os.Getpid()) + "_" + strconv.FormatInt(rand.Int63(), 10) 68 | 69 | // TODO: UPnP UDP port mapping. 
70 | cfg := dht.NewConfig() 71 | cfg.Port = listenPort 72 | cfg.NumTargetPeers = TARGET_NUM_PEERS 73 | 74 | dhtNode, err := dht.New(cfg) 75 | if err != nil { 76 | log.Fatal("DHT node creation error", err) 77 | } 78 | 79 | current := session.GetCurrentIHMessage() 80 | var currentIhMessage IHMessage 81 | err = bencode.NewDecoder(strings.NewReader(current)).Decode(¤tIhMessage) 82 | if err != nil { 83 | log.Printf("Couldn't decode current message, starting from scratch: %s\n", err) 84 | } 85 | 86 | rev := "0-" 87 | if currentIhMessage.Info.Rev != "" { 88 | parts := strings.Split(currentIhMessage.Info.Rev, "-") 89 | if len(parts) == 2 { 90 | if _, err := strconv.Atoi(parts[0]); err == nil { 91 | rev = currentIhMessage.Info.Rev 92 | } 93 | } 94 | } 95 | 96 | cs := &ControlSession{ 97 | Port: listenPort, 98 | PeerID: sid[:20], 99 | ID: shareid, 100 | Torrents: make(chan Announce), 101 | NewPeers: make(chan string), 102 | dht: dhtNode, 103 | peerMessageChan: make(chan peerMessage), 104 | quit: make(chan struct{}), 105 | ourExtensions: map[int]string{ 106 | 1: "ut_pex", 107 | 2: "bs_metadata", 108 | }, 109 | peers: newPeers(), 110 | 111 | currentIH: currentIhMessage.Info.InfoHash, 112 | rev: rev, 113 | 114 | trackers: trackers, 115 | 116 | session: session, 117 | } 118 | go cs.dht.Run() 119 | cs.dht.PeersRequest(string(cs.ID.Infohash), true) 120 | 121 | go cs.Run() 122 | 123 | return cs, nil 124 | } 125 | 126 | func (cs *ControlSession) log(message string, others ...interface{}) { 127 | log.Println("[CONTROL]", message, others) 128 | } 129 | 130 | func (cs *ControlSession) logf(format string, args ...interface{}) { 131 | log.Println("[CONTROL]", fmt.Sprintf(format, args)) 132 | } 133 | 134 | func (cs *ControlSession) Header() (header []byte) { 135 | if len(cs.header) > 0 { 136 | return cs.header 137 | } 138 | 139 | header = make([]byte, 68) 140 | copy(header, kBitTorrentHeader[0:]) 141 | header[27] = header[27] | 0x01 142 | // Support Extension Protocol (BEP-0010) 143 | 
header[25] |= 0x10 144 | 145 | copy(header[28:48], cs.ID.Infohash) 146 | copy(header[48:68], []byte(cs.PeerID)) 147 | 148 | cs.header = header 149 | 150 | return 151 | } 152 | 153 | func (cs *ControlSession) deadlockDetector(heartbeat, quit chan struct{}) { 154 | lastHeartbeat := time.Now() 155 | 156 | deadlockLoop: 157 | for { 158 | select { 159 | case <-quit: 160 | break deadlockLoop 161 | case <-heartbeat: 162 | lastHeartbeat = time.Now() 163 | case <-time.After(15 * time.Second): 164 | age := time.Now().Sub(lastHeartbeat) 165 | cs.log("Starvation or deadlock of main thread detected. Look in the stack dump for what Run() is currently doing.") 166 | cs.log("Last heartbeat", age.Seconds(), "seconds ago") 167 | panic("Killed by deadlock detector") 168 | } 169 | } 170 | } 171 | func (cs *ControlSession) Run() { 172 | // deadlock 173 | heartbeat := make(chan struct{}, 1) 174 | quitDeadlock := make(chan struct{}) 175 | go cs.deadlockDetector(heartbeat, quitDeadlock) 176 | 177 | rechokeChan := time.Tick(10 * time.Second) 178 | verboseChan := time.Tick(10 * time.Minute) 179 | keepAliveChan := time.Tick(60 * time.Second) 180 | 181 | // Start out polling tracker every 20 seconds until we get a response. 182 | // Maybe be exponential backoff here? 183 | retrackerChan := time.Tick(20 * time.Second) 184 | trackerInfoChan := make(chan *TrackerResponse) 185 | 186 | trackerClient := NewTrackerClient("", [][]string{cs.trackers}) 187 | trackerClient.Announce(cs.makeClientStatusReport("started")) 188 | 189 | for { 190 | select { 191 | case <-retrackerChan: 192 | trackerClient.Announce(cs.makeClientStatusReport("")) 193 | case dhtInfoHashPeers := <-cs.dht.PeersRequestResults: 194 | newPeerCount := 0 195 | // key = infoHash. The torrent client currently only 196 | // supports one download at a time, so let's assume 197 | // it's the case. 
198 | for _, peers := range dhtInfoHashPeers { 199 | for _, peer := range peers { 200 | peer = dht.DecodePeerAddress(peer) 201 | if cs.hintNewPeer(peer) { 202 | newPeerCount++ 203 | } 204 | } 205 | } 206 | case ti := <-trackerInfoChan: 207 | cs.logf("Got response from tracker: %#v\n", ti) 208 | newPeerCount := 0 209 | for _, peer := range ti.Peers { 210 | if cs.hintNewPeer(peer) { 211 | newPeerCount++ 212 | } 213 | } 214 | for _, peer6 := range ti.Peers6 { 215 | if cs.hintNewPeer(peer6) { 216 | newPeerCount++ 217 | } 218 | } 219 | 220 | cs.log("Contacting", newPeerCount, "new peers") 221 | interval := ti.Interval 222 | if interval < 120 { 223 | interval = 120 224 | } else if interval > 24*3600 { 225 | interval = 24 * 3600 226 | } 227 | cs.log("..checking again in", interval, "seconds.") 228 | retrackerChan = time.Tick(interval * time.Second) 229 | cs.log("Contacting", newPeerCount, "new peers") 230 | 231 | case pm := <-cs.peerMessageChan: 232 | peer, message := pm.peer, pm.message 233 | peer.lastReadTime = time.Now() 234 | err2 := cs.DoMessage(peer, message) 235 | if err2 != nil { 236 | if err2 != io.EOF { 237 | cs.log("Closing peer", peer.address, "because", err2) 238 | } 239 | cs.ClosePeer(peer) 240 | } 241 | case <-rechokeChan: 242 | // TODO: recalculate who to choke / unchoke 243 | heartbeat <- struct{}{} 244 | if cs.peers.Len() < TARGET_NUM_PEERS { 245 | go cs.dht.PeersRequest(string(cs.ID.Infohash), true) 246 | } 247 | case <-verboseChan: 248 | cs.log("Peers:", cs.peers.Len()) 249 | case <-keepAliveChan: 250 | now := time.Now() 251 | 252 | for _, peer := range cs.peers.All() { 253 | if peer.lastReadTime.Second() != 0 && now.Sub(peer.lastReadTime) > 3*time.Minute { 254 | // log.Println("Closing peer", peer.address, "because timed out.") 255 | cs.ClosePeer(peer) 256 | continue 257 | } 258 | go peer.keepAlive(now) 259 | } 260 | 261 | case <-cs.quit: 262 | cs.log("Quitting torrent session") 263 | quitDeadlock <- struct{}{} 264 | return 265 | } 266 | } 267 | 268 | 
} 269 | 270 | func (cs *ControlSession) Quit() error { 271 | cs.quit <- struct{}{} 272 | for _, peer := range cs.peers.All() { 273 | cs.ClosePeer(peer) 274 | } 275 | if cs.dht != nil { 276 | cs.dht.Stop() 277 | } 278 | return nil 279 | } 280 | 281 | func (cs *ControlSession) makeClientStatusReport(event string) ClientStatusReport { 282 | return ClientStatusReport{ 283 | Event: event, 284 | InfoHash: string(cs.ID.Infohash), 285 | PeerId: cs.PeerID, 286 | Port: cs.Port, 287 | } 288 | } 289 | 290 | func (cs *ControlSession) connectToPeer(peer string) { 291 | conn, err := NewTCPConn([]byte(cs.ID.Psk[:]), peer) 292 | if err != nil { 293 | // log.Println("Failed to connect to", peer, err) 294 | return 295 | } 296 | 297 | header := cs.Header() 298 | _, err = conn.Write(header) 299 | if err != nil { 300 | cs.log("Failed to send header to", peer, err) 301 | return 302 | } 303 | 304 | theirheader, err := readHeader(conn) 305 | if err != nil { 306 | // log.Printf("Failed to read header from %s: %s\n", peer, err) 307 | return 308 | } 309 | 310 | peersInfoHash := string(theirheader[8:28]) 311 | id := string(theirheader[28:48]) 312 | 313 | // If it's us, we don't need to continue 314 | if id == cs.PeerID { 315 | conn.Close() 316 | return 317 | } 318 | 319 | btconn := &btConn{ 320 | header: theirheader, 321 | infohash: peersInfoHash, 322 | id: id, 323 | conn: conn, 324 | } 325 | cs.session.SavePeer(conn.RemoteAddr().String(), cs.peers.HasPeer) 326 | cs.AddPeer(btconn) 327 | } 328 | 329 | func (cs *ControlSession) backoffHintNewPeer(peer string) { 330 | go func() { 331 | for backoff := 1; backoff < 5; backoff++ { 332 | cs.hintNewPeer(peer) 333 | wait := 10 * int(math.Pow(float64(2), float64(backoff))) 334 | // cs.logf("backoff for %s: %d", peer, wait) 335 | <-time.After(time.Duration(wait) * time.Second) 336 | } 337 | return 338 | }() 339 | } 340 | 341 | func (cs *ControlSession) hintNewPeer(peer string) (isnew bool) { 342 | if cs.peers.Know(peer, "") { 343 | return false 344 | } 
345 | 346 | go cs.connectToPeer(peer) 347 | return true 348 | } 349 | 350 | func (cs *ControlSession) AcceptNewPeer(btconn *btConn) { 351 | // If it's us, we don't need to continue 352 | if btconn.id == cs.PeerID { 353 | btconn.conn.Close() 354 | return 355 | } 356 | 357 | _, err := btconn.conn.Write(cs.Header()) 358 | if err != nil { 359 | cs.logf("Error writing header: %s\n", err) 360 | btconn.conn.Close() 361 | return 362 | } 363 | cs.AddPeer(btconn) 364 | } 365 | 366 | func (cs *ControlSession) AddPeer(btconn *btConn) { 367 | theirheader := btconn.header 368 | 369 | peer := btconn.conn.RemoteAddr().String() 370 | if cs.peers.Len() >= MAX_NUM_PEERS { 371 | cs.log("We have enough peers. Rejecting additional peer", peer) 372 | btconn.conn.Close() 373 | return 374 | } 375 | ps := NewPeerState(btconn.conn) 376 | ps.address = peer 377 | ps.id = btconn.id 378 | 379 | if keep := cs.peers.Add(ps); !keep { 380 | return 381 | } 382 | 383 | // If 128, then it supports DHT. 384 | if int(theirheader[7])&0x01 == 0x01 { 385 | // It's OK if we know this node already. The DHT engine will 386 | // ignore it accordingly. 
387 | go cs.dht.AddNode(ps.address) 388 | } 389 | go ps.peerWriter(cs.peerMessageChan) 390 | go ps.peerReader(cs.peerMessageChan) 391 | 392 | if int(theirheader[5])&0x10 == 0x10 { 393 | ps.SendExtensions(cs.ourExtensions, 0) 394 | } 395 | 396 | go func() { 397 | cs.NewPeers <- peer 398 | }() 399 | 400 | cs.logf("AddPeer: added %s", btconn.conn.RemoteAddr().String()) 401 | } 402 | 403 | func (cs *ControlSession) ClosePeer(peer *peerState) { 404 | cs.peers.Delete(peer) 405 | peer.Close() 406 | cs.backoffHintNewPeer(peer.address) 407 | } 408 | 409 | func (cs *ControlSession) DoMessage(p *peerState, message []byte) (err error) { 410 | if message == nil { 411 | return io.EOF // The reader or writer goroutine has exited 412 | } 413 | if len(message) == 0 { // keep alive 414 | return 415 | } 416 | 417 | if message[0] != EXTENSION { 418 | cs.logf("Wrong message type: %d\n", message[0]) 419 | return errInvalidType 420 | } 421 | switch message[1] { 422 | case EXTENSION_HANDSHAKE: 423 | err = cs.DoHandshake(message[1:], p) 424 | default: 425 | err = cs.DoOther(message[1:], p) 426 | } 427 | 428 | return 429 | } 430 | 431 | func (cs *ControlSession) DoHandshake(msg []byte, p *peerState) (err error) { 432 | var h ExtensionHandshake 433 | err = bencode.NewDecoder(bytes.NewReader(msg[1:])).Decode(&h) 434 | if err != nil { 435 | cs.log("Error when unmarshaling extension handshake") 436 | return err 437 | } 438 | 439 | p.theirExtensions = make(map[string]int) 440 | for name, code := range h.M { 441 | p.theirExtensions[name] = code 442 | } 443 | 444 | // Now that handshake is done and we know their extension, send the 445 | // current ih message, if we have one 446 | // 447 | // We need to de-serialize the current ih message saved in db before 448 | // passing it to the sender otherwise it is serialized into a string 449 | var currentIHMessage IHMessage 450 | currentFromSession := cs.session.GetCurrentIHMessage() 451 | if len(currentFromSession) > 0 { 452 | err = 
bencode.NewDecoder(strings.NewReader(currentFromSession)).Decode(¤tIHMessage) 453 | if err != nil { 454 | cs.log("Error deserializing current ih message to be resent", err) 455 | } else { 456 | p.sendExtensionMessage("bs_metadata", currentIHMessage) 457 | } 458 | } 459 | 460 | return nil 461 | } 462 | 463 | func (cs *ControlSession) DoOther(msg []byte, p *peerState) (err error) { 464 | if ext, ok := cs.ourExtensions[int(msg[0])]; ok { 465 | switch ext { 466 | case "bs_metadata": 467 | err = cs.DoMetadata(msg[1:], p) 468 | case "ut_pex": 469 | err = cs.DoPex(msg[1:], p) 470 | default: 471 | err = errors.New(fmt.Sprintf("unknown extension: %s", ext)) 472 | } 473 | } else { 474 | err = errors.New(fmt.Sprintf("Unknown extension: %d", int(msg[0]))) 475 | } 476 | 477 | return 478 | } 479 | 480 | type IHMessage struct { 481 | Info NewInfo `bencode:"info"` 482 | 483 | // The port we are listening on 484 | Port int64 `bencode:"port"` 485 | 486 | // The signature of the info dict 487 | Sig string `bencode:"sig"` 488 | } 489 | 490 | type NewInfo struct { 491 | InfoHash string `bencode:"infohash"` 492 | 493 | // The revision, ala CouchDB 494 | // ie - 495 | Rev string `bencode:"rev"` 496 | } 497 | 498 | func NewIHMessage(port int64, ih, rev string, priv id.PrivKey) (mm IHMessage, err error) { 499 | 500 | info := NewInfo{ 501 | InfoHash: ih, 502 | Rev: rev, 503 | } 504 | 505 | var buf bytes.Buffer 506 | err = bencode.NewEncoder(&buf).Encode(info) 507 | if err != nil { 508 | log.Println("[CONTROL] Couldn't encode ih message, returning now") 509 | return mm, err 510 | } 511 | 512 | var privarg [ed.PrivateKeySize]byte 513 | copy(privarg[:], priv[:]) 514 | sig := ed.Sign(&privarg, buf.Bytes()) 515 | 516 | return IHMessage{ 517 | Info: info, 518 | Port: port, 519 | Sig: string(sig[:]), 520 | }, nil 521 | } 522 | 523 | func (cs *ControlSession) DoMetadata(msg []byte, p *peerState) (err error) { 524 | var message IHMessage 525 | err = 
bencode.NewDecoder(bytes.NewReader(msg)).Decode(&message) 526 | if err != nil { 527 | cs.log("Couldn't decode metadata message: ", err) 528 | return 529 | } 530 | if message.Port == 0 { 531 | return 532 | } 533 | 534 | // take his IP addr, use the advertised port 535 | ip := p.conn.RemoteAddr().(*net.TCPAddr).IP.String() 536 | port := strconv.Itoa(int(message.Port)) 537 | peer := ip + ":" + port 538 | 539 | if cs.isNewerThan(message.Info.Rev) { 540 | return 541 | } 542 | 543 | var tmpInfoBuf bytes.Buffer 544 | err = bencode.NewEncoder(&tmpInfoBuf).Encode(message.Info) 545 | if err != nil { 546 | cs.log("Couldn't encode ih message, returning now") 547 | return err 548 | } 549 | rawInfo := tmpInfoBuf.Bytes() 550 | 551 | pub := [ed.PublicKeySize]byte(cs.ID.Pub) 552 | var sig [ed.SignatureSize]byte 553 | copy(sig[0:ed.SignatureSize], message.Sig) 554 | ok := ed.Verify(&pub, rawInfo, &sig) 555 | if !ok { 556 | return errors.New("Bad Signature") 557 | } 558 | 559 | var test IHMessage 560 | err = bencode.NewDecoder(bytes.NewReader(msg)).Decode(&test) 561 | cs.session.SaveIHMessage(msg) 562 | cs.Torrents <- Announce{ 563 | infohash: message.Info.InfoHash, 564 | peer: peer, 565 | } 566 | 567 | return 568 | } 569 | 570 | func (cs *ControlSession) isNewerThan(rev string) bool { 571 | remoteParts := strings.Split(rev, "-") 572 | if len(remoteParts) != 2 { 573 | return true 574 | } 575 | remoteCounter, err := strconv.Atoi(remoteParts[0]) 576 | if err != nil { 577 | return true 578 | } 579 | 580 | localParts := strings.Split(cs.rev, "-") 581 | if len(localParts) != 2 { 582 | return true 583 | } 584 | localCounter, err := strconv.Atoi(localParts[0]) 585 | if err != nil { 586 | return true 587 | } 588 | 589 | return localCounter >= remoteCounter 590 | } 591 | 592 | func (cs *ControlSession) DoPex(msg []byte, p *peerState) (err error) { 593 | return 594 | } 595 | 596 | func (cs *ControlSession) Matches(ih string) bool { 597 | return string(cs.ID.Infohash) == ih 598 | } 599 | 600 | 
func (cs *ControlSession) SetCurrent(ih string) error { 601 | if cs.currentIH == ih { 602 | return nil 603 | } 604 | 605 | parts := strings.Split(cs.rev, "-") 606 | if len(parts) != 2 { 607 | cs.logf("Invalid rev: %s\n", cs.rev) 608 | parts = []string{"0", ""} 609 | } 610 | 611 | counter, err := strconv.Atoi(parts[0]) 612 | if err != nil { 613 | counter = 0 614 | } 615 | newCounter := strconv.Itoa(counter + 1) 616 | 617 | cs.logf("Updating rev with ih %x", ih) 618 | newRev := newCounter + "-" + fmt.Sprintf("%x", sha1.Sum([]byte(ih+parts[1]))) 619 | 620 | mess, err := NewIHMessage(int64(cs.Port), ih, newRev, cs.ID.Priv) 621 | if err != nil { 622 | return err 623 | } 624 | var buf bytes.Buffer 625 | err = bencode.NewEncoder(&buf).Encode(mess) 626 | if err != nil { 627 | return err 628 | } 629 | err = cs.session.SaveIHMessage(buf.Bytes()) 630 | if err != nil { 631 | return err 632 | } 633 | 634 | cs.currentIH = ih 635 | cs.rev = newRev 636 | 637 | cs.broadcast(mess) 638 | return nil 639 | } 640 | 641 | func (cs *ControlSession) broadcast(message IHMessage) { 642 | for _, ps := range cs.peers.All() { 643 | if _, ok := ps.theirExtensions["bs_metadata"]; !ok { 644 | continue 645 | } 646 | 647 | ps.sendExtensionMessage("bs_metadata", message) 648 | } 649 | } 650 | -------------------------------------------------------------------------------- /dirwatch.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "errors" 7 | "fmt" 8 | "hash" 9 | "io" 10 | "log" 11 | "os" 12 | "path/filepath" 13 | "strings" 14 | "sync" 15 | "time" 16 | 17 | "github.com/zeebo/bencode" 18 | 19 | "github.com/rakoo/rakoshare/pkg/sharesession" 20 | ) 21 | 22 | var ( 23 | errNewFile = errors.New("Got new file") 24 | errInvalidDir = errors.New("Invalid watched dir") 25 | ) 26 | 27 | type state int 28 | 29 | const ( 30 | IDEM = iota 31 | CHANGED 32 | ) 33 | 34 | type Watcher struct { 35 | session 
*sharesession.Session 36 | watchedDir string 37 | lock sync.Mutex 38 | 39 | PingNewTorrent chan string 40 | } 41 | 42 | func NewWatcher(session *sharesession.Session, watchedDir string) (w *Watcher, err error) { 43 | w = &Watcher{ 44 | session: session, 45 | watchedDir: watchedDir, 46 | PingNewTorrent: make(chan string), 47 | } 48 | 49 | go w.watch() 50 | 51 | // Initialization, only if there is something in the dir 52 | if _, err := os.Stat(watchedDir); err != nil { 53 | return nil, err 54 | } 55 | 56 | st, err := os.Stat(watchedDir) 57 | if err != nil { 58 | return nil, err 59 | } 60 | if !st.IsDir() { 61 | return nil, errInvalidDir 62 | } 63 | 64 | go func() { 65 | w.PingNewTorrent <- session.GetCurrentInfohash() 66 | }() 67 | 68 | return 69 | } 70 | 71 | func (w *Watcher) watch() { 72 | var previousState, currentState state 73 | currentState = IDEM 74 | 75 | compareTime := w.session.GetLastModTime() 76 | 77 | // All paths in previous scan, sorted alphabetically 78 | previousScanPaths := []string{} 79 | 80 | w.lock.Lock() 81 | currentTorrent := w.session.GetCurrentTorrent() 82 | if len(currentTorrent) != 0 { 83 | m, err := NewMetaInfoFromContent([]byte(currentTorrent)) 84 | if err == nil { 85 | for _, f := range m.Info.Files { 86 | previousScanPaths = append(previousScanPaths, filepath.Join(f.Path...)) 87 | } 88 | } 89 | } 90 | w.lock.Unlock() 91 | 92 | for _ = range time.Tick(10 * time.Second) { 93 | w.lock.Lock() 94 | 95 | err := torrentWalk(w.watchedDir, func(path string, info os.FileInfo, perr error) (err error) { 96 | if perr != nil { 97 | return perr 98 | } 99 | 100 | if info.ModTime().After(compareTime) { 101 | fmt.Printf("[TORRENTWATCH] newer at %s\n", path) 102 | return errNewFile 103 | } 104 | return nil 105 | }) 106 | 107 | w.lock.Unlock() 108 | 109 | if err == errNewFile { 110 | currentState = CHANGED 111 | } else if err == nil { 112 | currentState = IDEM 113 | } else { 114 | log.Println("Error while walking dir:", err) 115 | } 116 | 117 | 
compareTime = time.Now() 118 | 119 | if currentState == IDEM && previousState == CHANGED { 120 | // Note that we may be in the CHANGED state for multiple 121 | // iterations, such as when changes take more than 10 seconds to 122 | // finish. When we go back to "idle" state, we kick in the 123 | // metadata creation. 124 | 125 | // Block until we completely manage it. We will take 126 | // care of other changes in the next run of the loop. 127 | ih, err := w.torrentify() 128 | if err != nil { 129 | log.Printf("Couldn't torrentify: ", err) 130 | continue 131 | } 132 | w.PingNewTorrent <- ih 133 | } 134 | 135 | previousState = currentState 136 | } 137 | } 138 | 139 | func (w *Watcher) torrentify() (ih string, err error) { 140 | w.lock.Lock() 141 | defer w.lock.Unlock() 142 | 143 | meta, err := createMeta(w.watchedDir) 144 | if err != nil { 145 | log.Println(err) 146 | return 147 | } 148 | 149 | var buf bytes.Buffer 150 | err = bencode.NewEncoder(&buf).Encode(meta) 151 | if err != nil { 152 | return 153 | } 154 | w.session.SaveTorrent(buf.Bytes(), meta.InfoHash, time.Now().Format(time.RFC3339)) 155 | 156 | return meta.InfoHash, err 157 | } 158 | 159 | func createMeta(dir string) (meta *MetaInfo, err error) { 160 | blockSize := int64(1 << 20) // 1MiB 161 | 162 | fileDicts := make([]*FileDict, 0) 163 | 164 | hasher := NewBlockHasher(blockSize) 165 | err = torrentWalk(dir, func(path string, info os.FileInfo, perr error) (err error) { 166 | if perr != nil { 167 | return perr 168 | } 169 | 170 | f, err := os.Open(path) 171 | if err != nil { 172 | return errors.New(fmt.Sprintf("Couldn't open %s for hashing: %s\n", path, err)) 173 | } 174 | defer f.Close() 175 | 176 | _, err = io.Copy(hasher, f) 177 | if err != nil { 178 | log.Printf("Couldn't hash %s: %s\n", path, err) 179 | return err 180 | } 181 | 182 | relPath, err := filepath.Rel(dir, path) 183 | if err != nil { 184 | return 185 | } 186 | 187 | fileDict := &FileDict{ 188 | Length: info.Size(), 189 | Path: 
strings.Split(relPath, string(os.PathSeparator)), 190 | } 191 | fileDicts = append(fileDicts, fileDict) 192 | 193 | return 194 | }) 195 | if err != nil { 196 | return 197 | } 198 | 199 | end := hasher.Close() 200 | if end != nil { 201 | return 202 | } 203 | 204 | meta = &MetaInfo{ 205 | Info: &InfoDict{ 206 | Pieces: string(hasher.Pieces), 207 | PieceLength: blockSize, 208 | Private: 0, 209 | Name: "rakoshare", 210 | Files: fileDicts, 211 | }, 212 | } 213 | 214 | hash := sha1.New() 215 | err = bencode.NewEncoder(hash).Encode(meta.Info) 216 | if err != nil { 217 | return 218 | } 219 | meta.InfoHash = string(hash.Sum(nil)) 220 | 221 | return 222 | } 223 | 224 | type BlockHasher struct { 225 | sha1er hash.Hash 226 | left int64 227 | blockSize int64 228 | Pieces []byte 229 | } 230 | 231 | func NewBlockHasher(blockSize int64) (h *BlockHasher) { 232 | return &BlockHasher{ 233 | blockSize: blockSize, 234 | sha1er: sha1.New(), 235 | left: blockSize, 236 | } 237 | } 238 | 239 | // You shouldn't use this one 240 | func (h *BlockHasher) Write(p []byte) (n int, err error) { 241 | n2, err := h.ReadFrom(bytes.NewReader(p)) 242 | return int(n2), err 243 | } 244 | 245 | func (h *BlockHasher) ReadFrom(rd io.Reader) (n int64, err error) { 246 | var stop bool 247 | 248 | for { 249 | if h.left > 0 { 250 | thisN, err := io.CopyN(h.sha1er, rd, h.left) 251 | if err != nil { 252 | if err == io.EOF { 253 | stop = true 254 | } else { 255 | return n, err 256 | } 257 | } 258 | h.left -= thisN 259 | n += thisN 260 | } 261 | if h.left == 0 { 262 | h.Pieces = h.sha1er.Sum(h.Pieces) 263 | h.sha1er = sha1.New() 264 | h.left = h.blockSize 265 | } 266 | 267 | if stop { 268 | break 269 | } 270 | } 271 | 272 | return 273 | } 274 | 275 | func (h *BlockHasher) Close() (err error) { 276 | if h.left == h.blockSize { 277 | // We're at the end of a blockSize, we don't have any buffered data 278 | return 279 | } 280 | h.Pieces = h.sha1er.Sum(h.Pieces) 281 | return 282 | } 283 | 284 | func torrentWalk(root 
string, fn filepath.WalkFunc) (err error) { 285 | return filepath.Walk(root, func(path string, info os.FileInfo, perr error) (err error) { 286 | if info == nil || !info.Mode().IsRegular() { 287 | return 288 | } 289 | 290 | // Torrents can't have empty files 291 | if info.Size() == 0 { 292 | return 293 | } 294 | 295 | if strings.HasPrefix(filepath.Base(path), ".") { 296 | return 297 | } 298 | 299 | if filepath.Ext(path) == ".part" { 300 | return 301 | } 302 | 303 | return fn(path, info, perr) 304 | }) 305 | } 306 | -------------------------------------------------------------------------------- /dirwatch_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha1" 5 | "io/ioutil" 6 | "os" 7 | "testing" 8 | ) 9 | 10 | type testVector struct { 11 | dir string 12 | expectedTorrent string 13 | } 14 | 15 | var vecs = []testVector{ 16 | { 17 | dir: "testData/dsl-4.4.10.iso", 18 | expectedTorrent: "testData/dsl-1M.torrent", 19 | }, 20 | { 21 | dir: "testData/1Mrandom", 22 | expectedTorrent: "testData/1Mrandom.torrent", 23 | }, 24 | } 25 | 26 | func TestTorrentify(t *testing.T) { 27 | if testing.Short() { 28 | t.Skip("This test requires the iso") 29 | } 30 | 31 | for _, vec := range vecs { 32 | if _, err := os.Stat("testData/dsl-4.4.10.iso"); err != nil && os.IsNotExist(err) { 33 | t.Fatal("You need to download the iso relative to a.torrent to run this test") 34 | } 35 | 36 | actualMeta, err := createMeta(vec.dir) 37 | if err != nil { 38 | t.Fatal(err) 39 | } 40 | actualPieces := split(actualMeta.Info.Pieces) 41 | 42 | expected, err := ioutil.ReadFile(vec.expectedTorrent) 43 | if err != nil { 44 | t.Fatal(err) 45 | } 46 | 47 | expectedMeta, err := NewMetaInfoFromContent(expected) 48 | if err != nil { 49 | t.Fatal(err) 50 | } 51 | expectedPieces := split(expectedMeta.Info.Pieces) 52 | 53 | if len(expectedPieces) != len(actualPieces) { 54 | t.Fatalf("Invalid length! 
Expected %d piece(s), got %d\n", len(expectedPieces), len(actualPieces)) 55 | } 56 | 57 | for i, exp := range expectedPieces { 58 | if actualPieces[i] != exp { 59 | t.Fatalf("%x - %x\n", exp, actualPieces[i]) 60 | } 61 | } 62 | 63 | } 64 | } 65 | 66 | func split(pieces string) []string { 67 | out := make([]string, len(pieces)/sha1.Size) 68 | for i := 0; i < len(out); i++ { 69 | from := i * sha1.Size 70 | to := (i + 1) * sha1.Size 71 | if i == len(out)-1 { 72 | to = len(pieces) 73 | } 74 | out[i] = pieces[from:to] 75 | } 76 | return out 77 | } 78 | -------------------------------------------------------------------------------- /files.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "io" 6 | "log" 7 | "os" 8 | "path" 9 | "sort" 10 | "strings" 11 | ) 12 | 13 | type FileStore interface { 14 | io.ReaderAt 15 | io.WriterAt 16 | io.Closer 17 | 18 | // Set all pieces from this one to be bad 19 | SetBad(from int64) 20 | 21 | // When downloading is finished, call Finish to move .part files to 22 | // real files 23 | Cleanup() error 24 | } 25 | 26 | type fileEntry struct { 27 | length int64 28 | name string 29 | } 30 | 31 | type fileStore struct { 32 | offsets []int64 33 | files []fileEntry // Stored in increasing globalOffset order 34 | } 35 | 36 | func (fe *fileEntry) open(name string, length int64) (err error) { 37 | partname := name + ".part" 38 | _, parterr := os.Stat(partname) 39 | if parterr == nil { 40 | parterr = os.Remove(partname) 41 | if parterr != nil { 42 | log.Printf("Couldn't remove part file: ", parterr) 43 | } 44 | } 45 | 46 | fe.length = length 47 | fe.name = name 48 | 49 | // Test for existence and correct length 50 | var needToRetrieve bool 51 | st, errStat := os.Stat(name) 52 | if errStat != nil && os.IsNotExist(errStat) { 53 | needToRetrieve = true 54 | } else if st.Size() != fe.length { 55 | needToRetrieve = true 56 | } 57 | 58 | if needToRetrieve { 59 | 60 | // Cap 
filename to 60 unicode characters because most filesystems have 61 | // a limit of 255 bytes (see 62 | // https://en.wikipedia.org/wiki/Comparison_of_file_systems) so we 63 | // take a very high margin by expecting each character to be on 4 64 | // bytes (theoretically possible with UTF-8) 65 | ext := path.Ext(name) 66 | rawname := strings.Replace(name, ext, "", 1) 67 | if len(rawname) > 60 { 68 | rawname = rawname[:60] + "[...]" 69 | } 70 | partname = rawname + ext + ".part" 71 | 72 | f, err := os.Create(partname) 73 | defer f.Close() 74 | if err != nil { 75 | return err 76 | } 77 | fe.name = partname 78 | 79 | err = os.Truncate(fe.name, length) 80 | if err != nil { 81 | err = errors.New("could not truncate file") 82 | } 83 | } 84 | 85 | return 86 | } 87 | 88 | func (fe *fileEntry) isPart() bool { 89 | return strings.HasSuffix(fe.name, ".part") 90 | } 91 | 92 | func (fe *fileEntry) SetPart() { 93 | if fe.isPart() { 94 | return 95 | } 96 | 97 | err := copyfile(fe.name, fe.name+".part") 98 | if err != nil { 99 | log.Println("Error at copying to .part file: ", err) 100 | } 101 | 102 | fe.name = fe.name + ".part" 103 | 104 | err = os.Truncate(fe.name, fe.length) 105 | if err != nil { 106 | log.Println("Could not truncate file.") 107 | } 108 | } 109 | 110 | func (fe *fileEntry) ReadAt(p []byte, off int64) (n int, err error) { 111 | file, err := os.Open(fe.name) 112 | if err != nil { 113 | return 114 | } 115 | defer file.Close() 116 | n, err = file.ReadAt(p, off) 117 | if err != nil { 118 | log.Printf("Couldn't read %d-%d from %s: %s\n", off, 119 | off+int64(len(p)), fe.name, err) 120 | return 121 | } 122 | return 123 | } 124 | 125 | func (fe *fileEntry) WriteAt(p []byte, off int64) (n int, err error) { 126 | file, err := os.OpenFile(fe.name, os.O_RDWR, 0600) 127 | if err != nil { 128 | return 129 | } 130 | defer file.Close() 131 | return file.WriteAt(p, off) 132 | } 133 | 134 | func (fe *fileEntry) Cleanup() (err error) { 135 | if fe.isPart() { 136 | realname := 
// ensureDirectory makes sure the parent directory of fullPath exists,
// creating the whole chain with mode 0755 when needed. Relative paths
// are anchored at the current working directory first.
func ensureDirectory(fullPath string) (err error) {
	fullPath = path.Clean(fullPath)
	if !strings.HasPrefix(fullPath, "/") {
		// Transform into an absolute path.
		cwd, werr := os.Getwd()
		if werr != nil {
			return werr
		}
		fullPath = cwd + "/" + fullPath
	}
	base, _ := path.Split(fullPath)
	if base == "" {
		// An absolute path always splits into a non-empty base; hitting
		// this indicates a logic error, not bad input.
		panic("Programming error: could not find base directory for absolute path " + fullPath)
	}
	return os.MkdirAll(base, 0755)
}
184 | cleanSrcPath := path.Clean("/" + path.Join(src.Path...))[1:] 185 | fullPath := path.Join(storePath, cleanSrcPath) 186 | err = ensureDirectory(fullPath) 187 | if err != nil { 188 | return 189 | } 190 | err = fs.files[i].open(fullPath, src.Length) 191 | if err != nil { 192 | return 193 | } 194 | fs.offsets[i] = totalSize 195 | totalSize += src.Length 196 | } 197 | f = fs 198 | return 199 | } 200 | 201 | func (f *fileStore) find(offset int64) int { 202 | return sort.Search(len(f.offsets), func(i int) bool { 203 | if i >= len(f.offsets)-1 { 204 | return true 205 | } 206 | return f.offsets[i+1] >= offset 207 | }) 208 | } 209 | 210 | func (f *fileStore) ReadAt(p []byte, off int64) (n int, err error) { 211 | index := f.find(off) 212 | for len(p) > 0 && index < len(f.offsets) { 213 | chunk := int64(len(p)) 214 | entry := &f.files[index] 215 | itemOffset := off - f.offsets[index] 216 | if itemOffset < entry.length { 217 | space := entry.length - itemOffset 218 | if space < chunk { 219 | chunk = space 220 | } 221 | var nThisTime int 222 | nThisTime, err = entry.ReadAt(p[0:chunk], itemOffset) 223 | n = n + nThisTime 224 | if err != nil { 225 | return 226 | } 227 | p = p[nThisTime:] 228 | off += int64(nThisTime) 229 | } 230 | index++ 231 | } 232 | // At this point if there's anything left to read it means we've run off the 233 | // end of the file store. Read zeros. This is defined by the bittorrent protocol. 
234 | for i, _ := range p { 235 | p[i] = 0 236 | } 237 | return 238 | } 239 | 240 | func (f *fileStore) WriteAt(p []byte, off int64) (n int, err error) { 241 | index := f.find(off) 242 | for len(p) > 0 && index < len(f.offsets) { 243 | chunk := int64(len(p)) 244 | entry := &f.files[index] 245 | itemOffset := off - f.offsets[index] 246 | if itemOffset < entry.length { 247 | space := entry.length - itemOffset 248 | if space < chunk { 249 | chunk = space 250 | } 251 | var nThisTime int 252 | nThisTime, err = entry.WriteAt(p[0:chunk], itemOffset) 253 | n += nThisTime 254 | if err != nil { 255 | return 256 | } 257 | p = p[nThisTime:] 258 | off += int64(nThisTime) 259 | } 260 | index++ 261 | } 262 | // At this point if there's anything left to write it means we've run off the 263 | // end of the file store. Check that the data is zeros. 264 | // This is defined by the bittorrent protocol. 265 | for i, _ := range p { 266 | if p[i] != 0 { 267 | err = errors.New("unexpected non-zero data at end of store") 268 | n = n + i 269 | return 270 | } 271 | } 272 | n = n + len(p) 273 | return 274 | } 275 | 276 | func (f *fileStore) SetBad(from int64) { 277 | index := f.find(from) 278 | for index < len(f.offsets) { 279 | entry := &f.files[index] 280 | entry.SetPart() 281 | index++ 282 | } 283 | } 284 | 285 | func (f *fileStore) Cleanup() (err error) { 286 | for _, fe := range f.files { 287 | err = fe.Cleanup() 288 | } 289 | 290 | return 291 | } 292 | 293 | func (f *fileStore) Close() (err error) { 294 | return 295 | } 296 | 297 | func copyfile(fromname, toname string) (err error) { 298 | from, err := os.Open(fromname) 299 | if err != nil { 300 | log.Printf("Couldn't open %s for read: %s", fromname, err) 301 | return err 302 | } 303 | defer from.Close() 304 | 305 | to, err := os.Create(toname) 306 | if err != nil { 307 | log.Printf("Couldn't open %s for write: %s", toname, err) 308 | return err 309 | } 310 | defer to.Close() 311 | 312 | _, err = io.Copy(to, from) 313 | if err != nil { 
314 | log.Printf("Couldn't copy to part: %s", err) 315 | return err 316 | } 317 | 318 | return 319 | } 320 | -------------------------------------------------------------------------------- /files_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha1" 5 | "fmt" 6 | "testing" 7 | ) 8 | 9 | type testFile struct { 10 | path string 11 | fileLen int64 12 | // SHA1 of fileLen bytes. 13 | hash string 14 | // SHA1 of the first 25 bytes only. 15 | hashPieceA string 16 | // SHA1 of bytes 25-49 17 | hashPieceB string 18 | } 19 | 20 | var tests []testFile = []testFile{{ 21 | "testData/testFile", 22 | 8054, 23 | // shasum testData/testFile | tr "[a-z]" "[A-Z]" 24 | "BC6314A1D1D36EC6C0888AF9DBD3B5E826612ADA", 25 | // dd if=testData/testFile bs=25 count=1 | shasum | tr "[a-z]" "[A-Z]" 26 | "F072A5A05C7ED8EECFFB6524FBFA89CA725A66C3", 27 | // dd if=testData/testFile bs=25 count=1 skip=1 | shasum | tr "[a-z]" "[A-Z]" 28 | "859CF11E055E61296F42EEB5BB19E598626A5173", 29 | }} 30 | 31 | func mkFileStore(tf testFile) (fs *fileStore, err error) { 32 | f := fileEntry{tf.fileLen, tf.path} 33 | return &fileStore{[]int64{0}, []fileEntry{f}}, nil 34 | } 35 | 36 | func TestFileStoreRead(t *testing.T) { 37 | for _, testFile := range tests { 38 | fs, err := mkFileStore(testFile) 39 | if err != nil { 40 | t.Fatal(err) 41 | } 42 | ret := make([]byte, testFile.fileLen) 43 | _, err = fs.ReadAt(ret, 0) 44 | if err != nil { 45 | t.Fatal(err) 46 | } 47 | h := sha1.New() 48 | h.Write(ret) 49 | sum := fmt.Sprintf("%X", h.Sum(nil)) 50 | if sum != testFile.hash { 51 | t.Errorf("Wanted %v, got %v\n", testFile.hash, sum) 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /generate.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/hex" 5 | "fmt" 6 | "path/filepath" 7 | 8 | 
"github.com/rakoo/rakoshare/pkg/id" 9 | "github.com/rakoo/rakoshare/pkg/sharesession" 10 | ) 11 | 12 | func Generate(target, workDir string) error { 13 | tmpId, err := id.New() 14 | if err != nil { 15 | return err 16 | } 17 | fmt.Printf("WriteReadStore:\t%s\n ReadStore:\t%s\n Store:\t%s\n", 18 | tmpId.WRS(), tmpId.RS(), tmpId.S()) 19 | 20 | dbFile := filepath.Join(workDir, hex.EncodeToString(tmpId.Infohash)+".sql") 21 | session, err := sharesession.New(dbFile) 22 | if err != nil { 23 | return err 24 | } 25 | 26 | target, err = filepath.Abs(target) 27 | if err != nil { 28 | return err 29 | } 30 | return session.SaveSession(target, tmpId) 31 | } 32 | -------------------------------------------------------------------------------- /listen.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "net" 8 | "strconv" 9 | 10 | "github.com/dchest/spipe" 11 | ) 12 | 13 | var ( 14 | // If the port is 0, picks up a random port. Don't use port 6881 which 15 | // is blacklisted by some trackers. 16 | port = flag.Int("port", 7777, "Port to listen on.") 17 | useUPnP = flag.Bool("useUPnP", false, "Use UPnP to open port in firewall.") 18 | useNATPMP = flag.Bool("useNATPMP", false, "Use NAT-PMP to open port in firewall.") 19 | ) 20 | 21 | // btConn wraps an incoming network connection and contains metadata that helps 22 | // identify which active torrentSession it's relevant for. 23 | type btConn struct { 24 | conn net.Conn 25 | header []byte 26 | infohash string 27 | id string 28 | } 29 | 30 | // listenForPeerConnections listens on a TCP port for incoming connections and 31 | // demuxes them to the appropriate active torrentSession based on the InfoHash 32 | // in the header. 
// listenForPeerConnections listens on a TCP port for incoming connections and
// demuxes them to the appropriate active torrentSession based on the InfoHash
// in the header.
//
// Every accepted connection is wrapped in an spipe encrypted channel
// keyed by `key` before the handshake is read, so only peers holding
// the share's key can get as far as the header exchange. Each
// successfully-handshaken connection is delivered on the returned
// conChan together with its parsed infohash and peer id.
func listenForPeerConnections(key []byte) (conChan chan *btConn, listenPort int, err error) {
	listener, err := createListener()
	if err != nil {
		return
	}
	conChan = make(chan *btConn)
	// Recover the port actually bound (it may have been chosen by the
	// OS or by the NAT traversal logic).
	_, portstring, err := net.SplitHostPort(listener.Addr().String())
	if err != nil {
		log.Printf("Listener failed while finding the host/port for %v: %v", portstring, err)
		return
	}
	listenPort, err = strconv.Atoi(portstring)
	if err != nil {
		log.Printf("Listener failed while converting %v to integer: %v", portstring, err)
		return
	}
	// Accept loop; runs for the lifetime of the process.
	go func() {
		for {
			tcpConn, err := listener.Accept()
			if err != nil {
				log.Println("Listener accept failed:", err)
				continue
			}

			// Handshake each peer in its own goroutine so a slow or
			// stalled peer cannot block the accept loop.
			go func() {
				conn := spipe.Server(key, tcpConn)
				bconn := newBufferedSpipeConn(conn)
				header, err := readHeader(bconn)
				if err != nil {
					//log.Println("Error reading header: ", err)
					bconn.Close()
					return
				}
				// header holds the 48 post-magic handshake bytes:
				// [0:8] reserved/extension bits, [8:28] infohash,
				// [28:48] peer id.
				peersInfoHash := string(header[8:28])
				id := string(header[28:48])
				conChan <- &btConn{
					header:   header,
					infohash: peersInfoHash,
					id:       id,
					conn:     bconn,
				}
			}()
		}
	}()
	return
}
101 | log.Fatal("Listen failed:", err) 102 | } 103 | log.Println("Listening for peers on port:", listenPort) 104 | return 105 | } 106 | 107 | // createPortMapping creates a NAT port mapping, or nil if none requested or found. 108 | func createPortMapping() (nat NAT, err error) { 109 | if *useUPnP && *useNATPMP { 110 | err = fmt.Errorf("Cannot specify both -useUPnP and -useNATPMP") 111 | return 112 | } 113 | if *useUPnP { 114 | log.Println("Using UPnP to open port.") 115 | nat, err = Discover() 116 | } 117 | if *useNATPMP { 118 | if gateway == "" { 119 | err = fmt.Errorf("-useNATPMP requires -gateway") 120 | return 121 | } 122 | log.Println("Using NAT-PMP to open port.") 123 | gatewayIP := net.ParseIP(gateway) 124 | if gatewayIP == nil { 125 | err = fmt.Errorf("Could not parse gateway %q", gateway) 126 | } 127 | nat = NewNatPMP(gatewayIP) 128 | } 129 | return 130 | } 131 | 132 | func chooseListenPort(nat NAT) (listenPort int, err error) { 133 | listenPort = *port 134 | // TODO: Unmap port when exiting. (Right now we never exit cleanly.) 
// readFull reads exactly len(buf) bytes from conn, looping over short
// reads (net.Conn.Read may legally return fewer bytes than requested).
func readFull(conn net.Conn, buf []byte) error {
	for n := 0; n < len(buf); {
		m, err := conn.Read(buf[n:])
		if err != nil {
			return err
		}
		n += m
	}
	return nil
}

// readHeader reads and validates the 68-byte bittorrent handshake from
// conn and returns the 48 bytes that follow the protocol magic
// (reserved bits + infohash + peer id).
//
// BUG FIX: the original issued single conn.Read calls and assumed each
// returned the full requested length; a short read on the magic string
// or the trailing 48 bytes mis-parsed the handshake. All reads now go
// through readFull.
func readHeader(conn net.Conn) (h []byte, err error) {
	header := make([]byte, 68)
	if err = readFull(conn, header[0:1]); err != nil {
		err = fmt.Errorf("Couldn't read 1st byte: %v", err)
		return
	}
	if header[0] != 19 {
		err = fmt.Errorf("First byte is not 19")
		return
	}
	if err = readFull(conn, header[1:20]); err != nil {
		err = fmt.Errorf("Couldn't read magic string: %v", err)
		return
	}
	if string(header[1:20]) != "BitTorrent protocol" {
		err = fmt.Errorf("Magic string is not correct: %v", string(header[1:20]))
		return
	}
	// Read rest of header
	if err = readFull(conn, header[20:]); err != nil {
		err = fmt.Errorf("Couldn't read rest of header")
		return
	}

	h = make([]byte, 48)
	copy(h, header[20:])
	return
}
net.ResolveUDPAddr("udp4", "239.192.152.143:6771") 37 | if err != nil { 38 | return 39 | } 40 | 41 | conn, err := net.ListenMulticastUDP("udp4", nil, addr) 42 | if err != nil { 43 | return 44 | } 45 | 46 | activeAnnounces := make(map[string]*time.Ticker) 47 | lpd = &Announcer{ 48 | btPort: listenPort, 49 | addr: addr, 50 | conn: conn, 51 | announces: make(chan *Announce), 52 | activeAnnounces: activeAnnounces, 53 | } 54 | 55 | go lpd.run() 56 | return 57 | } 58 | 59 | func (lpd *Announcer) run() { 60 | for { 61 | answer := make([]byte, 256) 62 | _, from, err := lpd.conn.ReadFromUDP(answer) 63 | if err != nil { 64 | log.Println("Error reading from UDP: ", err) 65 | continue 66 | } 67 | 68 | req, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(answer))) 69 | if err != nil { 70 | log.Println("Error reading HTTP request from UDP: ", err) 71 | continue 72 | } 73 | 74 | if req.Method != "BT-SEARCH" { 75 | log.Println("Invalid method: ", req.Method) 76 | } 77 | 78 | ih := req.Header.Get("Infohash") 79 | if ih == "" { 80 | log.Println("No Infohash") 81 | continue 82 | } 83 | ih = strings.ToLower(ih) 84 | 85 | port := req.Header.Get("Port") 86 | if port == "" { 87 | log.Println("No port") 88 | continue 89 | } 90 | 91 | addr, err := net.ResolveTCPAddr("tcp4", from.IP.String()+":"+port) 92 | if err != nil { 93 | log.Println(err) 94 | continue 95 | } 96 | lpd.announces <- &Announce{addr.String(), ih} 97 | } 98 | } 99 | 100 | func (lpd *Announcer) Announce(ih string) { 101 | go func() { 102 | requestMessage := []byte(fmt.Sprintf(request_template, lpd.btPort, 103 | ih)) 104 | 105 | // Announce at launch, then every 5 minutes 106 | _, err := lpd.conn.WriteToUDP(requestMessage, lpd.addr) 107 | if err != nil { 108 | log.Println(err) 109 | } 110 | 111 | ticker := time.NewTicker(5 * time.Minute) 112 | lpd.activeAnnounces[ih] = ticker 113 | 114 | for _ = range ticker.C { 115 | _, err := lpd.conn.WriteToUDP(requestMessage, lpd.addr) 116 | if err != nil { 117 | log.Println(err) 
118 | } 119 | } 120 | }() 121 | } 122 | 123 | func (lpd *Announcer) StopAnnouncing(ih string) { 124 | if ticker, ok := lpd.activeAnnounces[ih]; ok { 125 | ticker.Stop() 126 | delete(lpd.activeAnnounces, ih) 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/hex" 6 | "flag" 7 | "fmt" 8 | "log" 9 | "os" 10 | "os/signal" 11 | "os/user" 12 | "path/filepath" 13 | "runtime/pprof" 14 | "strings" 15 | "time" 16 | 17 | "github.com/rakoo/rakoshare/pkg/id" 18 | "github.com/rakoo/rakoshare/pkg/sharesession" 19 | "github.com/zeebo/bencode" 20 | 21 | "github.com/codegangsta/cli" 22 | ) 23 | 24 | var ( 25 | cpuprofile = flag.String("cpuprofile", "", "If not empty, collects CPU profile samples and writes the profile to the given file before the program exits") 26 | memprofile = flag.String("memprofile", "", "If not empty, writes memory heap allocations to the given file before the program exits") 27 | generate = flag.Bool("gen", false, "If true, generate a 3-tuple of ids") 28 | ) 29 | 30 | var torrent string 31 | 32 | func main() { 33 | flag.Parse() 34 | 35 | if *cpuprofile != "" { 36 | cpuf, err := os.Create(*cpuprofile) 37 | if err != nil { 38 | log.Fatal(err) 39 | } 40 | pprof.StartCPUProfile(cpuf) 41 | defer pprof.StopCPUProfile() 42 | } 43 | 44 | if *memprofile != "" { 45 | defer func(file string) { 46 | memf, err := os.Create(file) 47 | if err != nil { 48 | log.Fatal(err) 49 | } 50 | pprof.WriteHeapProfile(memf) 51 | }(*memprofile) 52 | } 53 | 54 | // Working directory, where all transient stuff happens 55 | u, err := user.Current() 56 | if err != nil { 57 | log.Fatal("Couldn't watch dir: ", err) 58 | } 59 | pathArgs := []string{u.HomeDir, ".local", "share", "rakoshare"} 60 | workDir := filepath.Join(pathArgs...) 
61 | 62 | app := cli.NewApp() 63 | app.Name = "rakoshare" 64 | app.Usage = "Share content with everyone" 65 | app.Commands = []cli.Command{ 66 | { 67 | Name: "gen", 68 | Usage: "Generate a share with a given target directory. Outputs the 3-tuple of id", 69 | Flags: []cli.Flag{ 70 | cli.StringFlag{ 71 | Name: "dir", 72 | Value: "", 73 | Usage: "The directory to share", 74 | }, 75 | }, 76 | Action: func(c *cli.Context) { 77 | if c.String("dir") == "" { 78 | fmt.Println("Need a valid directory!") 79 | fmt.Println("Use the -dir flag") 80 | return 81 | } 82 | err := Generate(c.String("dir"), workDir) 83 | if err != nil { 84 | fmt.Println(err) 85 | } 86 | }, 87 | }, 88 | { 89 | Name: "share", 90 | Usage: "Share the given id", 91 | Flags: []cli.Flag{ 92 | cli.StringFlag{ 93 | Name: "id", 94 | Value: "", 95 | Usage: "The id to share", 96 | }, 97 | cli.StringFlag{ 98 | Name: "dir", 99 | Value: "", 100 | Usage: "If not empty, the dir to share", 101 | }, 102 | cli.StringSliceFlag{ 103 | Name: "tracker", 104 | Value: &cli.StringSlice{}, 105 | Usage: "A tracker to connect to", 106 | }, 107 | cli.BoolTFlag{ 108 | Name: "useLPD", 109 | Usage: "Use Local Peer Discovery", 110 | }, 111 | cli.StringSliceFlag{ 112 | Name: "peer", 113 | Value: &cli.StringSlice{}, 114 | Usage: "A peer to connect to", 115 | }, 116 | }, 117 | Action: func(c *cli.Context) { 118 | if c.String("id") == "" { 119 | fmt.Println("Need an id!") 120 | return 121 | } 122 | Share(c.String("id"), workDir, c.String("dir"), 123 | c.StringSlice("tracker"), c.Bool("useLPD"), 124 | c.StringSlice("peer")) 125 | }, 126 | }, 127 | { 128 | Name: "list", 129 | Usage: "List availables shares", 130 | Action: func(c *cli.Context) { 131 | shares := List(workDir) 132 | for _, s := range shares { 133 | fmt.Printf("Sharing %s in %s: \n", s.folder, s.sessionFile) 134 | fmt.Printf("\tWriteReadStore:\t%s\n\t ReadStore:\t%s\n\t Store:\t%s\n", 135 | s.wrs, s.rs, s.s) 136 | fmt.Println() 137 | } 138 | }, 139 | }, 140 | } 141 | 142 | 
app.Run(os.Args) 143 | } 144 | 145 | type share struct { 146 | sessionFile string 147 | folder string 148 | wrs string 149 | rs string 150 | s string 151 | } 152 | 153 | func List(workDir string) []share { 154 | dir, err := os.Open(workDir) 155 | if err != nil { 156 | log.Fatal(err) 157 | } 158 | names, err := dir.Readdirnames(-1) 159 | if err != nil { 160 | log.Fatal(err) 161 | } 162 | 163 | shares := make([]share, 0, len(names)) 164 | for _, n := range names { 165 | if !strings.HasSuffix(n, ".sql") { 166 | continue 167 | } 168 | session, err := sharesession.New(filepath.Join(workDir, n)) 169 | if err != nil { 170 | continue 171 | } 172 | id := session.GetShareId() 173 | 174 | shares = append(shares, share{ 175 | sessionFile: filepath.Join(workDir, n), 176 | folder: session.GetTarget(), 177 | wrs: id.WRS(), 178 | rs: id.RS(), 179 | s: id.S(), 180 | }) 181 | } 182 | 183 | return shares 184 | } 185 | 186 | func Share(cliId string, workDir string, cliTarget string, trackers []string, useLPD bool, manualPeers []string) { 187 | shareID, err := id.NewFromString(cliId) 188 | if err != nil { 189 | fmt.Printf("Couldn't generate shareId: %s\n", err) 190 | return 191 | } 192 | sessionName := hex.EncodeToString(shareID.Infohash) + ".sql" 193 | session, err := sharesession.New(filepath.Join(workDir, sessionName)) 194 | if err != nil { 195 | log.Fatal("Couldn't open session file: ", err) 196 | } 197 | 198 | fmt.Printf("WriteReadStore:\t%s\n ReadStore:\t%s\n Store:\t%s\n", 199 | shareID.WRS(), shareID.RS(), shareID.S()) 200 | 201 | target := session.GetTarget() 202 | if target == "" { 203 | if cliTarget == "" { 204 | fmt.Println("Need a folder to share!") 205 | return 206 | } 207 | target = cliTarget 208 | session.SaveSession(target, shareID) 209 | } else if cliTarget != "" { 210 | fmt.Printf("Can't override folder already set to %s\n", target) 211 | } 212 | _, err = os.Stat(target) 213 | if err != nil { 214 | if os.IsNotExist(err) { 215 | os.MkdirAll(target, 0744) 216 | } else 
{ 217 | fmt.Printf("%s is an invalid dir: %s\n", target, err) 218 | os.Exit(1) 219 | } 220 | } 221 | 222 | // Watcher 223 | watcher := &Watcher{ 224 | PingNewTorrent: make(chan string), 225 | } 226 | if shareID.CanWrite() { 227 | watcher, err = NewWatcher(session, filepath.Clean(target)) 228 | if err != nil { 229 | log.Fatal("Couldn't start watcher: ", err) 230 | } 231 | } else { 232 | watcher.PingNewTorrent = make(chan string, 1) 233 | watcher.PingNewTorrent <- session.GetCurrentInfohash() 234 | } 235 | 236 | // External listener 237 | conChan, listenPort, err := listenForPeerConnections([]byte(shareID.Psk[:])) 238 | if err != nil { 239 | log.Fatal("Couldn't listen for peers connection: ", err) 240 | } 241 | 242 | var currentSession TorrentSessionI = EmptyTorrent{} 243 | 244 | // quitChan 245 | quitChan := listenSigInt() 246 | 247 | // LPD 248 | lpd := &Announcer{announces: make(chan *Announce)} 249 | if useLPD { 250 | lpd, err = NewAnnouncer(listenPort) 251 | if err != nil { 252 | log.Fatal("Couldn't listen for Local Peer Discoveries: ", err) 253 | } 254 | } 255 | 256 | // Control session 257 | controlSession, err := NewControlSession(shareID, listenPort, session, trackers) 258 | if err != nil { 259 | log.Fatal(err) 260 | } 261 | if useLPD { 262 | lpd.Announce(string(shareID.Infohash)) 263 | } 264 | for _, peer := range manualPeers { 265 | controlSession.backoffHintNewPeer(peer) 266 | } 267 | 268 | peers := session.GetPeers() 269 | for _, p := range peers { 270 | log.Printf("Feeding with known peer: %s\n", p) 271 | controlSession.backoffHintNewPeer(p) 272 | } 273 | 274 | log.Println("Starting.") 275 | 276 | mainLoop: 277 | for { 278 | select { 279 | case <-quitChan: 280 | err := currentSession.Quit() 281 | if err != nil { 282 | log.Println("Failed: ", err) 283 | } else { 284 | log.Println("Done") 285 | } 286 | break mainLoop 287 | case c := <-conChan: 288 | if currentSession.Matches(c.infohash) { 289 | currentSession.AcceptNewPeer(c) 290 | } else if 
controlSession.Matches(c.infohash) { 291 | controlSession.AcceptNewPeer(c) 292 | } 293 | case announce := <-lpd.announces: 294 | hexhash, err := hex.DecodeString(announce.infohash) 295 | if err != nil { 296 | log.Println("Err with hex-decoding:", err) 297 | break 298 | } 299 | if controlSession.Matches(string(hexhash)) { 300 | controlSession.backoffHintNewPeer(announce.peer) 301 | } 302 | case ih := <-watcher.PingNewTorrent: 303 | if ih == controlSession.currentIH && !currentSession.IsEmpty() { 304 | break 305 | } 306 | err := controlSession.SetCurrent(ih) 307 | if err != nil { 308 | log.Fatal("Error setting new current infohash:", err) 309 | } 310 | 311 | currentSession.Quit() 312 | 313 | torrentFile := session.GetCurrentTorrent() 314 | tentativeSession, err := NewTorrentSession(shareID, target, torrentFile, listenPort) 315 | if err != nil { 316 | if !os.IsNotExist(err) { 317 | log.Println("Couldn't start new session from watched dir: ", err) 318 | } 319 | 320 | // Fallback to an emptytorrent, because the previous one is 321 | // invalid; hope it will be ok next time ! 
322 | currentSession = EmptyTorrent{} 323 | break 324 | } 325 | currentSession = tentativeSession 326 | go currentSession.DoTorrent() 327 | 328 | for _, peer := range controlSession.peers.All() { 329 | currentSession.hintNewPeer(peer.address) 330 | } 331 | case announce := <-controlSession.Torrents: 332 | if controlSession.currentIH == announce.infohash && !currentSession.IsEmpty() { 333 | break 334 | } 335 | err := controlSession.SetCurrent(announce.infohash) 336 | if err != nil { 337 | log.Fatal("Error setting new current infohash:", err) 338 | } 339 | 340 | currentSession.Quit() 341 | 342 | log.Println("Opening new torrent session") 343 | magnet := fmt.Sprintf("magnet:?xt=urn:btih:%x", announce.infohash) 344 | tentativeSession, err := NewTorrentSession(shareID, target, magnet, listenPort) 345 | if err != nil { 346 | log.Println("Couldn't start new session from announce: ", err) 347 | currentSession = EmptyTorrent{} 348 | break 349 | } 350 | currentSession = tentativeSession 351 | go currentSession.DoTorrent() 352 | currentSession.hintNewPeer(announce.peer) 353 | case peer := <-controlSession.NewPeers: 354 | if currentSession.IsEmpty() { 355 | magnet := fmt.Sprintf("magnet:?xt=urn:btih:%x", controlSession.currentIH) 356 | tentativeSession, err := NewTorrentSession(shareID, target, magnet, listenPort) 357 | if err != nil { 358 | log.Printf("Couldn't start new session with new peer: %s\n", err) 359 | break 360 | } 361 | currentSession = tentativeSession 362 | go currentSession.DoTorrent() 363 | } 364 | currentSession.hintNewPeer(peer) 365 | case meta := <-currentSession.NewMetaInfo(): 366 | var buf bytes.Buffer 367 | err := bencode.NewEncoder(&buf).Encode(meta) 368 | if err != nil { 369 | log.Println(err) 370 | break 371 | } 372 | session.SaveTorrent(buf.Bytes(), meta.InfoHash, time.Now().Format(time.RFC3339)) 373 | } 374 | } 375 | } 376 | 377 | type EmptyTorrent struct{} 378 | 379 | func (et EmptyTorrent) Quit() error { return nil } 380 | func (et EmptyTorrent) 
Matches(ih string) bool { return false } 381 | func (et EmptyTorrent) AcceptNewPeer(btc *btConn) {} 382 | func (et EmptyTorrent) DoTorrent() {} 383 | func (et EmptyTorrent) hintNewPeer(peer string) bool { return true } 384 | func (et EmptyTorrent) IsEmpty() bool { return true } 385 | func (et EmptyTorrent) NewMetaInfo() chan *MetaInfo { return nil } 386 | 387 | func listenSigInt() chan os.Signal { 388 | c := make(chan os.Signal) 389 | signal.Notify(c, os.Interrupt, os.Kill) 390 | return c 391 | } 392 | -------------------------------------------------------------------------------- /metadata.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "log" 7 | 8 | bencode "github.com/jackpal/bencode-go" 9 | "github.com/rakoo/rakoshare/pkg/bitset" 10 | ) 11 | 12 | type messagetype int 13 | 14 | const ( 15 | METADATA_REQUEST messagetype = iota 16 | METADATA_DATA 17 | METADATA_REJECT 18 | ) 19 | 20 | const ( 21 | METADATA_PIECE_SIZE = 1 << 14 // 16kiB 22 | ) 23 | 24 | type MetadataMessage struct { 25 | MsgType messagetype "msg_type" 26 | Piece int "piece" 27 | TotalSize int "total_size" 28 | } 29 | 30 | func (t *TorrentSession) DoMetadata(msg []byte, p *peerState) { 31 | var message MetadataMessage 32 | err := bencode.Unmarshal(bytes.NewReader(msg), &message) 33 | if err != nil { 34 | log.Println("Error when parsing metadata: ", err) 35 | return 36 | } 37 | 38 | mt := message.MsgType 39 | switch mt { 40 | case METADATA_REQUEST: 41 | if !t.si.HaveTorrent { 42 | break 43 | } 44 | 45 | rawInfo := t.m.RawInfo() 46 | 47 | from := message.Piece * METADATA_PIECE_SIZE 48 | 49 | // Piece asked must be between the first one and the last one. 50 | // Note that the last one will most of the time be smaller than 51 | // METADATA_PIECE_SIZE 52 | if from >= len(rawInfo) { 53 | log.Printf("%d is out of range. 
Not sending this\n", message.Piece) 54 | break 55 | } 56 | 57 | to := from + METADATA_PIECE_SIZE 58 | if to > len(rawInfo) { 59 | to = len(rawInfo) 60 | } 61 | 62 | if _, ok := p.theirExtensions["ut_metadata"]; !ok { 63 | log.Println("%s doesn't understand ut_metadata\n", p.address) 64 | break 65 | } 66 | 67 | respHeader := MetadataMessage{ 68 | MsgType: METADATA_DATA, 69 | Piece: message.Piece, 70 | TotalSize: len(rawInfo), 71 | } 72 | 73 | var resp bytes.Buffer 74 | resp.WriteByte(EXTENSION) 75 | resp.WriteByte(byte(p.theirExtensions["ut_metadata"])) 76 | 77 | err = bencode.Marshal(&resp, respHeader) 78 | if err != nil { 79 | log.Println("Couldn't header metadata response: ", err) 80 | break 81 | } 82 | 83 | resp.Write(rawInfo[from:to]) 84 | p.sendMessage(resp.Bytes()) 85 | 86 | case METADATA_DATA: 87 | 88 | if t.si.HaveTorrent { 89 | break 90 | } 91 | 92 | if message.TotalSize == 0 { 93 | log.Println("No metadata size, bailing out") 94 | return 95 | } 96 | 97 | if message.Piece >= len(t.si.ME.Pieces) { 98 | log.Printf("Rejecting invalid metadata piece %d, max is %d\n", 99 | message.Piece, len(t.si.ME.Pieces)-1) 100 | break 101 | } 102 | 103 | pieceSize := METADATA_PIECE_SIZE 104 | if message.Piece == len(t.si.ME.Pieces)-1 { 105 | pieceSize = message.TotalSize - (message.TotalSize/METADATA_PIECE_SIZE)*METADATA_PIECE_SIZE 106 | } 107 | 108 | t.si.ME.Pieces[message.Piece] = msg[len(msg)-pieceSize:] 109 | 110 | finished := true 111 | for idx, data := range t.si.ME.Pieces { 112 | if len(data) == 0 { 113 | p.sendMetadataRequest(idx) 114 | finished = false 115 | break 116 | } 117 | } 118 | 119 | if !finished { 120 | break 121 | } 122 | 123 | log.Println("Finished downloading metadata!") 124 | var full bytes.Buffer 125 | for _, piece := range t.si.ME.Pieces { 126 | full.Write(piece) 127 | } 128 | info := full.Bytes() 129 | 130 | // Verify sha 131 | sha := sha1.New() 132 | sha.Write(info) 133 | actual := string(sha.Sum(nil)) 134 | if actual != t.m.InfoHash { 135 | 
log.Println("Invalid metadata") 136 | log.Printf("Expected %x, got %x\n", t.m.InfoHash, actual) 137 | break 138 | } 139 | 140 | err = t.reload(info) 141 | if err != nil { 142 | return 143 | } 144 | 145 | if p.have == nil { 146 | if p.temporaryBitfield != nil { 147 | p.have = bitset.NewFromBytes(t.totalPieces, p.temporaryBitfield) 148 | p.temporaryBitfield = nil 149 | } else { 150 | p.have = bitset.New(t.totalPieces) 151 | } 152 | } 153 | if p.have == nil { 154 | log.Panic("Invalid bitfield data") 155 | } 156 | 157 | p.SendBitfield(t.pieceSet) 158 | case METADATA_REJECT: 159 | log.Printf("%d didn't want to send piece %d\n", p.address, message.Piece) 160 | default: 161 | log.Println("Didn't understand metadata extension type: ", mt) 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /metainfo.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "crypto/sha1" 6 | "errors" 7 | "fmt" 8 | "io/ioutil" 9 | "log" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | 14 | "github.com/nictuku/dht" 15 | "github.com/zeebo/bencode" 16 | ) 17 | 18 | type FileDict struct { 19 | Length int64 `bencode:"length"` 20 | Path []string `bencode:"path"` 21 | Md5sum string `bencode:"md5sum,omitempty"` 22 | } 23 | 24 | type InfoDict struct { 25 | PieceLength int64 `bencode:"piece length"` 26 | Pieces string `bencode:"pieces"` 27 | Private int64 `bencode:"private"` 28 | Name string `bencode:"name"` 29 | // Single File Mode 30 | Length int64 `bencode:"length,omitempty"` 31 | Md5sum string `bencode:"md5sum,omitempty"` 32 | // Multiple File mode 33 | Files []*FileDict `bencode:"files,omitempty"` 34 | } 35 | 36 | type MetaInfo struct { 37 | Info *InfoDict `bencode:"info"` 38 | Announce string `bencode:"announce,omitempty"` 39 | AnnounceList [][]string `bencode:"announce-list,omitempty"` 40 | CreationDate int64 `bencode:"creation date,omitempty"` 41 | Comment string 
`bencode:"comment,omitempty"` 42 | CreatedBy string `bencode:"created by,omitempty"` 43 | Encoding string `bencode:"encoding,omitempty"` 44 | 45 | // These are not used for bencoding, only for helping 46 | InfoHash string `bencode:"-"` 47 | rawInfo []byte `bencode:"-"` 48 | } 49 | 50 | func NewMetaInfo(torrent string) (m *MetaInfo, err error) { 51 | if strings.HasPrefix(torrent, "http:") { 52 | return NewMetaInfoFromHTTP(torrent) 53 | } else if strings.HasPrefix(torrent, "magnet:") { 54 | return NewMetaInfoFromMagnet(torrent) 55 | } else { 56 | m, err := NewMetaInfoFromContent([]byte(torrent)) 57 | if err == nil { 58 | return m, nil 59 | } 60 | return NewMetaInfoFromFile(torrent) 61 | } 62 | } 63 | 64 | func NewMetaInfoFromHTTP(torrent string) (m *MetaInfo, err error) { 65 | r, err := proxyHttpGet(torrent) 66 | if err != nil { 67 | return nil, err 68 | } 69 | defer r.Body.Close() 70 | 71 | content, err := ioutil.ReadAll(r.Body) 72 | if err != nil { 73 | return 74 | } 75 | 76 | return NewMetaInfoFromContent(content) 77 | } 78 | 79 | func NewMetaInfoFromFile(torrent string) (m *MetaInfo, err error) { 80 | content, err := ioutil.ReadFile(torrent) 81 | if err != nil { 82 | return 83 | } 84 | 85 | return NewMetaInfoFromContent(content) 86 | } 87 | 88 | func NewMetaInfoFromContent(content []byte) (m *MetaInfo, err error) { 89 | 90 | var m1 MetaInfo 91 | err1 := bencode.DecodeString(string(content), &m1) 92 | if err1 != nil { 93 | err = errors.New("Couldn't parse torrent file: " + err1.Error()) 94 | return 95 | } 96 | 97 | hash := sha1.New() 98 | err1 = bencode.NewEncoder(hash).Encode(m1.Info) 99 | if err1 != nil { 100 | return 101 | } 102 | 103 | m1.InfoHash = string(hash.Sum(nil)) 104 | 105 | return &m1, nil 106 | } 107 | 108 | func NewMetaInfoFromMagnet(torrent string) (m *MetaInfo, err error) { 109 | magnet, err := parseMagnet(torrent) 110 | if err != nil { 111 | log.Println("Couldn't parse magnet: ", err) 112 | return 113 | } 114 | 115 | ih, err := 
dht.DecodeInfoHash(magnet.InfoHashes[0]) 116 | if err != nil { 117 | return 118 | } 119 | 120 | m = &MetaInfo{InfoHash: string(ih)} 121 | return 122 | 123 | } 124 | 125 | func (m *MetaInfo) saveToDisk(dir string) (err error) { 126 | ihhex := fmt.Sprintf("%x", m.InfoHash) 127 | f, err := os.Create(filepath.Join(dir, ihhex)) 128 | if err != nil { 129 | log.Println("Error when opening file for creation: ", err) 130 | return 131 | } 132 | defer f.Close() 133 | 134 | return bencode.NewEncoder(f).Encode(m) 135 | } 136 | 137 | // Returns the size of the representation of this metainfo as a bencoded 138 | // dictionary 139 | func (m *MetaInfo) Size() (sz int) { 140 | var buf bytes.Buffer 141 | err := bencode.NewEncoder(&buf).Encode(m) 142 | if err != nil { 143 | log.Fatal("Couldn't bencode this metainfo: ", err) 144 | } 145 | 146 | return buf.Len() 147 | } 148 | 149 | // Returns the representation of this metainfo's info dict as a 150 | // bencoded dictionary 151 | func (m *MetaInfo) RawInfo() (b []byte) { 152 | if m.rawInfo != nil { 153 | return m.rawInfo 154 | } 155 | if m.Info == nil { 156 | return []byte{} 157 | } 158 | 159 | var buf bytes.Buffer 160 | err := bencode.NewEncoder(&buf).Encode(m.Info) 161 | if err != nil { 162 | log.Fatal("Couldn't bencode this metainfo's dict: ", err) 163 | } 164 | 165 | m.rawInfo = buf.Bytes() 166 | 167 | return m.rawInfo 168 | } 169 | 170 | func getMetaInfo(torrent string) (metaInfo *MetaInfo, err error) { 171 | return NewMetaInfo(torrent) 172 | } 173 | 174 | type SessionInfo struct { 175 | PeerId string 176 | Port int 177 | Uploaded int64 178 | Downloaded int64 179 | Left int64 180 | 181 | //UseDHT bool 182 | FromMagnet bool 183 | HaveTorrent bool 184 | 185 | OurExtensions map[int]string 186 | ME *MetaDataExchange 187 | } 188 | 189 | type MetaDataExchange struct { 190 | Transferring bool 191 | Pieces [][]byte 192 | } 193 | -------------------------------------------------------------------------------- /nat.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net" 5 | ) 6 | 7 | // protocol is either "udp" or "tcp" 8 | type NAT interface { 9 | GetExternalAddress() (addr net.IP, err error) 10 | AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) 11 | DeletePortMapping(protocol string, externalPort, internalPort int) (err error) 12 | } 13 | -------------------------------------------------------------------------------- /natpmp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | natpmp "github.com/jackpal/go-nat-pmp" 6 | "net" 7 | ) 8 | 9 | // Adapt the NAT-PMP protocol to the NAT interface 10 | 11 | // TODO: 12 | // + Register for changes to the external address. 13 | // + Re-register port mapping when router reboots. 14 | // + A mechanism for keeping a port mapping registered. 15 | 16 | type natPMPClient struct { 17 | client *natpmp.Client 18 | } 19 | 20 | func NewNatPMP(gateway net.IP) (nat NAT) { 21 | return &natPMPClient{natpmp.NewClient(gateway)} 22 | } 23 | 24 | func (n *natPMPClient) GetExternalAddress() (addr net.IP, err error) { 25 | response, err := n.client.GetExternalAddress() 26 | if err != nil { 27 | return 28 | } 29 | ip := response.ExternalIPAddress 30 | addr = net.IPv4(ip[0], ip[1], ip[2], ip[3]) 31 | return 32 | } 33 | 34 | func (n *natPMPClient) AddPortMapping(protocol string, externalPort, internalPort int, 35 | description string, timeout int) (mappedExternalPort int, err error) { 36 | if timeout <= 0 { 37 | err = fmt.Errorf("timeout must not be <= 0") 38 | return 39 | } 40 | // Note order of port arguments is switched between our AddPortMapping and the client's AddPortMapping. 
41 | response, err := n.client.AddPortMapping(protocol, internalPort, externalPort, timeout) 42 | if err != nil { 43 | return 44 | } 45 | mappedExternalPort = int(response.MappedExternalPort) 46 | return 47 | } 48 | 49 | func (n *natPMPClient) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { 50 | // To destroy a mapping, send an add-port with 51 | // an internalPort of the internal port to destroy, an external port of zero and a time of zero. 52 | _, err = n.client.AddPortMapping(protocol, internalPort, 0, 0) 53 | return 54 | } 55 | -------------------------------------------------------------------------------- /peer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "io" 7 | "log" 8 | "net" 9 | "time" 10 | 11 | "github.com/rakoo/rakoshare/pkg/bitset" 12 | "github.com/zeebo/bencode" 13 | ) 14 | 15 | const MAX_OUR_REQUESTS = 2 16 | const MAX_PEER_REQUESTS = 10 17 | const STANDARD_BLOCK_LENGTH = 16 * 1024 18 | 19 | type peerMessage struct { 20 | peer *peerState 21 | message []byte // nil means an error occurred 22 | } 23 | 24 | type peerState struct { 25 | address string 26 | id string 27 | writeChan chan []byte 28 | writeChan2 chan []byte 29 | lastWriteTime time.Time 30 | lastReadTime time.Time 31 | have *bitset.Bitset // What the peer has told us it has 32 | conn net.Conn 33 | am_choking bool // this client is choking the peer 34 | am_interested bool // this client is interested in the peer 35 | peer_choking bool // peer is choking this client 36 | peer_interested bool // peer is interested in this client 37 | peer_requests map[uint64]bool 38 | our_requests map[uint64]time.Time // What we requested, when we requested it 39 | 40 | // This field tells if the peer can send a bitfield or not 41 | can_receive_bitfield bool 42 | 43 | // Stores the bitfield they sent us but we can't verify yet (because 44 | // we don't have the torrent 
// queueingWriter shuttles messages from in to out through an unbounded
// FIFO, so senders on in never block on a slow consumer of out. When in
// is closed, anything still queued is discarded and out is closed.
func queueingWriter(in, out chan []byte) {
	pending := make(map[int][]byte)
	writeIdx, readIdx := 0, 0

	for {
		// Only offer a send when the queue is non-empty; a send on a nil
		// channel never proceeds, which makes the select below degrade to
		// a plain blocking receive when we have nothing to deliver.
		var deliver chan []byte
		var next []byte
		if readIdx < writeIdx {
			deliver = out
			next = pending[readIdx]
		}

		select {
		case m, ok := <-in:
			if !ok {
				// Input closed: drop any queued messages (including the
				// nil sent automatically on close) and signal completion.
				close(out)
				return
			}
			pending[writeIdx] = m
			writeIdx++
		case deliver <- next:
			delete(pending, readIdx)
			readIdx++
		}
	}
}
105 | } 106 | 107 | func (p *peerState) AddRequest(index, begin, length uint32) { 108 | if !p.am_choking && len(p.peer_requests) < MAX_PEER_REQUESTS { 109 | offset := (uint64(index) << 32) | uint64(begin) 110 | p.peer_requests[offset] = true 111 | } 112 | } 113 | 114 | func (p *peerState) CancelRequest(index, begin, length uint32) { 115 | offset := (uint64(index) << 32) | uint64(begin) 116 | if _, ok := p.peer_requests[offset]; ok { 117 | delete(p.peer_requests, offset) 118 | } 119 | } 120 | 121 | func (p *peerState) RemoveRequest() (index, begin, length uint32, ok bool) { 122 | for k, _ := range p.peer_requests { 123 | index, begin = uint32(k>>32), uint32(k) 124 | length = STANDARD_BLOCK_LENGTH 125 | ok = true 126 | return 127 | } 128 | return 129 | } 130 | 131 | func (p *peerState) SetChoke(choke bool) { 132 | if choke != p.am_choking { 133 | p.am_choking = choke 134 | b := byte(UNCHOKE) 135 | if choke { 136 | b = CHOKE 137 | p.peer_requests = make(map[uint64]bool, MAX_PEER_REQUESTS) 138 | } 139 | p.sendOneCharMessage(b) 140 | } 141 | } 142 | 143 | func (p *peerState) SetInterested(interested bool) { 144 | if interested != p.am_interested { 145 | // log.Println("SetInterested", interested, p.address) 146 | p.am_interested = interested 147 | b := byte(NOT_INTERESTED) 148 | if interested { 149 | b = INTERESTED 150 | } 151 | p.sendOneCharMessage(b) 152 | } 153 | } 154 | 155 | func (p *peerState) SendBitfield(bs *bitset.Bitset) { 156 | msg := make([]byte, len(bs.Bytes())+1) 157 | msg[0] = BITFIELD 158 | copy(msg[1:], bs.Bytes()) 159 | p.sendMessage(msg) 160 | } 161 | 162 | func (p *peerState) SendExtensions(supportedExtensions map[int]string, 163 | metadataSize int64) { 164 | 165 | handshake := ExtensionHandshake{ 166 | M: make(map[string]int, len(supportedExtensions)), 167 | V: "Taipei-Torrent dev", 168 | MetadataSize: metadataSize, 169 | } 170 | 171 | for i, ext := range supportedExtensions { 172 | handshake.M[ext] = i 173 | } 174 | 175 | var buf bytes.Buffer 176 | 
err := bencode.NewEncoder(&buf).Encode(handshake) 177 | if err != nil { 178 | log.Println("Error when marshalling extension message") 179 | return 180 | } 181 | 182 | msg := make([]byte, 2+buf.Len()) 183 | msg[0] = EXTENSION 184 | msg[1] = EXTENSION_HANDSHAKE 185 | copy(msg[2:], buf.Bytes()) 186 | 187 | p.sendMessage(msg) 188 | } 189 | 190 | func (p *peerState) sendOneCharMessage(b byte) { 191 | // log.Println("ocm", b, p.address) 192 | p.sendMessage([]byte{b}) 193 | } 194 | 195 | func (p *peerState) sendMessage(b []byte) { 196 | p.writeChan <- b 197 | p.lastWriteTime = time.Now() 198 | } 199 | 200 | func (p *peerState) keepAlive(now time.Time) { 201 | if now.Sub(p.lastWriteTime) >= 2*time.Minute { 202 | // log.Stderr("Sending keep alive", p) 203 | p.sendMessage([]byte{}) 204 | } 205 | } 206 | 207 | // There's two goroutines per peer, one to read data from the peer, the other to 208 | // send data to the peer. 209 | 210 | // This func is designed to be run as a goroutine. It 211 | // listens for messages on a channel and sends them to a peer. 212 | 213 | func (p *peerState) peerWriter(errorChan chan peerMessage) { 214 | // log.Println("Writing messages") 215 | for msg := range p.writeChan2 { 216 | payload := make([]byte, 4+len(msg)) 217 | binary.BigEndian.PutUint32(payload[:4], uint32(len(msg))) 218 | copy(payload[4:], msg) 219 | 220 | _, err := p.conn.Write(payload) 221 | if err != nil { 222 | // log.Printf("Failed to write %d bytes to %s: %s\n", len(msg), p.address, err) 223 | break 224 | } 225 | } 226 | // log.Println("peerWriter exiting") 227 | errorChan <- peerMessage{p, nil} 228 | } 229 | 230 | // This func is designed to be run as a goroutine. It 231 | // listens for messages from the peer and forwards them to a channel. 
232 | 233 | func (p *peerState) peerReader(msgChan chan peerMessage) { 234 | // log.Println("Reading messages") 235 | for { 236 | var size [4]byte 237 | _, err := io.ReadFull(p.conn, size[:]) 238 | if err != nil { 239 | if err != io.EOF { 240 | log.Println(err) 241 | } 242 | break 243 | } 244 | 245 | n := binary.BigEndian.Uint32(size[:]) 246 | if n > 130*1024 { 247 | // log.Println("Message size too large: ", n) 248 | break 249 | } 250 | 251 | buf := make([]byte, n) 252 | 253 | _, err = io.ReadFull(p.conn, buf) 254 | if err != nil { 255 | // log.Printf("Failed to read %d bytes from %s: %s\n", len(buf), p.address, err) 256 | break 257 | } 258 | msgChan <- peerMessage{p, buf} 259 | } 260 | 261 | msgChan <- peerMessage{p, nil} 262 | // log.Println("peerReader exiting") 263 | } 264 | 265 | func (p *peerState) sendMetadataRequest(piece int) { 266 | metaMessage := MetadataMessage{ 267 | MsgType: METADATA_REQUEST, 268 | Piece: piece, 269 | } 270 | 271 | p.sendExtensionMessage("ut_metadata", metaMessage) 272 | } 273 | 274 | func (p *peerState) sendExtensionMessage(typ string, data interface{}) { 275 | if _, ok := p.theirExtensions[typ]; !ok { 276 | // They don't understand this extension 277 | return 278 | } 279 | 280 | var payload bytes.Buffer 281 | err := bencode.NewEncoder(&payload).Encode(data) 282 | if err != nil { 283 | log.Printf("Couldn't marshal extension message: ", err) 284 | } 285 | 286 | msg := make([]byte, 2+payload.Len()) 287 | msg[0] = EXTENSION 288 | msg[1] = byte(p.theirExtensions[typ]) 289 | copy(msg[2:], payload.Bytes()) 290 | 291 | p.sendMessage(msg) 292 | } 293 | -------------------------------------------------------------------------------- /peers.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net" 5 | "strconv" 6 | "sync" 7 | ) 8 | 9 | type Peers struct { 10 | sync.Mutex 11 | peerList []*peerState 12 | } 13 | 14 | func newPeers() *Peers { 15 | return &Peers{peerList: 
make([]*peerState, 0)} 16 | } 17 | 18 | func (lp *Peers) Know(peer, id string) bool { 19 | lp.Lock() 20 | defer lp.Unlock() 21 | 22 | for _, p := range lp.peerList { 23 | if p.id != id { 24 | continue 25 | } 26 | 27 | phost, _, err := net.SplitHostPort(p.address) 28 | if err != nil { 29 | return false 30 | } 31 | candidateHost, _, err := net.SplitHostPort(peer) 32 | if err != nil { 33 | return false 34 | } 35 | 36 | if phost == candidateHost { 37 | return true 38 | } 39 | } 40 | 41 | return false 42 | } 43 | 44 | func (lp *Peers) All() []*peerState { 45 | lp.Lock() 46 | defer lp.Unlock() 47 | 48 | ret := make([]*peerState, len(lp.peerList)) 49 | for i, p := range lp.peerList { 50 | ret[i] = p 51 | } 52 | return ret 53 | } 54 | 55 | func (lp *Peers) Len() (l int) { 56 | lp.Lock() 57 | l = len(lp.peerList) 58 | lp.Unlock() 59 | return l 60 | } 61 | 62 | func getConnInfo(conn net.Conn) (remoteHost string, lower, upper int, ok bool) { 63 | _, localPort, err1 := net.SplitHostPort(conn.LocalAddr().String()) 64 | remoteHost, remotePort, err2 := net.SplitHostPort(conn.RemoteAddr().String()) 65 | 66 | if err1 != nil || err2 != nil { 67 | return 68 | } 69 | 70 | localPortInt, err3 := strconv.Atoi(localPort) 71 | remotePortInt, err4 := strconv.Atoi(remotePort) 72 | if err3 != nil || err4 != nil { 73 | return 74 | } 75 | 76 | if localPortInt < remotePortInt { 77 | return remoteHost, localPortInt, remotePortInt, true 78 | } 79 | return remoteHost, remotePortInt, localPortInt, true 80 | } 81 | 82 | // Add compares the new peer to be added, and adds it if it is the 83 | // winner at our duplicate elimination algorithm. In that case we remove 84 | // any other duplicate and return true. 
85 | func (lp *Peers) Add(peer *peerState) (keep bool) { 86 | thisHost, thisLower, thisUpper, ok := getConnInfo(peer.conn) 87 | if !ok { 88 | return 89 | } 90 | 91 | lp.Lock() 92 | defer lp.Unlock() 93 | 94 | toDelete := make([]int, 0) 95 | 96 | for i, p := range lp.peerList { 97 | host, lower, upper, ok := getConnInfo(p.conn) 98 | if !ok { 99 | continue 100 | } 101 | if thisHost != host { 102 | continue 103 | } 104 | 105 | // We already have one that's better. Keep it, and don't add the new 106 | // one 107 | if lower < thisLower || (lower == thisLower && upper < thisUpper) { 108 | return false 109 | } 110 | 111 | // We already have one but it should be removed and the new one be 112 | // added. 113 | toDelete = append(toDelete, i) 114 | } 115 | 116 | // Remove old ones 117 | for _, delIdx := range toDelete { 118 | lp.peerList[delIdx].Close() 119 | lp.peerList[delIdx] = lp.peerList[len(lp.peerList)-1] 120 | lp.peerList = lp.peerList[:len(lp.peerList)] 121 | } 122 | 123 | lp.peerList = append(lp.peerList, peer) 124 | return true 125 | } 126 | 127 | func (lp *Peers) Delete(peer *peerState) { 128 | lp.Lock() 129 | defer lp.Unlock() 130 | 131 | for i, p := range lp.peerList { 132 | if p.address == peer.address { 133 | // We don't care about the order, just put the last one here 134 | lp.peerList[i] = lp.peerList[len(lp.peerList)-1] 135 | lp.peerList = lp.peerList[:len(lp.peerList)-1] 136 | return 137 | } 138 | } 139 | } 140 | 141 | func (lp *Peers) HasPeer(peer string) bool { 142 | lp.Lock() 143 | defer lp.Unlock() 144 | 145 | for _, p := range lp.peerList { 146 | if p.address == peer { 147 | return true 148 | } 149 | } 150 | return false 151 | } 152 | -------------------------------------------------------------------------------- /pex.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "io" 7 | "log" 8 | "time" 9 | 10 | bencode "github.com/jackpal/bencode-go" 11 
| "github.com/nictuku/nettools" 12 | ) 13 | 14 | const ( 15 | PEER_LEN = 6 16 | MAX_PEERS = 50 17 | ) 18 | 19 | const ( 20 | SUPPORTS_ENCRYPTION byte = 1 << iota 21 | IS_SEED 22 | SUPPORTS_UTP 23 | SUPPORTS_HOLE_PUNCHING 24 | ) 25 | 26 | type pexPeer struct { 27 | address string 28 | id string 29 | } 30 | 31 | var ( 32 | lastPeers []pexPeer 33 | ) 34 | 35 | type Flag struct { 36 | SupportsEncryption bool 37 | IsSeed bool 38 | SupportsHolePunching bool 39 | 40 | // This one is a positive flag: if true, it's true. If false, it may 41 | // still be true 42 | SupportsUTP bool 43 | } 44 | 45 | // As specified in libtorrent source: see 46 | // http://sourceforge.net/p/libtorrent/code/HEAD/tree/trunk/src/ut_pex.cpp#l554 47 | func NewFlag(b byte) (f *Flag) { 48 | f = &Flag{ 49 | SupportsEncryption: match(b, SUPPORTS_ENCRYPTION), 50 | IsSeed: match(b, IS_SEED), 51 | SupportsUTP: match(b, SUPPORTS_UTP), 52 | SupportsHolePunching: match(b, SUPPORTS_HOLE_PUNCHING), 53 | } 54 | return 55 | } 56 | 57 | func match(b byte, feature byte) bool { 58 | return b&feature == feature 59 | } 60 | 61 | type PexMessage struct { 62 | Added string "added" 63 | AddedF string "added.f" 64 | Dropped string "dropped" 65 | } 66 | 67 | // The main loop. 68 | // Every minute, we calculate the new pex message and send it to 69 | // everyone. After it's sent, this new pex message becomes the last 70 | // pex message. 71 | // 72 | // A Pex Message has three fields: "added", "addedf" and "dropped". 73 | // 74 | // "added" represents the set of peers we are currently connected to 75 | // that we weren't connected to last time. 76 | // 77 | // "addedf" contains the same peers but instead of their address, it's 78 | // their flags (See the Flag structure for more details) 79 | // 80 | // "dropped" is the set of peers we were connected to last time that 81 | // we aren't currently connected to. 
82 | func (t *TorrentSession) StartPex() { 83 | for _ = range time.Tick(1 * time.Minute) { 84 | newLastPeers := make([]pexPeer, 0) 85 | 86 | numadded := 0 87 | added := "" 88 | addedf := "" 89 | 90 | // TODO randomize to distribute more evenly 91 | for _, peer := range t.peers.All() { 92 | newLastPeers = append(newLastPeers, pexPeer{peer.address, peer.id}) 93 | 94 | if contains(lastPeers, peer) { 95 | continue 96 | } 97 | added += nettools.DottedPortToBinary(peer.address) 98 | 99 | // We don't manage those yet 100 | addedf += string(0x00) 101 | 102 | numadded += 1 103 | if numadded >= MAX_PEERS { 104 | break 105 | } 106 | } 107 | 108 | dropped := "" 109 | for _, lastPeer := range lastPeers { 110 | if !t.peers.Know(lastPeer.address, lastPeer.id) { 111 | dropped += nettools.DottedPortToBinary(lastPeer.address) 112 | } 113 | } 114 | 115 | for _, p := range t.peers.All() { 116 | p.sendExtensionMessage("ut_pex", PexMessage{ 117 | Added: added, 118 | AddedF: addedf, 119 | Dropped: dropped, 120 | }) 121 | } 122 | 123 | lastPeers = newLastPeers 124 | } 125 | } 126 | 127 | func (t *TorrentSession) DoPex(msg []byte, p *peerState) { 128 | var message PexMessage 129 | err := bencode.Unmarshal(bytes.NewReader(msg), &message) 130 | if err != nil { 131 | log.Println("Error when parsing pex: ", err) 132 | return 133 | } 134 | 135 | for _, peer := range stringToPeers(message.Added) { 136 | t.hintNewPeer(peer) 137 | } 138 | 139 | // We don't use those yet, but this is possibly how we would 140 | //for _, flag := range stringToFlags(message.AddedF) { 141 | //log.Printf("Got flags: %#v", flag) 142 | //} 143 | } 144 | 145 | func stringToPeers(in string) (peers []string) { 146 | peers = make([]string, 0) 147 | for i := 0; i < len(in); i += PEER_LEN { 148 | peer := nettools.BinaryToDottedPort(in[i : i+PEER_LEN]) 149 | peers = append(peers, peer) 150 | } 151 | 152 | return 153 | } 154 | 155 | func stringToFlags(in string) (flags []*Flag) { 156 | flags = make([]*Flag, 0) 157 | rd := 
bytes.NewReader([]byte(in)) 158 | for { 159 | var flag uint8 160 | err := binary.Read(rd, binary.BigEndian, &flag) 161 | if err != nil { 162 | if err == io.EOF { 163 | break 164 | } else { 165 | log.Println("Couldn't decode flag into uint8: ", err) 166 | } 167 | } 168 | flags = append(flags, NewFlag(flag)) 169 | } 170 | 171 | return 172 | } 173 | 174 | func contains(all []pexPeer, peer *peerState) bool { 175 | for _, maybe := range all { 176 | if peer.id == maybe.id && peer.address == maybe.address { 177 | return true 178 | } 179 | } 180 | 181 | return false 182 | } 183 | -------------------------------------------------------------------------------- /pieces.go: -------------------------------------------------------------------------------- 1 | // Compute missing pieces for a torrent. 2 | package main 3 | 4 | import ( 5 | "crypto/sha1" 6 | "errors" 7 | "fmt" 8 | "runtime" 9 | 10 | "github.com/rakoo/rakoshare/pkg/bitset" 11 | ) 12 | 13 | func checkPieces(fs FileStore, totalLength int64, m *MetaInfo) (good, bad int, goodBits *bitset.Bitset, err error) { 14 | pieceLength := m.Info.PieceLength 15 | numPieces := int((totalLength + pieceLength - 1) / pieceLength) 16 | goodBits = bitset.New(int(numPieces)) 17 | ref := m.Info.Pieces 18 | if len(ref) != numPieces*sha1.Size { 19 | err = errors.New(fmt.Sprintf("Incorrect Info.Pieces length: expected %d, got %d", len(ref), numPieces*sha1.Size)) 20 | return 21 | } 22 | currentSums, err := computeSums(fs, totalLength, m.Info.PieceLength) 23 | if err != nil { 24 | return 25 | } 26 | for i := 0; i < numPieces; i++ { 27 | base := i * sha1.Size 28 | end := base + sha1.Size 29 | if checkEqual([]byte(ref[base:end]), currentSums[base:end]) { 30 | good++ 31 | goodBits.Set(int(i)) 32 | } else { 33 | fs.SetBad(int64(i) * pieceLength) 34 | bad++ 35 | } 36 | } 37 | return 38 | } 39 | 40 | func checkEqual(ref, current []byte) bool { 41 | for i := 0; i < len(current); i++ { 42 | if ref[i] != current[i] { 43 | return false 44 | } 45 | } 46 | 
// computeSums reads the file content and computes the SHA1 hash for each
// piece. Spawns parallel goroutines to compute the hashes, since each
// computation takes ~30ms.
//
// The returned slice holds numPieces digests concatenated in piece order,
// sha1.Size bytes each. A piece whose hash computation failed is left
// zeroed (hashPiece sends nil data on error), so it simply compares as bad.
func computeSums(fs FileStore, totalLength int64, pieceLength int64) (sums []byte, err error) {
	// Calculate the SHA1 hash for each piece in parallel goroutines.
	// One hasher goroutine per CPU; the small buffer on results lets
	// hashers run slightly ahead of the merge loop below.
	hashes := make(chan chunk)
	results := make(chan chunk, 3)
	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
		go hashPiece(hashes, results)
	}

	// Read file content and send to "pieces", keeping order.
	numPieces := (totalLength + pieceLength - 1) / pieceLength
	go func() {
		for i := int64(0); i < numPieces; i++ {
			piece := make([]byte, pieceLength, pieceLength)
			if i == numPieces-1 {
				// The final piece is usually shorter than pieceLength.
				piece = piece[0 : totalLength-i*pieceLength]
			}
			// Ignore errors. An unreadable piece hashes to the wrong
			// value and is caught by the caller's digest comparison.
			fs.ReadAt(piece, i*pieceLength)
			hashes <- chunk{i: i, data: piece}
		}
		close(hashes)
	}()

	// Merge back the results. Hashes may arrive out of order, so each
	// chunk carries its piece index i and is copied into its own slot.
	sums = make([]byte, sha1.Size*numPieces)
	for i := int64(0); i < numPieces; i++ {
		h := <-results
		copy(sums[h.i*sha1.Size:], h.data)
	}
	return
}
// As defined by the bittorrent protocol, this bitset is big-endian, such that
// the high bit of the first byte is block 0

// Bitset is a fixed-size vector of n bits backed by a byte slice.
type Bitset struct {
	b        []byte
	n        int
	endIndex int  // index of the partially-used last byte, or -1 if every byte is fully used
	endMask  byte // Which bits of the last byte are valid
}

// New returns a Bitset able to hold n bits, all initially clear.
func New(n int) *Bitset {
	endIndex, endOffset := n>>3, n&7
	endMask := ^byte(255 >> byte(endOffset))
	if endOffset == 0 {
		endIndex = -1
	}
	return &Bitset{make([]byte, (n+7)>>3), n, endIndex, endMask}
}

// NewFromBytes creates a new bitset from a given byte stream. Returns nil if
// the data is invalid in some way: wrong length for n bits, or spare bits
// set past bit n-1.
func NewFromBytes(n int, data []byte) *Bitset {
	bitset := New(n)
	if len(bitset.b) != len(data) {
		return nil
	}
	copy(bitset.b, data)
	if bitset.endIndex >= 0 && bitset.b[bitset.endIndex]&(^bitset.endMask) != 0 {
		return nil
	}
	return bitset
}

// Set turns bit index on. Panics if index is out of range.
func (b *Bitset) Set(index int) {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	b.b[index>>3] |= byte(128 >> byte(index&7))
}

// Clear turns bit index off. Panics if index is out of range.
func (b *Bitset) Clear(index int) {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	b.b[index>>3] &= ^byte(128 >> byte(index&7))
}

// IsSet reports whether bit index is on. Panics if index is out of range.
func (b *Bitset) IsSet(index int) bool {
	if index < 0 || index >= b.n {
		panic("Index out of range.")
	}
	return (b.b[index>>3] & byte(128>>byte(index&7))) != 0
}

// AndNot clears in b every bit that is set in b2. Both bitsets must be the
// same size.
func (b *Bitset) AndNot(b2 *Bitset) {
	if b.n != b2.n {
		panic("Unequal bitset sizes")
	}
	for i := 0; i < len(b.b); i++ {
		b.b[i] = b.b[i] & ^b2.b[i]
	}
	b.clearEnd()
}

// clearEnd zeroes the spare bits of the last byte so they cannot leak into
// comparisons or the wire format.
func (b *Bitset) clearEnd() {
	if b.endIndex >= 0 {
		b.b[b.endIndex] &= b.endMask
	}
}

// IsEndValid reports whether the endMask-selected bits of the final byte
// are all zero.
// NOTE(review): this masks with endMask (the valid bits), while
// NewFromBytes validates the spare bits with ^endMask — confirm against
// callers whether ^endMask was intended here. Behavior kept as-is.
func (b *Bitset) IsEndValid() bool {
	if b.endIndex >= 0 {
		return (b.b[b.endIndex] & b.endMask) == 0
	}
	return true
}

// FindNextSet returns the index of the first set bit at or after index,
// or -1 if none.
// TODO: Make this fast
func (b *Bitset) FindNextSet(index int) int {
	for i := index; i < b.n; i++ {
		if (b.b[i>>3] & byte(128>>byte(i&7))) != 0 {
			return i
		}
	}
	return -1
}

// FindNextClear returns the index of the first clear bit at or after index,
// or -1 if none.
// TODO: Make this fast
func (b *Bitset) FindNextClear(index int) int {
	for i := index; i < b.n; i++ {
		if (b.b[i>>3] & byte(128>>byte(i&7))) == 0 {
			return i
		}
	}
	return -1
}

// Bytes returns the underlying storage; callers must not assume a copy.
func (b *Bitset) Bytes() []byte {
	return b.b
}

// IsWithinLimits reports whether n is a valid bit index for this bitset.
// BUG FIX: the original returned true for any negative n, so a caller
// using this as a guard would then panic inside Set/Clear/IsSet.
func (b *Bitset) IsWithinLimits(n int) bool {
	return n >= 0 && n < b.n
}
-------------------------------------------------------------------------------- /pkg/id/id.go: -------------------------------------------------------------------------------- 1 | package id 2 | 3 | import ( 4 | "bytes" 5 | "crypto/rand" 6 | "crypto/sha1" 7 | "errors" 8 | "io" 9 | 10 | "code.google.com/p/go.crypto/scrypt" 11 | 12 | ed "github.com/agl/ed25519" 13 | "github.com/crowsonkb/base58" 14 | ) 15 | 16 | type Role byte 17 | 18 | var ( 19 | ROLE_WRITEREADSTORE Role = 0x1 20 | ROLE_READSTORE Role = 0x2 21 | ROLE_STORE Role = 0x4 22 | ) 23 | 24 | var ( 25 | errInvalidId = errors.New("Invalid id") 26 | errInvalidInfoHash = errors.New("Programming error: Invalid infohash generated") 27 | ) 28 | 29 | type PubKey [ed.PublicKeySize]byte 30 | type PrivKey [ed.PrivateKeySize]byte 31 | type PreSharedKey [32]byte 32 | 33 | // len = 20 34 | type infohash []byte 35 | 36 | func deriveFromSeed(seed [32]byte) (priv PrivKey, pub PubKey, psk PreSharedKey, ih infohash, err error) { 37 | // priv/pub key 38 | tmpPub, tmpPriv, err := ed.GenerateKey(bytes.NewReader(seed[:32])) 39 | if err != nil { 40 | return 41 | } 42 | priv = *tmpPriv 43 | pub = *tmpPub 44 | psk, err = deriveScrypt(pub) 45 | 46 | if err != nil { 47 | return 48 | } 49 | 50 | outscrypt, err := deriveScrypt(psk) 51 | if err != nil { 52 | return 53 | } 54 | ih = outscrypt[:sha1.Size] 55 | 56 | return 57 | } 58 | 59 | func deriveScrypt(in [32]byte) (psk [32]byte, err error) { 60 | out, err := scrypt.Key(in[:], in[:], 1<<16, 8, 1, 32) 61 | if err != nil { 62 | return 63 | } 64 | copy(psk[:], out) 65 | 66 | return 67 | } 68 | 69 | // The ID structure represents all the information a peer needs to know 70 | // to be able to participate in a swarm sharing a given directory. 71 | // 72 | // IDs start from the highest privileged type: a peer handling such an 73 | // ID has the ability to write content to the folder, along with reading 74 | // and storing its content. 
Its human-intelligible value can be 75 | // retrieved from WriteReadStoreID 76 | // 77 | // When derived once, the new ID now has lost the ability to write, but 78 | // can still read and store content. Its human-intelligible value can be 79 | // retrieved from ReadStoreID. 80 | // 81 | // When derived a second time, the new ID can only store content; it 82 | // cannot read it (because the actual content is encrypted). The 83 | // corresponding value is StoreID. 84 | // 85 | // Finally, when derived a third time, we get a infohash like the 86 | // Bittorrent ones, that is used to find peers. 87 | type Id struct { 88 | Priv PrivKey 89 | canWrite bool 90 | 91 | Pub PubKey 92 | canRead bool 93 | 94 | Psk PreSharedKey 95 | 96 | // This is automatically generated from Psk. It must be 20 bytes long 97 | Infohash []byte 98 | } 99 | 100 | func New() (Id, error) { 101 | 102 | var randSeed [32]byte 103 | _, err := io.ReadFull(rand.Reader, randSeed[:]) 104 | if err != nil { 105 | return Id{}, err 106 | } 107 | 108 | priv, pub, psk, ih, err := deriveFromSeed(randSeed) 109 | 110 | id := Id{ 111 | Priv: priv, 112 | canWrite: true, 113 | 114 | Pub: pub, 115 | canRead: true, 116 | 117 | Psk: psk, 118 | 119 | Infohash: ih, 120 | } 121 | 122 | return id, err 123 | } 124 | 125 | func (id Id) CanWrite() bool { 126 | return id.canWrite 127 | } 128 | 129 | func (id Id) CanRead() bool { 130 | return id.canRead 131 | } 132 | 133 | func (id Id) WRS() string { 134 | if !id.CanWrite() { 135 | return "" 136 | } 137 | wrs := make([]byte, len(id.Priv)+1) 138 | wrs[0] = byte(ROLE_WRITEREADSTORE) 139 | copy(wrs[1:], id.Priv[:]) 140 | return base58.Encode(wrs) 141 | } 142 | 143 | func (id Id) RS() string { 144 | if !id.CanRead() { 145 | return "" 146 | } 147 | rs := make([]byte, len(id.Pub)+1) 148 | rs[0] = byte(ROLE_READSTORE) 149 | copy(rs[1:], id.Pub[:]) 150 | return base58.Encode(rs) 151 | } 152 | 153 | func (id Id) S() string { 154 | s := make([]byte, len(id.Psk)+1) 155 | s[0] = 
byte(ROLE_STORE) 156 | copy(s[1:], id.Psk[:]) 157 | return base58.Encode(s) 158 | } 159 | 160 | func NewFromString(in string) (id Id, err error) { 161 | if len(in) <= 1 { 162 | err = errInvalidId 163 | return 164 | } 165 | decoded, err := base58.Decode(in) 166 | if err != nil { 167 | return 168 | } 169 | 170 | role := Role(decoded[0]) 171 | switch role { 172 | case ROLE_WRITEREADSTORE: 173 | if len(decoded[1:]) != ed.PrivateKeySize { 174 | err = errInvalidId 175 | break 176 | } 177 | 178 | priv := decoded[1:] 179 | var privId PrivKey 180 | copy(privId[:], priv) 181 | 182 | pub := priv[32:] 183 | var pubId PubKey 184 | copy(pubId[:], pub) 185 | 186 | psk, err := deriveScrypt(pubId) 187 | if err != nil { 188 | break 189 | } 190 | 191 | outscrypt, err := deriveScrypt(psk) 192 | if err != nil { 193 | break 194 | } 195 | 196 | id = Id{ 197 | Priv: privId, 198 | canWrite: true, 199 | 200 | Pub: pubId, 201 | canRead: true, 202 | 203 | Psk: psk, 204 | 205 | Infohash: outscrypt[:sha1.Size], 206 | } 207 | 208 | case ROLE_READSTORE: 209 | if len(decoded[1:]) != ed.PublicKeySize { 210 | err = errInvalidId 211 | break 212 | } 213 | 214 | pub := decoded[1:] 215 | var pubId PubKey 216 | copy(pubId[:], pub) 217 | 218 | psk, err := deriveScrypt(pubId) 219 | if err != nil { 220 | break 221 | } 222 | 223 | outscrypt, err := deriveScrypt(psk) 224 | if err != nil { 225 | break 226 | } 227 | 228 | id = Id{ 229 | Pub: pubId, 230 | canRead: true, 231 | 232 | Psk: psk, 233 | 234 | Infohash: outscrypt[:sha1.Size], 235 | } 236 | case ROLE_STORE: 237 | if len(decoded[1:]) != 32 { 238 | err = errInvalidId 239 | break 240 | } 241 | 242 | var psk PreSharedKey 243 | copy(psk[:], decoded[1:]) 244 | 245 | outscrypt, err := deriveScrypt(psk) 246 | if err != nil { 247 | break 248 | } 249 | 250 | id = Id{ 251 | Psk: psk, 252 | 253 | Infohash: outscrypt[:sha1.Size], 254 | } 255 | default: 256 | err = errInvalidId 257 | } 258 | 259 | return 260 | } 261 | 
-------------------------------------------------------------------------------- /pkg/id/id_test.go: -------------------------------------------------------------------------------- 1 | package id 2 | 3 | import ( 4 | "encoding/hex" 5 | "testing" 6 | ) 7 | 8 | type vec struct { 9 | WRS string 10 | RS string 11 | S string 12 | } 13 | 14 | var ( 15 | vector1 = vec{ 16 | WRS: "B76BogapG9DGadhsuUd1ikteJKo7iAnMKNjHtAf8KA626SnWuuiVCQaUftG5cGRWUCGFGegTnmRRourTX6WwHor8", 17 | RS: "j4vDxToJfvocsC1BL6pjoYDP8DjugsUWHVyQ6kFHQmh2", 18 | S: "2KqymCfosgEQi3Q6zYLSoqsJSz4mYnxFXNAbMMtPqjSrb", 19 | } 20 | ) 21 | 22 | func TestNew(t *testing.T) { 23 | id, err := New() 24 | if err != nil { 25 | t.Fatal("There should be no error for creating an id!") 26 | } 27 | 28 | if !id.CanWrite() { 29 | t.Fatal("should be able to write") 30 | } 31 | 32 | if !id.CanRead() { 33 | t.Fatal("should be able to read") 34 | } 35 | } 36 | 37 | func TestNewFromStringWRS(t *testing.T) { 38 | _, err := NewFromString("GARBAGE") 39 | if err == nil { 40 | t.Fatal("Garbage shouldn't be accepted!") 41 | } 42 | 43 | id, err := NewFromString(vector1.WRS) 44 | if err != nil { 45 | t.Fatal("Couldn't create id: ", err) 46 | } 47 | 48 | if !id.CanWrite() { 49 | t.Fatal("id should be able to write") 50 | } 51 | 52 | hexpriv := hex.EncodeToString(id.Priv[:]) 53 | expectedhexpriv := "f96bd82f536527c87d7fde59106f9a8573ea6ec183f67d6509bd666aa2f0da8e710bc5f0cd2538d35e093b9041e083d0072f9ba80e5a7f59e08c029afc788cd9" 54 | if hexpriv != expectedhexpriv { 55 | t.Fatalf("Invalid priv: expected %s, got %s", expectedhexpriv, hexpriv) 56 | } 57 | 58 | if !id.CanRead() { 59 | t.Fatal("id should be able to read") 60 | } 61 | 62 | hexpub := hex.EncodeToString(id.Pub[:]) 63 | expectedhexpub := "710bc5f0cd2538d35e093b9041e083d0072f9ba80e5a7f59e08c029afc788cd9" 64 | if hexpub != expectedhexpub { 65 | t.Fatalf("Invalid pub: expected %s, got %s", expectedhexpub, hexpub) 66 | } 67 | 68 | hexpsk := hex.EncodeToString(id.Psk[:]) 69 | 
expectedhexpsk := "75c4442eb37c07ad5df9f131a62046d71d89c3e7210f1e8916b93e2deba611d0" 70 | if hexpsk != expectedhexpsk { 71 | t.Fatalf("Invalid psk: expected %s, got %s", expectedhexpsk, hexpsk) 72 | } 73 | 74 | hexih := hex.EncodeToString(id.Infohash) 75 | expectedih := "ddb859685f8ea3f7f8e81c9980f4447a0b90d246" 76 | if hexih != expectedih { 77 | t.Fatalf("Invalid infohash: expected %s, got %s", expectedih, hexih) 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /pkg/sharesession/sharesession.go: -------------------------------------------------------------------------------- 1 | package sharesession 2 | 3 | import ( 4 | "database/sql" 5 | "log" 6 | "time" 7 | 8 | "github.com/rakoo/rakoshare/pkg/id" 9 | 10 | _ "github.com/mattn/go-sqlite3" 11 | ) 12 | 13 | var ( 14 | Q_SELECT_TORRENT = `SELECT torrent FROM meta` 15 | Q_SELECT_INFOHASH = `SELECT infohash FROM meta` 16 | Q_SELECT_LASTMODTIME = `SELECT lastmodtime FROM meta` 17 | Q_SELECT_IHMESSAGE = `SELECT ihmessage FROM meta` 18 | Q_SELECT_FOLDER = `SELECT folder FROM meta` 19 | Q_INSERT_TORRENT = `UPDATE meta SET torrent=?, infohash=?, lastmodtime=?` 20 | Q_INSERT_IHMESSAGE = `UPDATE meta SET ihmessage=?` 21 | ) 22 | 23 | var ( 24 | CREATESTRINGS = []string{ 25 | `CREATE TABLE meta( 26 | torrent string, 27 | infohash string, 28 | lastmodtime string, 29 | ihmessage string, 30 | folder string, 31 | wrs string, 32 | rs string, 33 | s string 34 | )`, 35 | 36 | `CREATE TABLE peers( 37 | hostport string primary key, 38 | valid_until string 39 | )`, 40 | //`INSERT INTO meta VALUES ("", "", "", "", "")`, 41 | } 42 | ) 43 | 44 | type Session struct { 45 | db *sql.DB 46 | } 47 | 48 | func New(path string) (*Session, error) { 49 | db, err := sql.Open("sqlite3", path) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | err = db.Ping() 55 | if err != nil { 56 | return nil, err 57 | } 58 | 59 | var ph string 60 | err = db.QueryRow("SELECT name FROM sqlite_master WHERE 
type='table' AND name='meta'").Scan(&ph) 61 | if err == sql.ErrNoRows { 62 | for _, str := range CREATESTRINGS { 63 | _, err := db.Exec(str) 64 | if err != nil { 65 | return nil, err 66 | } 67 | } 68 | } 69 | 70 | session := &Session{db} 71 | go session.watchPeers() 72 | return session, nil 73 | } 74 | 75 | func (s *Session) GetCurrentInfohash() string { 76 | return s.getFromMeta(Q_SELECT_INFOHASH) 77 | } 78 | 79 | func (s *Session) GetCurrentTorrent() string { 80 | return s.getFromMeta(Q_SELECT_TORRENT) 81 | } 82 | 83 | func (s *Session) GetLastModTime() time.Time { 84 | t := s.getFromMeta(Q_SELECT_LASTMODTIME) 85 | parsed, err := time.Parse(time.RFC3339, t) 86 | if err != nil { 87 | // Return the beginning of time, so that all files are after this 88 | // date 89 | return time.Unix(0, 0) 90 | } 91 | return parsed 92 | } 93 | 94 | func (s *Session) GetCurrentIHMessage() string { 95 | return s.getFromMeta(Q_SELECT_IHMESSAGE) 96 | } 97 | 98 | func (s *Session) GetTarget() string { 99 | return s.getFromMeta(Q_SELECT_FOLDER) 100 | } 101 | 102 | func (sess *Session) GetShareId() id.Id { 103 | var wrs, rs, s string 104 | err := sess.db.QueryRow(`SELECT wrs, rs, s FROM meta`).Scan(&wrs, &rs, &s) 105 | if err != nil { 106 | return id.Id{} 107 | } 108 | var ret id.Id 109 | switch { 110 | case wrs != "": 111 | ret, _ = id.NewFromString(wrs) 112 | case rs != "": 113 | ret, _ = id.NewFromString(rs) 114 | case s != "": 115 | ret, _ = id.NewFromString(s) 116 | } 117 | 118 | return ret 119 | } 120 | 121 | func (s *Session) getFromMeta(query string) string { 122 | var value string 123 | err := s.db.QueryRow(query).Scan(&value) 124 | if err != nil { 125 | //log.Printf("Can't get from sql: %s", err) 126 | return "" 127 | } 128 | 129 | return value 130 | } 131 | 132 | func (s *Session) SaveTorrent(torrent []byte, infohash, lastModTime string) error { 133 | _, err := s.db.Exec(Q_INSERT_TORRENT, torrent, infohash, lastModTime) 134 | return err 135 | } 136 | 137 | func (s *Session) 
SaveIHMessage(mess []byte) error { 138 | _, err := s.db.Exec(Q_INSERT_IHMESSAGE, mess) 139 | return err 140 | } 141 | 142 | func (s *Session) SaveSession(target string, theid id.Id) error { 143 | _, err := s.db.Exec(`INSERT INTO meta (folder, wrs, rs, s) VALUES (?, ?, ?, ?)`, 144 | target, theid.WRS(), theid.RS(), theid.S()) 145 | return err 146 | } 147 | 148 | func (s *Session) SavePeer(peer string, shouldKeep func(peer string) bool) error { 149 | validUntil := time.Now().Add(24 * time.Hour) 150 | _, err := s.db.Exec(`INSERT OR REPLACE INTO peers VALUES (?, ?)`, peer, validUntil.Format(time.RFC3339)) 151 | if err != nil { 152 | return err 153 | } 154 | 155 | time.AfterFunc(validUntil.Sub(time.Now()), func() { 156 | if !shouldKeep(peer) { 157 | s.deletePeerOrLog(peer, validUntil.Format(time.RFC3339)) 158 | } 159 | }) 160 | return nil 161 | } 162 | 163 | func (s *Session) GetPeers() (peers []string) { 164 | peersWithValidity, err := s.getPeersWithValidity() 165 | if err != nil { 166 | return 167 | } 168 | 169 | peers = make([]string, len(peersWithValidity)) 170 | for i, pwv := range peersWithValidity { 171 | peers[i] = pwv.hostport 172 | } 173 | return peers 174 | } 175 | 176 | type peer struct { 177 | hostport string 178 | validUntil string 179 | } 180 | 181 | func (s *Session) getPeersWithValidity() (peers []peer, err error) { 182 | rows, err := s.db.Query(`SELECT * FROM peers`) 183 | if err != nil { 184 | return nil, err 185 | } 186 | defer rows.Close() 187 | 188 | peers = make([]peer, 0) 189 | for rows.Next() { 190 | var hostport string 191 | var validUntil string 192 | 193 | if errs := rows.Scan(&hostport, &validUntil); errs != nil { 194 | err = errs 195 | break 196 | } 197 | 198 | peers = append(peers, peer{hostport, validUntil}) 199 | } 200 | 201 | if err := rows.Err(); err != nil { 202 | return nil, err 203 | } 204 | return peers, err 205 | } 206 | 207 | func (s *Session) watchPeers() { 208 | peers, err := s.getPeersWithValidity() 209 | if err != nil { 210 | 
log.Fatal(err) 211 | } 212 | 213 | for _, p := range peers { 214 | if eol, err := time.Parse(time.RFC3339, p.validUntil); err != nil { 215 | s.deletePeerOrLog(p.hostport, p.validUntil) 216 | } else { 217 | time.AfterFunc(eol.Sub(time.Now()), func() { s.deletePeerOrLog(p.hostport, p.validUntil) }) 218 | } 219 | } 220 | } 221 | 222 | func (s *Session) deletePeerOrLog(hostport string, validUntil string) { 223 | _, errDel := s.db.Exec(`DELETE FROM peers WHERE hostport = ? AND valid_until = ?`, hostport, validUntil) 224 | if errDel != nil { 225 | log.Printf("Couldn't delete (%s, %s): %s\n", hostport, validUntil, errDel) 226 | } 227 | } 228 | -------------------------------------------------------------------------------- /proxy.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "net" 6 | "net/http" 7 | 8 | "github.com/hailiang/gosocks" 9 | ) 10 | 11 | func init() { 12 | flag.StringVar(&proxyAddress, "proxyAddress", "", "Address of a SOCKS5 proxy to use.") 13 | } 14 | 15 | var proxyAddress string 16 | 17 | func useProxy() bool { 18 | return len(proxyAddress) > 0 19 | } 20 | 21 | func proxyHttpGet(url string) (r *http.Response, e error) { 22 | return proxyHttpClient().Get(url) 23 | } 24 | 25 | func proxyNetDial(netType, addr string) (net.Conn, error) { 26 | if useProxy() { 27 | return socks.DialSocksProxy(socks.SOCKS5, proxyAddress)(netType, addr) 28 | } 29 | return net.Dial(netType, addr) 30 | } 31 | 32 | func proxyHttpClient() (client *http.Client) { 33 | if useProxy() { 34 | dialSocksProxy := socks.DialSocksProxy(socks.SOCKS5, proxyAddress) 35 | tr := &http.Transport{Dial: dialSocksProxy} 36 | client = &http.Client{Transport: tr} 37 | } else { 38 | client = &http.Client{} 39 | } 40 | return 41 | } 42 | -------------------------------------------------------------------------------- /test.bash: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash
# Test the torrent
set -e

go clean
go build -race

# NOTE(review): `go build` names the binary after the directory. This repo
# is rakoshare (see .gitignore/README), not the upstream Taipei-Torrent it
# was forked from, so the old ./Taipei-Torrent invocations could not work.
# Confirm the checkout directory name matches.
# ./rakoshare -fileDir=testData/downloads -port=63881 -useUPnP=true testData/a.torrent
# ./rakoshare -fileDir=testData/downloads -port=63881 -useNATPMP -gateway 192.168.1.1 testData/a.torrent
./rakoshare -fileDir=testData/downloads -port=63881 testData/a.torrent
of Nature and of Nature's God entitle them, a decent respect to the opinions of mankind requires that they should declare the causes which impel them to the separation. 2 | 3 | We hold these truths to be self-evident, that all men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty and the pursuit of Happiness. — That to secure these rights, Governments are instituted among Men, deriving their just powers from the consent of the governed, — That whenever any Form of Government becomes destructive of these ends, it is the Right of the People to alter or to abolish it, and to institute new Government, laying its foundation on such principles and organizing its powers in such form, as to them shall seem most likely to effect their Safety and Happiness. Prudence, indeed, will dictate that Governments long established should not be changed for light and transient causes; and accordingly all experience hath shewn that mankind are more disposed to suffer, while evils are sufferable than to right themselves by abolishing the forms to which they are accustomed. But when a long train of abuses and usurpations, pursuing invariably the same Object evinces a design to reduce them under absolute Despotism, it is their right, it is their duty, to throw off such Government, and to provide new Guards for their future security. — Such has been the patient sufferance of these Colonies; and such is now the necessity which constrains them to alter their former Systems of Government. The history of the present King of Great Britain is a history of repeated injuries and usurpations, all having in direct object the establishment of an absolute Tyranny over these States. To prove this, let Facts be submitted to a candid world. 4 | 5 | He has refused his Assent to Laws, the most wholesome and necessary for the public good. 
6 | 7 | He has forbidden his Governors to pass Laws of immediate and pressing importance, unless suspended in their operation till his Assent should be obtained; and when so suspended, he has utterly neglected to attend to them. 8 | 9 | He has refused to pass other Laws for the accommodation of large districts of people, unless those people would relinquish the right of Representation in the Legislature, a right inestimable to them and formidable to tyrants only. 10 | 11 | He has called together legislative bodies at places unusual, uncomfortable, and distant from the depository of their Public Records, for the sole purpose of fatiguing them into compliance with his measures. 12 | 13 | He has dissolved Representative Houses repeatedly, for opposing with manly firmness his invasions on the rights of the people. 14 | 15 | He has refused for a long time, after such dissolutions, to cause others to be elected, whereby the Legislative Powers, incapable of Annihilation, have returned to the People at large for their exercise; the State remaining in the mean time exposed to all the dangers of invasion from without, and convulsions within. 16 | 17 | He has endeavoured to prevent the population of these States; for that purpose obstructing the Laws for Naturalization of Foreigners; refusing to pass others to encourage their migrations hither, and raising the conditions of new Appropriations of Lands. 18 | 19 | He has obstructed the Administration of Justice by refusing his Assent to Laws for establishing Judiciary Powers. 20 | 21 | He has made Judges dependent on his Will alone for the tenure of their offices, and the amount and payment of their salaries. 22 | 23 | He has erected a multitude of New Offices, and sent hither swarms of Officers to harass our people and eat out their substance. 24 | 25 | He has kept among us, in times of peace, Standing Armies without the Consent of our legislatures. 
26 | 27 | He has affected to render the Military independent of and superior to the Civil Power. 28 | 29 | He has combined with others to subject us to a jurisdiction foreign to our constitution, and unacknowledged by our laws; giving his Assent to their Acts of pretended Legislation: 30 | 31 | For quartering large bodies of armed troops among us: 32 | 33 | For protecting them, by a mock Trial from punishment for any Murders which they should commit on the Inhabitants of these States: 34 | 35 | For cutting off our Trade with all parts of the world: 36 | 37 | For imposing Taxes on us without our Consent: 38 | 39 | For depriving us in many cases, of the benefit of Trial by Jury: 40 | 41 | For transporting us beyond Seas to be tried for pretended offences: 42 | 43 | For abolishing the free System of English Laws in a neighbouring Province, establishing therein an Arbitrary government, and enlarging its Boundaries so as to render it at once an example and fit instrument for introducing the same absolute rule into these Colonies 44 | 45 | For taking away our Charters, abolishing our most valuable Laws and altering fundamentally the Forms of our Governments: 46 | 47 | For suspending our own Legislatures, and declaring themselves invested with power to legislate for us in all cases whatsoever. 48 | 49 | He has abdicated Government here, by declaring us out of his Protection and waging War against us. 50 | 51 | He has plundered our seas, ravaged our coasts, burnt our towns, and destroyed the lives of our people. 52 | 53 | He is at this time transporting large Armies of foreign Mercenaries to compleat the works of death, desolation, and tyranny, already begun with circumstances of Cruelty & Perfidy scarcely paralleled in the most barbarous ages, and totally unworthy the Head of a civilized nation. 
54 | 55 | He has constrained our fellow Citizens taken Captive on the high Seas to bear Arms against their Country, to become the executioners of their friends and Brethren, or to fall themselves by their Hands. 56 | 57 | He has excited domestic insurrections amongst us, and has endeavoured to bring on the inhabitants of our frontiers, the merciless Indian Savages whose known rule of warfare, is an undistinguished destruction of all ages, sexes and conditions. 58 | 59 | In every stage of these Oppressions We have Petitioned for Redress in the most humble terms: Our repeated Petitions have been answered only by repeated injury. A Prince, whose character is thus marked by every act which may define a Tyrant, is unfit to be the ruler of a free people. 60 | 61 | Nor have We been wanting in attentions to our British brethren. We have warned them from time to time of attempts by their legislature to extend an unwarrantable jurisdiction over us. We have reminded them of the circumstances of our emigration and settlement here. We have appealed to their native justice and magnanimity, and we have conjured them by the ties of our common kindred to disavow these usurpations, which would inevitably interrupt our connections and correspondence. They too have been deaf to the voice of justice and of consanguinity. We must, therefore, acquiesce in the necessity, which denounces our Separation, and hold them, as we hold the rest of mankind, Enemies in War, in Peace Friends. 
62 | 63 | We, therefore, the Representatives of the united States of America, in General Congress, Assembled, appealing to the Supreme Judge of the world for the rectitude of our intentions, do, in the Name, and by Authority of the good People of these Colonies, solemnly publish and declare, That these united Colonies are, and of Right ought to be Free and Independent States, that they are Absolved from all Allegiance to the British Crown, and that all political connection between them and the State of Great Britain, is and ought to be totally dissolved; and that as Free and Independent States, they have full Power to levy War, conclude Peace, contract Alliances, establish Commerce, and to do all other Acts and Things which Independent States may of right do. — And for the support of this Declaration, with a firm reliance on the protection of Divine Providence, we mutually pledge to each other our Lives, our Fortunes, and our sacred Honor. 64 | -------------------------------------------------------------------------------- /testdht.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Test the torrent 3 | 4 | ./Taipei-Torrent -fileDir=testData/downloads -port 63881 -useUPnP=false -useDHT -trackerLessMode=true testData/a.torrent 5 | -------------------------------------------------------------------------------- /torrent.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/binary" 6 | "errors" 7 | "flag" 8 | "fmt" 9 | "io" 10 | "log" 11 | "math/rand" 12 | "os" 13 | "strconv" 14 | "strings" 15 | "time" 16 | 17 | "github.com/zeebo/bencode" 18 | 19 | "github.com/rakoo/rakoshare/pkg/bitset" 20 | "github.com/rakoo/rakoshare/pkg/id" 21 | ) 22 | 23 | const ( 24 | MAX_NUM_PEERS = 60 25 | TARGET_NUM_PEERS = 15 26 | ) 27 | 28 | // BitTorrent message types. 
// Sources:
// http://bittorrent.org/beps/bep_0003.html
// http://wiki.theory.org/BitTorrentSpecification
const (
	CHOKE = iota
	UNCHOKE
	INTERESTED
	NOT_INTERESTED
	HAVE
	BITFIELD
	REQUEST
	PIECE
	CANCEL
	PORT // Not implemented. For DHT support.
	EXTENSION = 20
)

const (
	EXTENSION_HANDSHAKE = iota
)

// Should be overridden by flag. Not thread safe.
var gateway string

func init() {
	// If the port is 0, picks up a random port - but the DHT will keep
	// running on port 0 because ListenUDP doesn't do that.
	// Don't use port 6881 which blacklisted by some trackers.
	flag.StringVar(&gateway, "gateway", "", "IP Address of gateway.")
}

// peerId returns a 20-byte peer id built from the pid and a random
// number. The id is right-padded with '0' so that a short pid/random
// combination can never make the final 20-byte slice panic.
func peerId() string {
	sid := "-tt" + strconv.Itoa(os.Getpid()) + "_" + strconv.FormatInt(rand.Int63(), 10)
	if len(sid) < 20 {
		// Extremely unlikely, but sid[0:20] would panic on a short id.
		sid += strings.Repeat("0", 20-len(sid))
	}
	return sid[0:20]
}

var kBitTorrentHeader = []byte{'\x13', 'B', 'i', 't', 'T', 'o', 'r',
	'r', 'e', 'n', 't', ' ', 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l'}

// ActivePiece tracks, for each block of one in-flight piece, how many
// outstanding requests there are for that block.
type ActivePiece struct {
	downloaderCount []int // -1 means the block is already downloaded
	pieceLength     int
}

// chooseBlockToDownload picks and claims the next block to request,
// using the endgame strategy when endgame is true. Returns -1 when no
// block is available.
func (a *ActivePiece) chooseBlockToDownload(endgame bool) (index int) {
	if endgame {
		return a.chooseBlockToDownloadEndgame()
	}
	return a.chooseBlockToDownloadNormal()
}

// chooseBlockToDownloadNormal returns the first block nobody has
// requested yet (claiming it), or -1 if every block is taken or done.
func (a *ActivePiece) chooseBlockToDownloadNormal() (index int) {
	for i, v := range a.downloaderCount {
		if v == 0 {
			a.downloaderCount[i]++
			return i
		}
	}
	return -1
}

// chooseBlockToDownloadEndgame returns the not-yet-downloaded block
// with the fewest outstanding requests (claiming it), or -1 when the
// piece is complete.
func (a *ActivePiece) chooseBlockToDownloadEndgame() (index int) {
	index, minCount := -1, -1
	for i, v := range a.downloaderCount {
		if v >= 0 && (minCount == -1 || minCount > v) {
			index, minCount = i, v
		}
	}
	if index > -1 {
		a.downloaderCount[index]++
	}
	return
}

// recordBlock marks a block as downloaded and returns how many
// requests were outstanding for it.
func (a *ActivePiece) recordBlock(index int) (requestCount int) {
	requestCount = a.downloaderCount[index]
	a.downloaderCount[index] = -1
	return
}
(requestCount int) { 103 | requestCount = a.downloaderCount[index] 104 | a.downloaderCount[index] = -1 105 | return 106 | } 107 | 108 | func (a *ActivePiece) isComplete() bool { 109 | for _, v := range a.downloaderCount { 110 | if v != -1 { 111 | return false 112 | } 113 | } 114 | return true 115 | } 116 | 117 | type TorrentSessionI interface { 118 | NewMetaInfo() chan *MetaInfo 119 | 120 | IsEmpty() bool 121 | Quit() error 122 | Matches(ih string) bool 123 | AcceptNewPeer(btc *btConn) 124 | DoTorrent() 125 | hintNewPeer(peer string) bool 126 | } 127 | 128 | type TorrentSession struct { 129 | m *MetaInfo 130 | si *SessionInfo 131 | torrentHeader []byte 132 | fileStore FileStore 133 | peers *Peers 134 | peerMessageChan chan peerMessage 135 | pieceSet *bitset.Bitset // The pieces we have 136 | totalPieces int 137 | totalSize int64 138 | lastPieceLength int 139 | goodPieces int 140 | activePieces map[int]*ActivePiece 141 | heartbeat chan bool 142 | quit chan bool 143 | 144 | // Where the data lives 145 | target string 146 | 147 | miChan chan *MetaInfo 148 | Id id.Id 149 | } 150 | 151 | func NewTorrentSession(shareId id.Id, target, torrent string, listenPort int) (ts *TorrentSession, err error) { 152 | t := &TorrentSession{ 153 | Id: shareId, 154 | peers: newPeers(), 155 | peerMessageChan: make(chan peerMessage), 156 | activePieces: make(map[int]*ActivePiece), 157 | quit: make(chan bool), 158 | miChan: make(chan *MetaInfo), 159 | target: target, 160 | } 161 | 162 | fromMagnet := strings.HasPrefix(torrent, "magnet:") 163 | t.m, err = getMetaInfo(torrent) 164 | if err != nil { 165 | return 166 | } 167 | 168 | go t.StartPex() 169 | 170 | t.si = &SessionInfo{ 171 | PeerId: peerId(), 172 | Port: listenPort, 173 | FromMagnet: fromMagnet, 174 | HaveTorrent: false, 175 | ME: &MetaDataExchange{}, 176 | OurExtensions: map[int]string{ 177 | 1: "ut_metadata", 178 | 2: "ut_pex", 179 | }, 180 | } 181 | 182 | if !t.si.FromMagnet { 183 | err = t.load() 184 | } 185 | return t, err 186 
| } 187 | 188 | func (t *TorrentSession) NewMetaInfo() chan *MetaInfo { 189 | return t.miChan 190 | } 191 | 192 | func (t *TorrentSession) reload(info []byte) error { 193 | err := bencode.NewDecoder(bytes.NewReader(info)).Decode(&t.m.Info) 194 | if err != nil { 195 | log.Println("Error when reloading torrent: ", err) 196 | return err 197 | } 198 | 199 | t.miChan <- t.m 200 | return t.load() 201 | } 202 | 203 | func (t *TorrentSession) load() error { 204 | var err error 205 | 206 | log.Printf("Tracker: %v, Comment: %v, InfoHash: %x, Encoding: %v, Private: %v", 207 | t.m.AnnounceList, t.m.Comment, t.m.InfoHash, t.m.Encoding, t.m.Info.Private) 208 | if e := t.m.Encoding; e != "" && e != "UTF-8" { 209 | log.Printf("Invalid encoding, couldn't load: %s\n", e) 210 | return errors.New("Invalid encoding: " + e) 211 | } 212 | 213 | t.fileStore, t.totalSize, err = NewFileStore(t.m.Info, t.target) 214 | if err != nil { 215 | log.Fatal("Couldn't create filestore: ", err) 216 | } 217 | t.lastPieceLength = int(t.totalSize % t.m.Info.PieceLength) 218 | if t.lastPieceLength == 0 { // last piece is a full piece 219 | t.lastPieceLength = int(t.m.Info.PieceLength) 220 | } 221 | 222 | log.Println("Starting verification of pieces...") 223 | start := time.Now() 224 | good, bad, pieceSet, err := checkPieces(t.fileStore, t.totalSize, t.m) 225 | if err != nil { 226 | return errors.New(fmt.Sprintf("Error when checking pieces: %s", err)) 227 | } 228 | end := time.Now() 229 | log.Printf("Computed missing pieces (%.2f seconds)", end.Sub(start).Seconds()) 230 | if err != nil { 231 | return err 232 | } 233 | t.pieceSet = pieceSet 234 | t.totalPieces = good + bad 235 | t.goodPieces = good 236 | log.Println("Good pieces:", good, "Bad pieces:", bad) 237 | 238 | left := int64(bad) * int64(t.m.Info.PieceLength) 239 | if !t.pieceSet.IsSet(t.totalPieces - 1) { 240 | left = left - t.m.Info.PieceLength + int64(t.lastPieceLength) 241 | } 242 | 243 | if left == 0 { 244 | err := t.fileStore.Cleanup() 245 | 
if err != nil { 246 | log.Println("Couldn't cleanup correctly: ", err) 247 | } 248 | } 249 | 250 | t.si.HaveTorrent = true 251 | return nil 252 | } 253 | 254 | func (t *TorrentSession) IsEmpty() bool { 255 | return false 256 | } 257 | 258 | func (ts *TorrentSession) Header() (header []byte) { 259 | if ts.torrentHeader != nil { 260 | return ts.torrentHeader 261 | } 262 | 263 | header = make([]byte, 68) 264 | copy(header, kBitTorrentHeader[0:]) 265 | 266 | // Support Extension Protocol (BEP-0010) 267 | header[25] |= 0x10 268 | 269 | copy(header[28:48], []byte(ts.m.InfoHash)) 270 | copy(header[48:68], []byte(ts.si.PeerId)) 271 | 272 | ts.torrentHeader = header 273 | 274 | return 275 | } 276 | 277 | func (ts *TorrentSession) hintNewPeer(peer string) (isnew bool) { 278 | if ts.peers.Know(peer, "") { 279 | return false 280 | } 281 | 282 | go ts.connectToPeer(peer) 283 | return true 284 | } 285 | 286 | func (ts *TorrentSession) connectToPeer(peer string) { 287 | conn, err := NewTCPConn([]byte(ts.Id.Psk[:]), peer) 288 | if err != nil { 289 | log.Println("Failed to connect to", peer, err) 290 | return 291 | } 292 | 293 | _, err = conn.Write(ts.Header()) 294 | if err != nil { 295 | log.Println("Failed to send header to", peer, err) 296 | return 297 | } 298 | 299 | theirheader, err := readHeader(conn) 300 | if err != nil { 301 | log.Printf("Failed to read header from %s: %s", peer, err) 302 | return 303 | } 304 | 305 | peersInfoHash := string(theirheader[8:28]) 306 | id := string(theirheader[28:48]) 307 | 308 | // If it's us, we don't need to continue 309 | if id == ts.si.PeerId { 310 | conn.Close() 311 | return 312 | } 313 | 314 | btconn := &btConn{ 315 | header: theirheader, 316 | infohash: peersInfoHash, 317 | id: id, 318 | conn: conn, 319 | } 320 | ts.AddPeer(btconn) 321 | } 322 | 323 | func (t *TorrentSession) AcceptNewPeer(btconn *btConn) { 324 | // If it's us, we don't need to continue 325 | if btconn.id == t.si.PeerId { 326 | btconn.conn.Close() 327 | return 328 | } 
329 | 330 | _, err := btconn.conn.Write(t.Header()) 331 | if err != nil { 332 | return 333 | } 334 | t.AddPeer(btconn) 335 | } 336 | 337 | func (t *TorrentSession) AddPeer(btconn *btConn) { 338 | theirheader := btconn.header 339 | 340 | peer := btconn.conn.RemoteAddr().String() 341 | if t.peers.Len() >= MAX_NUM_PEERS { 342 | log.Println("We have enough peers. Rejecting additional peer", peer) 343 | btconn.conn.Close() 344 | return 345 | } 346 | ps := NewPeerState(btconn.conn) 347 | ps.address = peer 348 | ps.id = btconn.id 349 | 350 | if keep := t.peers.Add(ps); !keep { 351 | log.Printf("[TORRENT] Not keeping %s -- %s\n", ps.address, ps.id) 352 | return 353 | } 354 | 355 | if int(theirheader[5])&0x10 == 0x10 { 356 | ps.SendExtensions(t.si.OurExtensions, int64(len(t.m.RawInfo()))) 357 | 358 | if t.si.HaveTorrent { 359 | ps.SendBitfield(t.pieceSet) 360 | } 361 | } else { 362 | ps.SendBitfield(t.pieceSet) 363 | } 364 | 365 | if t.si.HaveTorrent { 366 | // By default, a peer has no pieces. If it has pieces, it should send 367 | // a BITFIELD message as a first message 368 | ps.have = bitset.New(t.totalPieces) 369 | } 370 | 371 | // Note that we need to launch these at the end of initialisation, so 372 | // we are sure that the message we buffered previously will be the 373 | // first to be sent. 
374 | go ps.peerWriter(t.peerMessageChan) 375 | go ps.peerReader(t.peerMessageChan) 376 | 377 | log.Printf("[TORRENT] AddPeer: added %s\n", btconn.conn.RemoteAddr().String()) 378 | } 379 | 380 | func (t *TorrentSession) ClosePeer(peer *peerState) { 381 | if t.si.ME != nil && !t.si.ME.Transferring { 382 | t.si.ME.Transferring = false 383 | } 384 | 385 | t.removeRequests(peer) 386 | t.peers.Delete(peer) 387 | peer.Close() 388 | } 389 | 390 | func (t *TorrentSession) deadlockDetector(quit chan struct{}) { 391 | lastHeartbeat := time.Now() 392 | 393 | deadlockLoop: 394 | for { 395 | select { 396 | case <-quit: 397 | break deadlockLoop 398 | case <-t.heartbeat: 399 | lastHeartbeat = time.Now() 400 | case <-time.After(15 * time.Second): 401 | age := time.Now().Sub(lastHeartbeat) 402 | log.Println("Starvation or deadlock of main thread detected. Look in the stack dump for what DoTorrent() is currently doing.") 403 | log.Println("Last heartbeat", age.Seconds(), "seconds ago") 404 | panic("Killed by deadlock detector") 405 | } 406 | } 407 | } 408 | 409 | func (t *TorrentSession) Quit() (err error) { 410 | t.quit <- true 411 | for _, peer := range t.peers.All() { 412 | t.ClosePeer(peer) 413 | } 414 | return nil 415 | } 416 | 417 | func (t *TorrentSession) DoTorrent() { 418 | t.heartbeat = make(chan bool, 1) 419 | quitDeadlock := make(chan struct{}) 420 | go t.deadlockDetector(quitDeadlock) 421 | 422 | log.Println("[CURRENT] Start") 423 | 424 | rechokeChan := time.Tick(10 * time.Second) 425 | verboseChan := time.Tick(10 * time.Minute) 426 | keepAliveChan := time.Tick(60 * time.Second) 427 | 428 | for { 429 | select { 430 | case pm := <-t.peerMessageChan: 431 | peer, message := pm.peer, pm.message 432 | peer.lastReadTime = time.Now() 433 | err2 := t.DoMessage(peer, message) 434 | if err2 != nil { 435 | if err2 != io.EOF { 436 | log.Println("Closing peer", peer.address, "because", err2) 437 | } 438 | t.ClosePeer(peer) 439 | } 440 | case <-rechokeChan: 441 | // TODO: recalculate 
who to choke / unchoke 442 | 443 | // Try to have at least 1 active piece per peer + 1 active piece 444 | if len(t.activePieces) < t.peers.Len()+1 { 445 | for _, peer := range t.peers.All() { 446 | t.RequestBlock(peer) 447 | } 448 | } 449 | 450 | t.heartbeat <- true 451 | case <-verboseChan: 452 | ratio := float64(0.0) 453 | if t.si.Downloaded > 0 { 454 | ratio = float64(t.si.Uploaded) / float64(t.si.Downloaded) 455 | } 456 | log.Printf("[CURRENT] Peers: %d, good/total: %d/%d, ratio: %f\n", 457 | t.peers.Len(), t.goodPieces, t.totalPieces, ratio) 458 | case <-keepAliveChan: 459 | now := time.Now() 460 | for _, peer := range t.peers.All() { 461 | if peer.lastReadTime.Second() != 0 && now.Sub(peer.lastReadTime) > 3*time.Minute { 462 | // log.Println("Closing peer", peer.address, "because timed out.") 463 | t.ClosePeer(peer) 464 | continue 465 | } 466 | err2 := t.doCheckRequests(peer) 467 | if err2 != nil { 468 | if err2 != io.EOF { 469 | log.Println("Closing peer", peer.address, "because", err2) 470 | } 471 | t.ClosePeer(peer) 472 | continue 473 | } 474 | peer.keepAlive(now) 475 | } 476 | 477 | case <-t.quit: 478 | log.Println("Quitting torrent session") 479 | quitDeadlock <- struct{}{} 480 | return 481 | } 482 | } 483 | 484 | } 485 | 486 | func (t *TorrentSession) RequestBlock(p *peerState) (err error) { 487 | for k, _ := range t.activePieces { 488 | if p.have.IsSet(k) { 489 | err = t.RequestBlock2(p, k, false) 490 | if err != io.EOF { 491 | return 492 | } 493 | } 494 | } 495 | // No active pieces. (Or no suitable active pieces.) Pick one 496 | piece := t.ChoosePiece(p) 497 | if piece < 0 { 498 | // No unclaimed pieces. 
See if we can double-up on an active piece 499 | for k, _ := range t.activePieces { 500 | if p.have.IsSet(k) { 501 | err = t.RequestBlock2(p, k, true) 502 | if err != io.EOF { 503 | return 504 | } 505 | } 506 | } 507 | } 508 | if piece >= 0 { 509 | pieceLength := int(t.m.Info.PieceLength) 510 | if piece == t.totalPieces-1 { 511 | pieceLength = t.lastPieceLength 512 | } 513 | pieceCount := (pieceLength + STANDARD_BLOCK_LENGTH - 1) / STANDARD_BLOCK_LENGTH 514 | t.activePieces[piece] = &ActivePiece{make([]int, pieceCount), pieceLength} 515 | return t.RequestBlock2(p, piece, false) 516 | } else { 517 | p.SetInterested(false) 518 | } 519 | return 520 | } 521 | 522 | func (t *TorrentSession) ChoosePiece(p *peerState) (piece int) { 523 | n := t.totalPieces 524 | start := rand.Intn(n) 525 | piece = t.checkRange(p, start, n) 526 | if piece == -1 { 527 | piece = t.checkRange(p, 0, start) 528 | } 529 | return 530 | } 531 | 532 | func (t *TorrentSession) checkRange(p *peerState, start, end int) (piece int) { 533 | for i := start; i < end; i++ { 534 | if !t.pieceSet.IsSet(i) && p.have.IsSet(i) { 535 | if _, ok := t.activePieces[i]; !ok { 536 | return i 537 | } 538 | } 539 | } 540 | return -1 541 | } 542 | 543 | func (t *TorrentSession) RequestBlock2(p *peerState, piece int, endGame bool) (err error) { 544 | v := t.activePieces[piece] 545 | for { 546 | block := v.chooseBlockToDownload(endGame) 547 | if block >= 0 { 548 | t.requestBlockImp(p, piece, block, true) 549 | } else { 550 | break 551 | } 552 | } 553 | return 554 | } 555 | 556 | // Request or cancel a block 557 | func (t *TorrentSession) requestBlockImp(p *peerState, piece int, block int, request bool) { 558 | begin := block * STANDARD_BLOCK_LENGTH 559 | req := make([]byte, 13) 560 | opcode := byte(REQUEST) 561 | if !request { 562 | opcode = byte(CANCEL) 563 | } 564 | length := STANDARD_BLOCK_LENGTH 565 | if piece == t.totalPieces-1 { 566 | left := t.lastPieceLength - begin 567 | if left < length { 568 | length = left 569 
| } 570 | } 571 | // log.Println("Requesting block", piece, ".", block, length, request) 572 | req[0] = opcode 573 | binary.BigEndian.PutUint32(req[1:5], uint32(piece)) 574 | binary.BigEndian.PutUint32(req[5:9], uint32(begin)) 575 | binary.BigEndian.PutUint32(req[9:13], uint32(length)) 576 | requestIndex := (uint64(piece) << 32) | uint64(begin) 577 | if !request { 578 | delete(p.our_requests, requestIndex) 579 | } else { 580 | p.our_requests[requestIndex] = time.Now() 581 | } 582 | p.sendMessage(req) 583 | return 584 | } 585 | 586 | func (t *TorrentSession) RecordBlock(p *peerState, piece, begin, length uint32) (err error) { 587 | block := begin / STANDARD_BLOCK_LENGTH 588 | // log.Println("Received block", piece, ".", block) 589 | requestIndex := (uint64(piece) << 32) | uint64(begin) 590 | delete(p.our_requests, requestIndex) 591 | v, ok := t.activePieces[int(piece)] 592 | if ok { 593 | requestCount := v.recordBlock(int(block)) 594 | if requestCount > 1 { 595 | // Someone else has also requested this, so send cancel notices 596 | for _, peer := range t.peers.All() { 597 | if p != peer { 598 | if _, ok := peer.our_requests[requestIndex]; ok { 599 | t.requestBlockImp(peer, int(piece), int(block), false) 600 | requestCount-- 601 | } 602 | } 603 | } 604 | } 605 | t.si.Downloaded += int64(length) 606 | if v.isComplete() { 607 | delete(t.activePieces, int(piece)) 608 | ok, err = checkPiece(t.fileStore, t.totalSize, t.m, int(piece)) 609 | if !ok || err != nil { 610 | log.Println("Closing peer that sent a bad piece", piece, p.id, err) 611 | p.Close() 612 | return 613 | } 614 | t.si.Left -= int64(v.pieceLength) 615 | t.pieceSet.Set(int(piece)) 616 | t.goodPieces++ 617 | log.Println("Have", t.goodPieces, "of", t.totalPieces, "pieces.") 618 | if t.goodPieces == t.totalPieces { 619 | log.Println("We're complete!") 620 | err := t.fileStore.Cleanup() 621 | if err != nil { 622 | log.Println("Couldn't cleanup correctly: ", err) 623 | } 624 | 625 | // TODO: Drop connections to all 
seeders. 626 | } 627 | for _, p := range t.peers.All() { 628 | if p.have != nil { 629 | if p.have.IsSet(int(piece)) { 630 | // We don't do anything special. We rely on the caller 631 | // to decide if this peer is still interesting. 632 | } else { 633 | // log.Println("...telling ", p) 634 | haveMsg := make([]byte, 5) 635 | haveMsg[0] = HAVE 636 | binary.BigEndian.PutUint32(haveMsg[1:5], uint32(piece)) 637 | p.sendMessage(haveMsg) 638 | } 639 | } 640 | } 641 | } 642 | } else { 643 | log.Println("Received a block we already have.", piece, block, p.address) 644 | } 645 | return 646 | } 647 | 648 | func (t *TorrentSession) doChoke(p *peerState) (err error) { 649 | p.peer_choking = true 650 | err = t.removeRequests(p) 651 | return 652 | } 653 | 654 | func (t *TorrentSession) removeRequests(p *peerState) (err error) { 655 | for k, _ := range p.our_requests { 656 | piece := int(k >> 32) 657 | begin := int(k & 0xffffffff) 658 | block := begin / STANDARD_BLOCK_LENGTH 659 | // log.Println("Forgetting we requested block ", piece, ".", block) 660 | t.removeRequest(piece, block) 661 | } 662 | p.our_requests = make(map[uint64]time.Time, MAX_OUR_REQUESTS) 663 | return 664 | } 665 | 666 | func (t *TorrentSession) removeRequest(piece, block int) { 667 | v, ok := t.activePieces[piece] 668 | if ok && v.downloaderCount[block] > 0 { 669 | v.downloaderCount[block]-- 670 | } 671 | } 672 | 673 | func (t *TorrentSession) doCheckRequests(p *peerState) (err error) { 674 | now := time.Now() 675 | for k, v := range p.our_requests { 676 | if now.Sub(v).Seconds() > 30 { 677 | piece := int(k >> 32) 678 | block := int(k&0xffffffff) / STANDARD_BLOCK_LENGTH 679 | // log.Println("timing out request of", piece, ".", block) 680 | t.removeRequest(piece, block) 681 | } 682 | } 683 | return 684 | } 685 | 686 | func (t *TorrentSession) DoMessage(p *peerState, message []byte) (err error) { 687 | if message == nil { 688 | return io.EOF // The reader or writer goroutine has exited 689 | } 690 | if 
len(message) == 0 { // keep alive 691 | return 692 | } 693 | 694 | if t.si.HaveTorrent { 695 | err = t.generalMessage(message, p) 696 | } else { 697 | err = t.extensionMessage(message, p) 698 | } 699 | return 700 | } 701 | 702 | func (t *TorrentSession) extensionMessage(message []byte, p *peerState) (err error) { 703 | switch message[0] { 704 | case CHOKE: 705 | p.peer_choking = true 706 | case UNCHOKE: 707 | p.peer_choking = false 708 | case BITFIELD: 709 | p.SetChoke(false) // TODO: better choke policy 710 | 711 | p.temporaryBitfield = make([]byte, len(message[1:])) 712 | copy(p.temporaryBitfield, message[1:]) 713 | p.can_receive_bitfield = false 714 | case EXTENSION: 715 | err := t.DoExtension(message[1:], p) 716 | if err != nil { 717 | log.Printf("Failed extensions for %s: %s\n", p.address, err) 718 | } 719 | } 720 | return 721 | } 722 | 723 | func (t *TorrentSession) generalMessage(message []byte, p *peerState) (err error) { 724 | messageId := message[0] 725 | 726 | switch messageId { 727 | case CHOKE: 728 | // log.Println("choke", p.address) 729 | if len(message) != 1 { 730 | return errors.New("Unexpected length") 731 | } 732 | err = t.doChoke(p) 733 | case UNCHOKE: 734 | // log.Println("unchoke", p.address) 735 | if len(message) != 1 { 736 | return errors.New("Unexpected length") 737 | } 738 | p.peer_choking = false 739 | for i := 0; i < MAX_OUR_REQUESTS; i++ { 740 | err = t.RequestBlock(p) 741 | if err != nil { 742 | return 743 | } 744 | } 745 | case INTERESTED: 746 | // log.Println("interested", p) 747 | if len(message) != 1 { 748 | return errors.New("Unexpected length") 749 | } 750 | p.peer_interested = true 751 | 752 | // TODO: Consider better unchoking policy (this is needed for 753 | // clients like Transmission who don't send a BITFIELD so we have to 754 | // unchoke them at this moment) 755 | p.SetChoke(false) 756 | case NOT_INTERESTED: 757 | // log.Println("not interested", p) 758 | if len(message) != 1 { 759 | return errors.New("Unexpected length") 
760 | } 761 | p.peer_interested = false 762 | case HAVE: 763 | if len(message) != 5 { 764 | return errors.New("Unexpected length") 765 | } 766 | piece := binary.BigEndian.Uint32(message[1:]) 767 | if p.have.IsWithinLimits(int(piece)) { 768 | log.Printf("[TORRENT] Set have at %d for %s\n", piece, p.address) 769 | p.have.Set(int(piece)) 770 | if !p.am_interested && !t.pieceSet.IsSet(int(piece)) { 771 | p.SetInterested(true) 772 | 773 | log.Printf("[TORRENT] %s has %d, asking for it", p.address, piece) 774 | // TODO DRY up, this is a copy paste from RequestBlock 775 | pieceLength := int(t.m.Info.PieceLength) 776 | if int(piece) == t.totalPieces-1 { 777 | pieceLength = t.lastPieceLength 778 | } 779 | pieceCount := (pieceLength + STANDARD_BLOCK_LENGTH - 1) / STANDARD_BLOCK_LENGTH 780 | t.activePieces[int(piece)] = &ActivePiece{make([]int, pieceCount), pieceLength} 781 | t.RequestBlock2(p, int(piece), false) 782 | } 783 | } else { 784 | return errors.New("have index is out of range.") 785 | } 786 | case BITFIELD: 787 | // log.Println("bitfield", p.address) 788 | if !p.can_receive_bitfield { 789 | return errors.New("Late bitfield operation") 790 | } 791 | p.SetChoke(false) // TODO: better choke policy 792 | 793 | p.have = bitset.NewFromBytes(t.totalPieces, message[1:]) 794 | if p.have == nil { 795 | return errors.New("Invalid bitfield data.") 796 | } 797 | 798 | t.checkInteresting(p) 799 | p.can_receive_bitfield = false 800 | 801 | if p.peer_choking == false { 802 | for i := 0; i < MAX_OUR_REQUESTS; i++ { 803 | err = t.RequestBlock(p) 804 | if err != nil { 805 | return 806 | } 807 | } 808 | } 809 | case REQUEST: 810 | if len(message) != 13 { 811 | return errors.New("Unexpected message length") 812 | } 813 | index := binary.BigEndian.Uint32(message[1:5]) 814 | begin := binary.BigEndian.Uint32(message[5:9]) 815 | length := binary.BigEndian.Uint32(message[9:13]) 816 | if !p.have.IsWithinLimits(int(index)) { 817 | return errors.New("piece out of range.") 818 | } 819 | if 
!t.pieceSet.IsSet(int(index)) { 820 | return errors.New("we don't have that piece.") 821 | } 822 | if int64(begin) >= t.m.Info.PieceLength { 823 | return errors.New("begin out of range.") 824 | } 825 | if int64(begin)+int64(length) > t.m.Info.PieceLength { 826 | return errors.New("begin + length out of range.") 827 | } 828 | // TODO: Asynchronous 829 | // p.AddRequest(index, begin, length) 830 | return t.sendRequest(p, index, begin, length) 831 | case PIECE: 832 | // piece 833 | if len(message) < 9 { 834 | return errors.New("unexpected message length") 835 | } 836 | index := binary.BigEndian.Uint32(message[1:5]) 837 | begin := binary.BigEndian.Uint32(message[5:9]) 838 | length := len(message) - 9 839 | if !p.have.IsWithinLimits(int(index)) { 840 | return errors.New("piece out of range.") 841 | } 842 | if t.pieceSet.IsSet(int(index)) { 843 | // We already have that piece, keep going 844 | break 845 | } 846 | if int64(begin) >= t.m.Info.PieceLength { 847 | return errors.New("begin out of range.") 848 | } 849 | if int64(begin)+int64(length) > t.m.Info.PieceLength { 850 | return errors.New("begin + length out of range.") 851 | } 852 | if length > 128*1024 { 853 | return errors.New("Block length too large.") 854 | } 855 | globalOffset := int64(index)*t.m.Info.PieceLength + int64(begin) 856 | _, err = t.fileStore.WriteAt(message[9:], globalOffset) 857 | if err != nil { 858 | return err 859 | } 860 | t.RecordBlock(p, index, begin, uint32(length)) 861 | err = t.RequestBlock(p) 862 | case CANCEL: 863 | // log.Println("cancel") 864 | if len(message) != 13 { 865 | return errors.New("Unexpected message length") 866 | } 867 | index := binary.BigEndian.Uint32(message[1:5]) 868 | begin := binary.BigEndian.Uint32(message[5:9]) 869 | length := binary.BigEndian.Uint32(message[9:13]) 870 | if !p.have.IsWithinLimits(int(index)) { 871 | return errors.New("piece out of range.") 872 | } 873 | if !t.pieceSet.IsSet(int(index)) { 874 | return errors.New("we don't have that piece.") 875 | } 
876 | if int64(begin) >= t.m.Info.PieceLength { 877 | return errors.New("begin out of range.") 878 | } 879 | if int64(begin)+int64(length) > t.m.Info.PieceLength { 880 | return errors.New("begin + length out of range.") 881 | } 882 | if length != STANDARD_BLOCK_LENGTH { 883 | return errors.New("Unexpected block length.") 884 | } 885 | p.CancelRequest(index, begin, length) 886 | case PORT: 887 | // TODO: Implement this message. 888 | // We see peers sending us 16K byte messages here, so 889 | // it seems that we don't understand what this is. 890 | if len(message) != 3 { 891 | return fmt.Errorf("Unexpected length for port message: %d", len(message)) 892 | } 893 | case EXTENSION: 894 | err := t.DoExtension(message[1:], p) 895 | if err != nil { 896 | log.Printf("Failed extensions for %s: %s\n", p.address, err) 897 | } 898 | 899 | default: 900 | return errors.New(fmt.Sprintf("Uknown message id: %d\n", messageId)) 901 | } 902 | 903 | return 904 | } 905 | 906 | type ExtensionHandshake struct { 907 | M map[string]int `bencode:"m"` 908 | V string `bencode:"v"` 909 | P uint16 `bencode:"p,omitempty"` 910 | Yourip string `bencode:"yourip,omitempty"` 911 | Ipv6 string `bencode:"ipv6,omitempty"` 912 | Ipv4 string `bencode:"ipv4,omitempty"` 913 | Reqq uint16 `bencode:"reqq,omitempty"` 914 | MetadataSize int64 `bencode:"metadata_size,omitempty"` 915 | } 916 | 917 | func (t *TorrentSession) DoExtension(msg []byte, p *peerState) (err error) { 918 | 919 | var h ExtensionHandshake 920 | if msg[0] == EXTENSION_HANDSHAKE { 921 | err = bencode.NewDecoder(bytes.NewReader(msg[1:])).Decode(&h) 922 | if err != nil { 923 | log.Println("Error when unmarshaling extension handshake") 924 | return err 925 | } 926 | 927 | p.theirExtensions = make(map[string]int) 928 | for name, code := range h.M { 929 | p.theirExtensions[name] = code 930 | } 931 | 932 | if t.si.HaveTorrent || t.si.ME != nil && t.si.ME.Transferring { 933 | return 934 | } 935 | 936 | // Fill metadata info 937 | if h.MetadataSize == 
0 { 938 | log.Printf("Missing metadata_size argument, this is invalid.") 939 | return 940 | } 941 | 942 | nPieces := h.MetadataSize/METADATA_PIECE_SIZE + 1 943 | t.si.ME.Pieces = make([][]byte, nPieces) 944 | 945 | if _, ok := p.theirExtensions["ut_metadata"]; ok { 946 | t.si.ME.Transferring = true 947 | p.sendMetadataRequest(0) 948 | } 949 | 950 | } else if ext, ok := t.si.OurExtensions[int(msg[0])]; ok { 951 | switch ext { 952 | case "ut_metadata": 953 | t.DoMetadata(msg[1:], p) 954 | case "ut_pex": 955 | t.DoPex(msg[1:], p) 956 | default: 957 | log.Println("Unknown extension: ", ext) 958 | } 959 | } else { 960 | log.Println("Unknown extension: ", int(msg[0])) 961 | } 962 | 963 | return nil 964 | } 965 | 966 | func (t *TorrentSession) sendRequest(peer *peerState, index, begin, length uint32) (err error) { 967 | if !peer.am_choking { 968 | // log.Println("Sending block", index, begin, length) 969 | buf := make([]byte, length+9) 970 | buf[0] = PIECE 971 | binary.BigEndian.PutUint32(buf[1:5], index) 972 | binary.BigEndian.PutUint32(buf[5:9], begin) 973 | _, err = t.fileStore.ReadAt(buf[9:], 974 | int64(index)*t.m.Info.PieceLength+int64(begin)) 975 | if err != nil { 976 | return 977 | } 978 | peer.sendMessage(buf) 979 | t.si.Uploaded += int64(length) 980 | } 981 | return 982 | } 983 | 984 | func (t *TorrentSession) checkInteresting(p *peerState) { 985 | p.SetInterested(t.isInteresting(p)) 986 | } 987 | 988 | func (t *TorrentSession) isInteresting(p *peerState) bool { 989 | for i := 0; i < t.totalPieces; i++ { 990 | if !t.pieceSet.IsSet(i) && p.have.IsSet(i) { 991 | return true 992 | } 993 | } 994 | return false 995 | } 996 | 997 | func (t *TorrentSession) Matches(ih string) bool { 998 | return t.m.InfoHash == ih 999 | } 1000 | -------------------------------------------------------------------------------- /trackerClient.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | 
"io/ioutil" 7 | "log" 8 | "math/rand" 9 | "net" 10 | "net/url" 11 | "strconv" 12 | "strings" 13 | "time" 14 | 15 | "github.com/nictuku/nettools" 16 | "github.com/zeebo/bencode" 17 | ) 18 | 19 | // Code to talk to trackers. 20 | // Implements BEP 12 Multitracker Metadata Extension 21 | 22 | type ClientStatusReport struct { 23 | Event string 24 | InfoHash string 25 | PeerId string 26 | Port int 27 | Uploaded int64 28 | Downloaded int64 29 | Left int64 30 | } 31 | 32 | type trackerClient struct { 33 | trackerInfoChan chan *TrackerResponse 34 | announceList [][]string 35 | failedTrackers map[string]struct{} 36 | } 37 | 38 | func NewTrackerClient(announce string, announceList [][]string) trackerClient { 39 | if announce != "" && announceList == nil { 40 | // Convert the plain announce into an announceList to simplify logic 41 | announceList = [][]string{[]string{announce}} 42 | } 43 | 44 | if announceList != nil { 45 | announceList = shuffleAnnounceList(announceList) 46 | } 47 | tic := make(chan *TrackerResponse) 48 | return trackerClient{ 49 | trackerInfoChan: tic, 50 | announceList: announceList, 51 | failedTrackers: make(map[string]struct{}), 52 | } 53 | } 54 | 55 | func (tc trackerClient) Announce(report ClientStatusReport) { 56 | go func() { 57 | tr := tc.queryTrackers(report) 58 | if tr != nil { 59 | tc.trackerInfoChan <- tr 60 | } 61 | }() 62 | } 63 | 64 | // Deep copy announcelist and shuffle each level. 
65 | func shuffleAnnounceList(announceList [][]string) (result [][]string) { 66 | result = make([][]string, len(announceList)) 67 | for i, level := range announceList { 68 | result[i] = shuffleAnnounceListLevel(level) 69 | } 70 | return 71 | } 72 | 73 | func shuffleAnnounceListLevel(level []string) (shuffled []string) { 74 | items := len(level) 75 | shuffled = make([]string, items) 76 | perm := rand.Perm(items) 77 | for i, v := range perm { 78 | shuffled[v] = level[i] 79 | } 80 | return 81 | } 82 | 83 | func (tc trackerClient) queryTrackers(report ClientStatusReport) (tr *TrackerResponse) { 84 | for _, level := range tc.announceList { 85 | for i, tracker := range level { 86 | if _, ok := tc.failedTrackers[tracker]; ok { 87 | continue 88 | } 89 | var err error 90 | tr, err = queryTracker(report, tracker) 91 | if err == nil { 92 | // Move successful tracker to front of slice for next announcement 93 | // cycle. 94 | copy(level[1:i+1], level[0:i]) 95 | level[0] = tracker 96 | return 97 | } else { 98 | log.Println("Couldn't contact", tracker, ": ", err) 99 | if _, ok := tc.failedTrackers[tracker]; !ok { 100 | log.Printf("Blacklisting failed tracker %s", tracker) 101 | } 102 | tc.failedTrackers[tracker] = struct{}{} 103 | } 104 | } 105 | } 106 | if len(tc.announceList) > 0 && len(tc.announceList[0]) > 0 { 107 | log.Println("Error: Did not successfully contact a tracker:", tc.announceList) 108 | } 109 | return 110 | } 111 | 112 | func queryTracker(report ClientStatusReport, trackerUrl string) (tr *TrackerResponse, err error) { 113 | // We sometimes indicate www.domain.com:port/path, it should be 114 | // automatically detected 115 | if !strings.HasPrefix(trackerUrl, "http") { 116 | trackerUrl = "http://" + trackerUrl 117 | } 118 | 119 | u, err := url.Parse(trackerUrl) 120 | if err != nil { 121 | log.Println("Error: Invalid announce URL(", trackerUrl, "):", err) 122 | return 123 | } 124 | 125 | uq := u.Query() 126 | uq.Add("info_hash", report.InfoHash) 127 | 
uq.Add("peer_id", report.PeerId) 128 | uq.Add("port", strconv.Itoa(report.Port)) 129 | uq.Add("uploaded", strconv.FormatInt(report.Uploaded, 10)) 130 | uq.Add("downloaded", strconv.FormatInt(report.Downloaded, 10)) 131 | uq.Add("left", strconv.FormatInt(report.Left, 10)) 132 | uq.Add("compact", "1") 133 | 134 | // Don't report IPv6 address, the user might prefer to keep 135 | // that information private when communicating with IPv4 hosts. 136 | if false { 137 | ipv6Address, err := findLocalIPV6AddressFor(u.Host) 138 | if err == nil { 139 | log.Println("our ipv6", ipv6Address) 140 | uq.Add("ipv6", ipv6Address) 141 | } 142 | } 143 | 144 | if report.Event != "" { 145 | uq.Add("event", report.Event) 146 | } 147 | 148 | // This might reorder the existing query string in the Announce url 149 | // This might break some broken trackers that don't parse URLs properly. 150 | 151 | u.RawQuery = uq.Encode() 152 | 153 | tr, err = getTrackerInfo(u.String()) 154 | if tr == nil || err != nil { 155 | log.Println("Error: Could not fetch tracker info:", err) 156 | } else if tr.FailureReason != "" { 157 | log.Println("Error: Tracker returned failure reason:", tr.FailureReason) 158 | err = fmt.Errorf("tracker failure %s", tr.FailureReason) 159 | } 160 | return 161 | } 162 | 163 | func findLocalIPV6AddressFor(hostAddr string) (local string, err error) { 164 | // Figure out our IPv6 address to talk to a given host. 
165 | host, hostPort, err := net.SplitHostPort(hostAddr) 166 | if err != nil { 167 | host = hostAddr 168 | hostPort = "1234" 169 | } 170 | dummyAddr := net.JoinHostPort(host, hostPort) 171 | log.Println("Looking for host ", dummyAddr) 172 | conn, err := net.Dial("udp6", dummyAddr) 173 | if err != nil { 174 | log.Println("No IPV6 for host ", host, err) 175 | return "", err 176 | } 177 | defer conn.Close() 178 | localAddr := conn.LocalAddr() 179 | local, _, err = net.SplitHostPort(localAddr.String()) 180 | if err != nil { 181 | local = localAddr.String() 182 | } 183 | return 184 | } 185 | 186 | type TrackerResponse struct { 187 | FailureReason string "failure reason" 188 | WarningMessage string "warning message" 189 | Interval time.Duration 190 | MinInterval time.Duration "min interval" 191 | TrackerId string "tracker id" 192 | Complete int 193 | Incomplete int 194 | PeersRaw string `bencode:"peers"` 195 | Peers []string 196 | Peers6Raw string `bencode:"peers6"` 197 | Peers6 []string 198 | } 199 | 200 | func getTrackerInfo(url string) (tr *TrackerResponse, err error) { 201 | r, err := proxyHttpGet(url) 202 | if err != nil { 203 | return 204 | } 205 | defer r.Body.Close() 206 | if r.StatusCode >= 400 { 207 | data, _ := ioutil.ReadAll(r.Body) 208 | reason := "Bad Request " + string(data) 209 | log.Println(reason) 210 | err = errors.New(reason) 211 | return 212 | } 213 | 214 | var tr2 TrackerResponse 215 | err = bencode.NewDecoder(r.Body).Decode(&tr2) 216 | r.Body.Close() 217 | if err != nil { 218 | return 219 | } 220 | 221 | // Decode peers 222 | if len(tr2.PeersRaw) > 0 { 223 | const peerLen = 6 224 | nPeers := len(tr2.PeersRaw) / peerLen 225 | // log.Println("Tracker gave us", nPeers, "peers") 226 | tr2.Peers = make([]string, nPeers) 227 | for i := 0; i < nPeers; i++ { 228 | peer := nettools.BinaryToDottedPort(tr2.PeersRaw[i*peerLen : (i+1)*peerLen]) 229 | tr2.Peers[i/peerLen] = peer 230 | } 231 | } 232 | 233 | // Decode peers6 234 | 235 | if len(tr2.Peers6Raw) > 0 { 
236 | const peerLen = 18 237 | nPeers := len(tr2.Peers6Raw) / peerLen 238 | // log.Println("Tracker gave us", nPeers, "IPv6 peers") 239 | tr2.Peers6 = make([]string, nPeers) 240 | for i := 0; i < nPeers; i++ { 241 | peerEntry := tr2.Peers6Raw[i*peerLen : (i+1)*peerLen] 242 | host := net.IP(peerEntry[0:16]) 243 | port := int((uint(peerEntry[16]) << 8) | uint(peerEntry[17])) 244 | peer := net.JoinHostPort(host.String(), strconv.Itoa(port)) 245 | tr2.Peers6[i] = peer 246 | } 247 | } 248 | 249 | tr = &tr2 250 | return 251 | } 252 | -------------------------------------------------------------------------------- /upnp.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Just enough UPnP to be able to forward ports 4 | // 5 | 6 | import ( 7 | "bytes" 8 | "encoding/xml" 9 | "errors" 10 | "net" 11 | "net/http" 12 | "os" 13 | "strconv" 14 | "strings" 15 | "time" 16 | ) 17 | 18 | type upnpNAT struct { 19 | serviceURL string 20 | ourIP string 21 | } 22 | 23 | func Discover() (nat NAT, err error) { 24 | ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") 25 | if err != nil { 26 | return 27 | } 28 | conn, err := net.ListenPacket("udp4", ":0") 29 | if err != nil { 30 | return 31 | } 32 | socket := conn.(*net.UDPConn) 33 | defer socket.Close() 34 | 35 | err = socket.SetDeadline(time.Now().Add(3 * time.Second)) 36 | if err != nil { 37 | return 38 | } 39 | 40 | st := "ST: urn:schemas-upnp-org:device:InternetGatewayDevice:1\r\n" 41 | buf := bytes.NewBufferString( 42 | "M-SEARCH * HTTP/1.1\r\n" + 43 | "HOST: 239.255.255.250:1900\r\n" + 44 | st + 45 | "MAN: \"ssdp:discover\"\r\n" + 46 | "MX: 2\r\n\r\n") 47 | message := buf.Bytes() 48 | answerBytes := make([]byte, 1024) 49 | for i := 0; i < 3; i++ { 50 | _, err = socket.WriteToUDP(message, ssdp) 51 | if err != nil { 52 | return 53 | } 54 | var n int 55 | n, _, err = socket.ReadFromUDP(answerBytes) 56 | if err != nil { 57 | continue 58 | // socket.Close() 59 | // 
return 60 | } 61 | answer := string(answerBytes[0:n]) 62 | if strings.Index(answer, "\r\n"+st) < 0 { 63 | continue 64 | } 65 | // HTTP header field names are case-insensitive. 66 | // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 67 | locString := "\r\nlocation: " 68 | answer = strings.ToLower(answer) 69 | locIndex := strings.Index(answer, locString) 70 | if locIndex < 0 { 71 | continue 72 | } 73 | loc := answer[locIndex+len(locString):] 74 | endIndex := strings.Index(loc, "\r\n") 75 | if endIndex < 0 { 76 | continue 77 | } 78 | locURL := loc[0:endIndex] 79 | var serviceURL string 80 | serviceURL, err = getServiceURL(locURL) 81 | if err != nil { 82 | return 83 | } 84 | var ourIP string 85 | ourIP, err = getOurIP() 86 | if err != nil { 87 | return 88 | } 89 | nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP} 90 | return 91 | } 92 | err = errors.New("UPnP port discovery failed.") 93 | return 94 | } 95 | 96 | type Service struct { 97 | ServiceType string 98 | ControlURL string 99 | } 100 | 101 | type DeviceList struct { 102 | Device []Device 103 | } 104 | 105 | type ServiceList struct { 106 | Service []Service 107 | } 108 | 109 | type Device struct { 110 | DeviceType string 111 | DeviceList DeviceList 112 | ServiceList ServiceList 113 | } 114 | 115 | type Root struct { 116 | Device Device 117 | } 118 | 119 | func getChildDevice(d *Device, deviceType string) *Device { 120 | dl := d.DeviceList.Device 121 | for i := 0; i < len(dl); i++ { 122 | if dl[i].DeviceType == deviceType { 123 | return &dl[i] 124 | } 125 | } 126 | return nil 127 | } 128 | 129 | func getChildService(d *Device, serviceType string) *Service { 130 | sl := d.ServiceList.Service 131 | for i := 0; i < len(sl); i++ { 132 | if sl[i].ServiceType == serviceType { 133 | return &sl[i] 134 | } 135 | } 136 | return nil 137 | } 138 | 139 | func getOurIP() (ip string, err error) { 140 | hostname, err := os.Hostname() 141 | if err != nil { 142 | return 143 | } 144 | return net.LookupCNAME(hostname) 
145 | } 146 | 147 | func getServiceURL(rootURL string) (url string, err error) { 148 | r, err := http.Get(rootURL) 149 | if err != nil { 150 | return 151 | } 152 | defer r.Body.Close() 153 | if r.StatusCode >= 400 { 154 | err = errors.New(string(r.StatusCode)) 155 | return 156 | } 157 | var root Root 158 | err = xml.NewDecoder(r.Body).Decode(&root) 159 | if err != nil { 160 | return 161 | } 162 | a := &root.Device 163 | if a.DeviceType != "urn:schemas-upnp-org:device:InternetGatewayDevice:1" { 164 | err = errors.New("No InternetGatewayDevice") 165 | return 166 | } 167 | b := getChildDevice(a, "urn:schemas-upnp-org:device:WANDevice:1") 168 | if b == nil { 169 | err = errors.New("No WANDevice") 170 | return 171 | } 172 | c := getChildDevice(b, "urn:schemas-upnp-org:device:WANConnectionDevice:1") 173 | if c == nil { 174 | err = errors.New("No WANConnectionDevice") 175 | return 176 | } 177 | d := getChildService(c, "urn:schemas-upnp-org:service:WANIPConnection:1") 178 | if d == nil { 179 | err = errors.New("No WANIPConnection") 180 | return 181 | } 182 | url = combineURL(rootURL, d.ControlURL) 183 | return 184 | } 185 | 186 | func combineURL(rootURL, subURL string) string { 187 | protocolEnd := "://" 188 | protoEndIndex := strings.Index(rootURL, protocolEnd) 189 | a := rootURL[protoEndIndex+len(protocolEnd):] 190 | rootIndex := strings.Index(a, "/") 191 | return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL 192 | } 193 | 194 | func soapRequest(url, function, message string) (r *http.Response, err error) { 195 | fullMessage := "" + 196 | "\r\n" + 197 | "" + message + "" 198 | 199 | req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) 200 | if err != nil { 201 | return nil, err 202 | } 203 | req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") 204 | req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") 205 | //req.Header.Set("Transfer-Encoding", "chunked") 206 | req.Header.Set("SOAPAction", 
"\"urn:schemas-upnp-org:service:WANIPConnection:1#"+function+"\"") 207 | req.Header.Set("Connection", "Close") 208 | req.Header.Set("Cache-Control", "no-cache") 209 | req.Header.Set("Pragma", "no-cache") 210 | 211 | // log.Stderr("soapRequest ", req) 212 | 213 | r, err = http.DefaultClient.Do(req) 214 | if r.Body != nil { 215 | defer r.Body.Close() 216 | } 217 | 218 | if r.StatusCode >= 400 { 219 | // log.Stderr(function, r.StatusCode) 220 | err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function) 221 | r = nil 222 | return 223 | } 224 | return 225 | } 226 | 227 | type statusInfo struct { 228 | externalIpAddress string 229 | } 230 | 231 | func (n *upnpNAT) getStatusInfo() (info statusInfo, err error) { 232 | 233 | message := "\r\n" + 234 | "" 235 | 236 | var response *http.Response 237 | response, err = soapRequest(n.serviceURL, "GetStatusInfo", message) 238 | if err != nil { 239 | return 240 | } 241 | 242 | // TODO: Write a soap reply parser. It has to eat the Body and envelope tags... 243 | 244 | response.Body.Close() 245 | return 246 | } 247 | 248 | func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { 249 | info, err := n.getStatusInfo() 250 | if err != nil { 251 | return 252 | } 253 | addr = net.ParseIP(info.externalIpAddress) 254 | return 255 | } 256 | 257 | func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) { 258 | // A single concatenation would break ARM compilation. 
259 | message := "\r\n" + 260 | "" + strconv.Itoa(externalPort) 261 | message += "" + protocol + "" 262 | message += "" + strconv.Itoa(internalPort) + "" + 263 | "" + n.ourIP + "" + 264 | "1" 265 | message += description + 266 | "" + strconv.Itoa(timeout) + 267 | "" 268 | 269 | var response *http.Response 270 | response, err = soapRequest(n.serviceURL, "AddPortMapping", message) 271 | if err != nil { 272 | return 273 | } 274 | 275 | // TODO: check response to see if the port was forwarded 276 | // log.Println(message, response) 277 | mappedExternalPort = externalPort 278 | _ = response 279 | return 280 | } 281 | 282 | func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { 283 | 284 | message := "\r\n" + 285 | "" + strconv.Itoa(externalPort) + 286 | "" + protocol + "" + 287 | "" 288 | 289 | var response *http.Response 290 | response, err = soapRequest(n.serviceURL, "DeletePortMapping", message) 291 | if err != nil { 292 | return 293 | } 294 | 295 | // TODO: check response to see if the port was deleted 296 | // log.Println(message, response) 297 | _ = response 298 | return 299 | } 300 | -------------------------------------------------------------------------------- /uri.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "crypto/sha1" 5 | "errors" 6 | "fmt" 7 | "net/url" 8 | "strings" 9 | ) 10 | 11 | var ( 12 | errNoBtih = errors.New("Magnet URI xt parameter missing the 'urn:btih:' prefix. 
Not a bittorrent hash link?") 13 | ) 14 | 15 | type Magnet struct { 16 | InfoHashes []string 17 | Names []string 18 | } 19 | 20 | func parseMagnet(s string) (mag Magnet, err error) { 21 | // References: 22 | // - http://bittorrent.org/beps/bep_0009.html 23 | // - http://en.wikipedia.org/wiki/Magnet_URI_scheme 24 | // 25 | // Example bittorrent magnet link: 26 | // 27 | // => magnet:?xt=urn:btih:bbb6db69965af769f664b6636e7914f8735141b3&dn=Ubuntu-12.04-desktop-i386.iso 28 | // 29 | // xt: exact topic. 30 | // ~ urn: uniform resource name. 31 | // ~ btih: bittorrent infohash. 32 | // dn: display name (optional). 33 | // tr: address tracker (optional). 34 | u, err := url.Parse(s) 35 | if err != nil { 36 | return 37 | } 38 | xts, ok := u.Query()["xt"] 39 | if !ok { 40 | err = errors.New(fmt.Sprintf("Magnet URI missing the 'xt' argument: %s", s)) 41 | return 42 | } 43 | 44 | infoHashes := make([]string, 0, len(xts)) 45 | for _, xt := range xts { 46 | s := strings.Split(xt, "urn:btih:") 47 | if len(s) != 2 { 48 | err = errNoBtih 49 | return 50 | } 51 | 52 | ih := s[1] 53 | 54 | // TODO: support base32 encoded hashes, if they still exist. 55 | if len(ih) != sha1.Size*2 { // hex format. 56 | err = errors.New(fmt.Sprintf("Magnet URI contains infohash with unexpected length. 
Wanted %d, got %d: %v", sha1.Size, len(ih), ih)) 57 | return 58 | } 59 | 60 | infoHashes = append(infoHashes, s[1]) 61 | } 62 | 63 | var names []string 64 | n, ok := u.Query()["dn"] 65 | if ok { 66 | names = n 67 | } 68 | 69 | mag = Magnet{ 70 | InfoHashes: infoHashes, 71 | Names: names, 72 | } 73 | 74 | return 75 | } 76 | -------------------------------------------------------------------------------- /uri_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | type magnetTest struct { 9 | uri string 10 | infoHashes []string 11 | } 12 | 13 | func TestParseMagnet(t *testing.T) { 14 | uris := []magnetTest{ 15 | {uri: "magnet:?xt=urn:btih:bbb6db69965af769f664b6636e7914f8735141b3&dn=Ubuntu-12.04-desktop-i386.iso&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A80&tr=udp%3A%2F%2Ftracker.publicbt.com%3A80&tr=udp%3A%2F%2Ftracker.istole.it%3A6969&tr=udp%3A%2F%2Ftracker.ccc.de%3A80", infoHashes: []string{"bbb6db69965af769f664b6636e7914f8735141b3"}}, 16 | } 17 | 18 | for _, u := range uris { 19 | m, err := parseMagnet(u.uri) 20 | if err != nil { 21 | t.Errorf("ParseMagnet failed for uri %v: %v", u.uri, err) 22 | } 23 | if !reflect.DeepEqual(u.infoHashes, m.InfoHashes) { 24 | t.Errorf("ParseMagnet failed, wanted %v, got %v", u.infoHashes, m.InfoHashes) 25 | } 26 | } 27 | } 28 | --------------------------------------------------------------------------------