├── .gitignore
├── .travis.yml
├── LICENSE
├── README.md
├── main.go
├── queryTracker.bash
├── resolveBindIP.go
├── resolveBindIP_test.go
├── test.bash
├── testData
│   ├── a.torrent
│   └── testFile
├── testdht.bash
├── testswarm.bash
├── testtracker.bash
├── torrent
│   ├── accumulator.go
│   ├── accumulator_test.go
│   ├── bitset.go
│   ├── cache.go
│   ├── cache_test.go
│   ├── choker.go
│   ├── choker_test.go
│   ├── execOnSeeding.go
│   ├── files.go
│   ├── files_test.go
│   ├── hdcache.go
│   ├── hdcache_test.go
│   ├── listen.go
│   ├── lpd.go
│   ├── metainfo.go
│   ├── metainfo_test.go
│   ├── nat.go
│   ├── natpmp.go
│   ├── osfiles.go
│   ├── peer.go
│   ├── peer_test.go
│   ├── pieces.go
│   ├── pieces_test.go
│   ├── proxy.go
│   ├── ramfiles.go
│   ├── sftp.go
│   ├── torrent.go
│   ├── torrentLoop.go
│   ├── trackerClient.go
│   ├── upnp.go
│   ├── uri.go
│   └── uri_test.go
└── tracker
    ├── tracker.go
    └── tracker_test.go
/.gitignore:
--------------------------------------------------------------------------------
1 | *~
2 | *.[56ao]
3 | Taipei-Torrent
4 | *.exe
5 | testData/downloads/
6 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: go
2 |
3 | go:
4 | - 1.11.x
5 | - tip
6 |
7 | allow_failures:
8 | - go: tip
9 |
10 | install:
11 | - go get -d -v ./... && go install -race -v ./...
12 |
13 | script: go test -race -v ./...
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | // Copyright (c) 2010 Jack Palevich. All rights reserved.
2 | //
3 | // Redistribution and use in source and binary forms, with or without
4 | // modification, are permitted provided that the following conditions are
5 | // met:
6 | //
7 | // * Redistributions of source code must retain the above copyright
8 | // notice, this list of conditions and the following disclaimer.
9 | // * Redistributions in binary form must reproduce the above
10 | // copyright notice, this list of conditions and the following disclaimer
11 | // in the documentation and/or other materials provided with the
12 | // distribution.
13 | // * Neither the name of Google Inc. nor the names of its
14 | // contributors may be used to endorse or promote products derived from
15 | // this software without specific prior written permission.
16 | //
17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Taipei Torrent
2 | ==============
3 |
4 | This is a simple command-line-interface BitTorrent client written in the Go
5 | programming language.
6 |
7 | [![Build Status](https://travis-ci.org/jackpal/Taipei-Torrent.svg?branch=master)](https://travis-ci.org/jackpal/Taipei-Torrent)
8 |
9 | Features:
10 | ---------
11 |
12 | + Supports multiple torrent files
13 | + Magnet links
14 | + DHT
15 | + IPv6
16 | + UDP trackers
17 | + UPnP / NAT-PMP automatic firewall configuration
18 | + Socks5 proxy support
19 |
20 | Additional Features:
21 | --------------------
22 |
23 | + It can act as a tracker if you start it with the --createTracker flag
24 | + SFTP file system proxying makes it possible to download very large torrents
25 |   on systems with limited local file storage.
26 |
27 | FAQ:
28 | ----
29 |
30 | Q: Why is it named Taipei Torrent?
31 |
32 | A: I started writing it while visiting beautiful Taipei, Taiwan.
33 |
34 | Q: What is the license?
35 |
36 | A: See the LICENSE file.
37 |
38 | Current Status
39 | --------------
40 |
41 | + Tested on Go 1.11.x and tip (the versions exercised in .travis.yml).
42 | + Tested on Windows, Linux and Mac OS X.
43 | + People tell me they've run it on Android, too.
44 |
45 | Development Roadmap
46 | -------------------
47 |
48 | + Full UPnP support (need to be able to search for an unused listener port,
49 |   detect we have already acquired the port, defend the port mapping against
50 |   router reboots, and release the listener port when we quit).
51 | + Clean up source code
52 | + Deal with TODOs
53 | + Perhaps a web-based status UI.
54 |
55 | Download, Install, and Build Instructions
56 | -----------------------------------------
57 |
58 | 1. Download and install the Go tools from http://golang.org
59 |
60 | 2. Use the "go" command to download, install, and build the Taipei-Torrent
61 | app:
62 |
63 | go get github.com/jackpal/Taipei-Torrent
64 |
65 | Usage Instructions
66 | ------------------
67 |
68 | Taipei-Torrent mydownload.torrent
69 | Taipei-Torrent --useDHT "magnet:?xt=urn:btih:bbb6db69965af769f664b6636e7914f8735141b3"
70 |
71 | or
72 |
73 | Taipei-Torrent -help
74 |
75 | Third-party Packages
76 | --------------------
77 |
78 | https://github.com/jackpal/bencode-go - Bencode encoder/decoder
79 |
80 | http://github.com/jackpal/gateway - LAN gateway discovery
81 |
82 | http://github.com/jackpal/go-nat-pmp - NAT-PMP firewall client
83 |
84 | https://github.com/nictuku/dht - Distributed Hash Table
85 |
86 | https://github.com/nictuku/nettools - Network utilities
87 |
88 | https://github.com/pkg/sftp - SFTP protocol
89 |
90 |
91 | Google+ Community
92 | -----------------
93 |
94 | https://plus.google.com/u/0/communities/100997865549971977580
95 |
96 | Other Notable Go BitTorrent Implementations
97 | -------------------------------------------
98 |
99 | I haven't used these, but they may be worth checking out:
100 |
101 | https://github.com/anacrolix/torrent
102 |
103 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "log"
6 | "math"
7 | "math/rand"
8 | "os"
9 | "os/signal"
10 | "path"
11 | "runtime/debug"
12 | "runtime/pprof"
13 | "time"
14 |
15 | "github.com/jackpal/Taipei-Torrent/torrent"
16 | "github.com/jackpal/Taipei-Torrent/tracker"
17 | "golang.org/x/net/proxy"
18 | )
19 |
20 | var (
21 | cpuprofile = flag.String("cpuprofile", "", "If not empty, collects CPU profile samples and writes the profile to the given file before the program exits")
22 | memprofile = flag.String("memprofile", "", "If not empty, writes memory heap allocations to the given file before the program exits")
23 | createTorrent = flag.String("createTorrent", "", "If not empty, creates a torrent file from the given root. Writes to stdout")
24 | createTracker = flag.String("createTracker", "", "Creates a tracker serving the given torrent file on the given address. Example --createTracker=:8080 to serve on port 8080.")
25 |
26 | port = flag.Int("port", 7777, "Port to listen on. 0 means pick random port. Note that 6881 is blacklisted by some trackers.")
27 | fileDir = flag.String("fileDir", ".", "path to directory where files are stored")
28 | seedRatio = flag.Float64("seedRatio", math.Inf(0), "Seed until ratio >= this value before quitting.")
29 | useDeadlockDetector = flag.Bool("useDeadlockDetector", false, "Panic and print stack dumps when the program is stuck.")
30 | useLPD = flag.Bool("useLPD", false, "Use Local Peer Discovery")
31 | useUPnP = flag.Bool("useUPnP", false, "Use UPnP to open port in firewall.")
32 | useNATPMP = flag.Bool("useNATPMP", false, "Use NAT-PMP to open port in firewall.")
33 | gateway = flag.String("gateway", "", "IP Address of gateway.")
34 | useDHT = flag.Bool("useDHT", false, "Use DHT to get peers.")
35 | trackerlessMode = flag.Bool("trackerlessMode", false, "Do not get peers from the tracker. Good for testing DHT mode.")
36 | proxyAddress = flag.String("proxyAddress", "", "Address of a SOCKS5 proxy to use.")
37 | initialCheck = flag.Bool("initialCheck", true, "Do an initial hash check on files when adding torrents.")
38 | useSFTP = flag.String("useSFTP", "", "SFTP connection string, to store torrents over SFTP. e.g. 'username:password@192.168.1.25:22/path/'")
39 | useRamCache = flag.Int("useRamCache", 0, "Size in MiB of cache in ram, to reduce traffic on torrent storage.")
40 | useHdCache = flag.Int("useHdCache", 0, "Size in MiB of cache in OS temp directory, to reduce traffic on torrent storage.")
41 | execOnSeeding = flag.String("execOnSeeding", "", "Command to execute when torrent has fully downloaded and has begun seeding.")
42 | quickResume = flag.Bool("quickResume", false, "Save torrenting data to resume faster. '-initialCheck' should be set to false, to prevent hash check on resume.")
43 | maxActive = flag.Int("maxActive", 16, "How many torrents should be active at a time. Torrents added beyond this value are queued.")
44 | memoryPerTorrent = flag.Int("memoryPerTorrent", -1, "Maximum memory (in MiB) per torrent used for Active Pieces. 0 means minimum. -1 (default) means unlimited.")
45 | )
46 |
47 | func parseTorrentFlags() (flags *torrent.TorrentFlags, err error) {
48 | dialer, err := dialerFromFlags()
49 | if err != nil {
50 | return
51 | }
52 | flags = &torrent.TorrentFlags{
53 | Dial: dialer,
54 | Port: portFromFlags(),
55 | FileDir: *fileDir,
56 | SeedRatio: *seedRatio,
57 | UseDeadlockDetector: *useDeadlockDetector,
58 | UseLPD: *useLPD,
59 | UseDHT: *useDHT,
60 | UseUPnP: *useUPnP,
61 | UseNATPMP: *useNATPMP,
62 | TrackerlessMode: *trackerlessMode,
63 | // IP address of gateway
64 | Gateway: *gateway,
65 | InitialCheck: *initialCheck,
66 | FileSystemProvider: fsproviderFromFlags(),
67 | Cacher: cacheproviderFromFlags(),
68 | ExecOnSeeding: *execOnSeeding,
69 | QuickResume: *quickResume,
70 | MaxActive: *maxActive,
71 | MemoryPerTorrent: *memoryPerTorrent,
72 | }
73 | return
74 | }
75 |
76 | func portFromFlags() int {
77 | if *port != 0 {
78 | return *port
79 | }
80 |
81 | rr := rand.New(rand.NewSource(time.Now().UnixNano()))
82 | return rr.Intn(48000) + 1025
83 | }
84 |
85 | func cacheproviderFromFlags() torrent.CacheProvider {
86 | if (*useRamCache) > 0 && (*useHdCache) > 0 {
87 | log.Panicln("Only one cache at a time, please.")
88 | }
89 |
90 | if (*useRamCache) > 0 {
91 | return torrent.NewRamCacheProvider(*useRamCache)
92 | }
93 |
94 | if (*useHdCache) > 0 {
95 | return torrent.NewHdCacheProvider(*useHdCache)
96 | }
97 | return nil
98 | }
99 |
100 | func fsproviderFromFlags() torrent.FsProvider {
101 | if len(*useSFTP) > 0 {
102 | return torrent.NewSftpFsProvider(*useSFTP)
103 | }
104 | return torrent.OsFsProvider{}
105 | }
106 |
107 | func dialerFromFlags() (proxy.Dialer, error) {
108 | if len(*proxyAddress) > 0 {
109 | return proxy.SOCKS5("tcp", string(*proxyAddress), nil, &proxy.Direct)
110 | }
111 | return proxy.FromEnvironment(), nil
112 | }
113 |
114 | func main() {
115 | flag.Usage = usage
116 | flag.Parse()
117 |
118 | if *createTorrent != "" {
119 | err := torrent.WriteMetaInfoBytes(*createTorrent, *createTracker, os.Stdout)
120 | if err != nil {
121 | log.Fatal("Could not create torrent file:", err)
122 | }
123 | return
124 | }
125 |
126 | if *createTracker != "" {
127 | err := startTracker(*createTracker, flag.Args())
128 | if err != nil {
129 | log.Fatal("Tracker returned error:", err)
130 | }
131 | return
132 | }
133 |
134 | args := flag.Args()
135 | narg := flag.NArg()
136 | if narg < 1 {
137 | log.Println("Too few arguments. Torrent file or torrent URL required.")
138 | usage()
139 | }
140 |
141 | torrentFlags, err := parseTorrentFlags()
142 | if err != nil {
143 | log.Fatal("Could not parse flags:", err)
144 | }
145 |
146 | if *cpuprofile != "" {
147 | cpuf, err := os.Create(*cpuprofile)
148 | if err != nil {
149 | log.Fatal(err)
150 | }
151 | pprof.StartCPUProfile(cpuf)
152 | defer pprof.StopCPUProfile()
153 | }
154 |
155 | if *memprofile != "" {
156 | defer func(file string) {
157 | memf, err := os.Create(file)
158 | if err != nil {
159 | log.Fatal(err)
160 | }
161 | pprof.WriteHeapProfile(memf)
162 | }(*memprofile)
163 | }
164 |
165 | if (*memoryPerTorrent) >= 0 { //User is worried about memory use.
166 | debug.SetGCPercent(20) //Set the GC to clear memory more often.
167 | }
168 |
169 | log.Println("Starting.")
170 |
171 | err = torrent.RunTorrents(torrentFlags, args)
172 | if err != nil {
173 | log.Fatal("Could not run torrents", args, err)
174 | }
175 | }
176 |
177 | func usage() {
178 | log.Printf("usage: torrent.Torrent [options] (torrent-file | torrent-url)")
179 |
180 | flag.PrintDefaults()
181 | os.Exit(2)
182 | }
183 |
184 | func startTracker(addr string, torrentFiles []string) (err error) {
185 | t := tracker.NewTracker()
186 | // TODO(jackpal) Allow caller to choose port number
187 | t.Addr = addr
188 | dial, err := dialerFromFlags()
189 | if err != nil {
190 | return
191 | }
192 | for _, torrentFile := range torrentFiles {
193 | var metaInfo *torrent.MetaInfo
194 | metaInfo, err = torrent.GetMetaInfo(dial, torrentFile)
195 | if err != nil {
196 | return
197 | }
198 | name := metaInfo.Info.Name
199 | if name == "" {
200 | name = path.Base(torrentFile)
201 | }
202 | err = t.Register(metaInfo.InfoHash, name)
203 | if err != nil {
204 | return
205 | }
206 | }
207 | go func() {
208 | quitChan := listenSigInt()
209 | select {
210 | case <-quitChan:
211 | log.Printf("got control-C")
212 | t.Quit()
213 | }
214 | }()
215 |
216 | err = t.ListenAndServe()
217 | if err != nil {
218 | return
219 | }
220 | return
221 | }
222 |
223 | func listenSigInt() chan os.Signal {
224 | c := make(chan os.Signal, 1)
225 | signal.Notify(c, os.Interrupt, os.Kill)
226 | return c
227 | }
228 |
--------------------------------------------------------------------------------
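
A minimal sketch of how the proxy.Dialer produced by dialerFromFlags above can be used to open a connection. The proxy address and target host are illustrative placeholders, not values from this repository:

    package main

    import (
    	"log"

    	"golang.org/x/net/proxy"
    )

    func main() {
    	// Build a SOCKS5 dialer the same way dialerFromFlags does when
    	// -proxyAddress is set. "127.0.0.1:1080" is a placeholder address.
    	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Peer and tracker connections are then opened through the proxy.
    	conn, err := dialer.Dial("tcp", "example.com:6881")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()
    	log.Println("connected via SOCKS5 proxy to", conn.RemoteAddr())
    }
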
/queryTracker.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Test the tracker
3 | set -e
4 |
5 | TRACKER=http://127.0.0.1:8080
6 | ANNOUNCE=$TRACKER/announce
7 | SCRAPE=$TRACKER/scrape
8 |
9 | curl $SCRAPE
10 |
11 | # 3 peers
12 | curl $ANNOUNCE?port=6001\&compact=1\&uploaded=0\&downloaded=0\&left=123\&event=started\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
13 | curl $ANNOUNCE?port=6002\&compact=1\&uploaded=0\&downloaded=0\&left=123\&event=startedx\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
14 | curl $ANNOUNCE?port=6003\&compact=1\&uploaded=0\&downloaded=0\&left=123\&event=started\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
15 |
16 | # check in again
17 | curl $ANNOUNCE?port=6001\&compact=1\&uploaded=10\&downloaded=0\&left=123\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
18 | curl $ANNOUNCE?port=6002\&compact=1\&uploaded=10\&downloaded=0\&left=123\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
19 | curl $ANNOUNCE?port=6003\&compact=1\&uploaded=10\&downloaded=0\&left=123\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
20 |
21 | # Completed event
22 | curl $ANNOUNCE?port=6001\&compact=1\&uploaded=10\&downloaded=0\&left=0\&event=completed\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
23 |
24 | # Stopped event
25 | curl $ANNOUNCE?port=6001\&compact=1\&uploaded=10\&downloaded=0\&left=0\&event=stopped\&info_hash=%a4%1d%1f%89\(dT%b1%8d%8dL%b2%e0/%fe%11Xtv%c4
26 |
27 | curl $SCRAPE
28 |
--------------------------------------------------------------------------------
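
The announce requests above are plain HTTP GETs against the tracker. As an illustration (not code from this repository), the same announce URL can be assembled in Go with net/url; the raw bytes below match the info_hash used in queryTracker.bash:

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// Raw 20-byte info_hash, normally taken from the torrent's metainfo.
    	infoHash := "\xa4\x1d\x1f\x89(dT\xb1\x8d\x8dL\xb2\xe0/\xfe\x11Xtv\xc4"

    	v := url.Values{}
    	v.Set("info_hash", infoHash) // Encode percent-escapes the raw bytes
    	v.Set("port", "6001")
    	v.Set("compact", "1")
    	v.Set("uploaded", "0")
    	v.Set("downloaded", "0")
    	v.Set("left", "123")
    	v.Set("event", "started")

    	fmt.Println("http://127.0.0.1:8080/announce?" + v.Encode())
    }
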
/resolveBindIP.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net"
7 | "regexp"
8 | "strconv"
9 | )
10 |
11 | // resolveBindIPAddr resolves an IP address, an interface name, or an interface name followed by [N].
12 | // The empty string resolves as nil.
13 | func resolveBindIPAddr(bindIP string) (resolved *net.IPAddr, err error) {
14 | // Parse addresses of form en0[2]
15 | matches := regexp.MustCompile(`(.+)\[(\d+)\]`).FindStringSubmatch(bindIP)
16 | if matches != nil {
17 | var i int
18 | i, err = strconv.Atoi(matches[2])
19 | if err != nil {
20 | return
21 | }
22 | return resolveInterfaceIndex(matches[1], i)
23 | }
24 | // Parse addresses of form en0
25 | resolved, err = resolveInterfaceIndex(bindIP, 0)
26 | if err == nil {
27 | return
28 | }
29 | // Parse addresses of form 192.168.0.10 or IPV6 equivalent.
30 | return net.ResolveIPAddr("ip", bindIP)
31 | }
32 |
33 | func resolveInterfaceIndex(ifName string, i int) (resolved *net.IPAddr, err error) {
34 | netInterface, err := net.InterfaceByName(ifName)
35 | if err != nil {
36 | return
37 | }
38 | addrs, err := netInterface.Addrs()
39 | if err != nil {
40 | return
41 | }
42 | if i < 0 || i >= len(addrs) {
43 | err = fmt.Errorf("Address index %d out of range 0..%d for interface %v", i, len(addrs), ifName)
44 | return
45 | }
46 | addr := addrs[i]
47 | if addrIPNet, ok := addr.(*net.IPNet); ok {
48 | // addr is a net.IPNet, convert it into a net.IPAddr
49 | resolved = &net.IPAddr{IP: addrIPNet.IP}
50 | } else {
51 | err = fmt.Errorf("%s[%d] is not an IP address", ifName, i)
52 | log.Println(err)
53 | }
54 | return
55 | }
56 |
--------------------------------------------------------------------------------
/resolveBindIP_test.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net"
6 | "strings"
7 | "testing"
8 | )
9 |
10 | type bindIPTestData []struct {
11 | in string
12 | out string
13 | }
14 |
15 | var bindIPTests = bindIPTestData{
16 | {"", ""},
17 | {"junk", "error"},
18 | {"junk[999]", "error"},
19 | {"192.168.1.10", "192.168.1.10"},
20 | {"2001:db8:85a3::8a2e:370:7334", "2001:db8:85a3::8a2e:370:7334"},
21 | }
22 |
23 | func TestResolveBindIP(t *testing.T) {
24 | for _, tt := range bindIPTests {
25 | resolveBindIPAddrCase(t, tt.in, tt.out)
26 | }
27 | }
28 |
29 | func resolveBindIPAddrCase(t *testing.T, in string, out string) {
30 | bindIPAddr, err := resolveBindIPAddr(in)
31 | if err != nil {
32 | if out != "error" {
33 | t.Errorf("resolveBindIPAddr(%q) => error %v, want %q", in, err, out)
34 | }
35 | } else {
36 | bindIP := bindIPAddr.String()
37 | if bindIP != out {
38 | t.Errorf("resolveBindIPAddr(%q) => %v, want %q", in, bindIP, out)
39 | }
40 | }
41 | }
42 |
43 | func TestDynamicParseBindIP(t *testing.T) {
44 | netIFs, err := net.Interfaces()
45 | if err != nil {
46 | t.Errorf("net.Interfaces() => %v", err)
47 | }
48 | if len(netIFs) == 0 {
49 | t.Errorf("net.Interfaces() => no interfaces")
50 | }
51 | netIF := netIFs[0]
52 | addrs, err := netIFs[0].Addrs()
53 | if err != nil {
54 | t.Errorf("%v.Addrs() => %v", netIF.Name, err)
55 | }
56 | if len(addrs) == 0 {
57 | t.Errorf("%v.Addrs() => no addresses", netIF.Name)
58 | }
59 | resolveBindIPAddrCase(t, netIF.Name, stripMask(addrs[0].String()))
60 | resolveBindIPAddrCase(t, fmt.Sprintf("%v[%d]", netIF.Name, -1), "error")
61 | resolveBindIPAddrCase(t, fmt.Sprintf("%v[%d]", netIF.Name, len(addrs)), "error")
62 | for i, addr := range addrs {
63 | resolveBindIPAddrCase(t, fmt.Sprintf("%v[%d]", netIF.Name, i), stripMask(addr.String()))
64 | }
65 | }
66 |
67 | func stripMask(ipnet string) string {
68 | return strings.SplitN(ipnet, "/", 2)[0]
69 | }
70 |
--------------------------------------------------------------------------------
/test.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Test the torrent
3 | set -e
4 | echo "Building..."
5 | go clean
6 | go build -race
7 | echo "Running unit tests..."
8 | cd torrent
9 | go test -race
10 | cd ..
11 | echo "Running a torrent. Type control-C to exit"
12 | # ./Taipei-Torrent -fileDir=testData/downloads -port=63881 -useUPnP=true testData/a.torrent
13 | # ./Taipei-Torrent -fileDir=testData/downloads -port=63881 -useNATPMP -gateway 192.168.1.1 testData/a.torrent
14 | ./Taipei-Torrent -fileDir=testData/downloads -port=63881 --seedRatio=0 testData/a.torrent
15 |
--------------------------------------------------------------------------------
/testData/a.torrent:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jackpal/Taipei-Torrent/6808fdfe24b4db505476ef48b9e288a5e7398f77/testData/a.torrent
--------------------------------------------------------------------------------
/testData/testFile:
--------------------------------------------------------------------------------
1 | When in the Course of human events it becomes necessary for one people to dissolve the political bands which have connected them with another and to assume among the powers of the earth, the separate and equal station to which the Laws of Nature and of Nature's God entitle them, a decent respect to the opinions of mankind requires that they should declare the causes which impel them to the separation.
2 |
3 | We hold these truths to be self-evident, that all men are created equal, that they are endowed by their Creator with certain unalienable Rights, that among these are Life, Liberty and the pursuit of Happiness. — That to secure these rights, Governments are instituted among Men, deriving their just powers from the consent of the governed, — That whenever any Form of Government becomes destructive of these ends, it is the Right of the People to alter or to abolish it, and to institute new Government, laying its foundation on such principles and organizing its powers in such form, as to them shall seem most likely to effect their Safety and Happiness. Prudence, indeed, will dictate that Governments long established should not be changed for light and transient causes; and accordingly all experience hath shewn that mankind are more disposed to suffer, while evils are sufferable than to right themselves by abolishing the forms to which they are accustomed. But when a long train of abuses and usurpations, pursuing invariably the same Object evinces a design to reduce them under absolute Despotism, it is their right, it is their duty, to throw off such Government, and to provide new Guards for their future security. — Such has been the patient sufferance of these Colonies; and such is now the necessity which constrains them to alter their former Systems of Government. The history of the present King of Great Britain is a history of repeated injuries and usurpations, all having in direct object the establishment of an absolute Tyranny over these States. To prove this, let Facts be submitted to a candid world.
4 |
5 | He has refused his Assent to Laws, the most wholesome and necessary for the public good.
6 |
7 | He has forbidden his Governors to pass Laws of immediate and pressing importance, unless suspended in their operation till his Assent should be obtained; and when so suspended, he has utterly neglected to attend to them.
8 |
9 | He has refused to pass other Laws for the accommodation of large districts of people, unless those people would relinquish the right of Representation in the Legislature, a right inestimable to them and formidable to tyrants only.
10 |
11 | He has called together legislative bodies at places unusual, uncomfortable, and distant from the depository of their Public Records, for the sole purpose of fatiguing them into compliance with his measures.
12 |
13 | He has dissolved Representative Houses repeatedly, for opposing with manly firmness his invasions on the rights of the people.
14 |
15 | He has refused for a long time, after such dissolutions, to cause others to be elected, whereby the Legislative Powers, incapable of Annihilation, have returned to the People at large for their exercise; the State remaining in the mean time exposed to all the dangers of invasion from without, and convulsions within.
16 |
17 | He has endeavoured to prevent the population of these States; for that purpose obstructing the Laws for Naturalization of Foreigners; refusing to pass others to encourage their migrations hither, and raising the conditions of new Appropriations of Lands.
18 |
19 | He has obstructed the Administration of Justice by refusing his Assent to Laws for establishing Judiciary Powers.
20 |
21 | He has made Judges dependent on his Will alone for the tenure of their offices, and the amount and payment of their salaries.
22 |
23 | He has erected a multitude of New Offices, and sent hither swarms of Officers to harass our people and eat out their substance.
24 |
25 | He has kept among us, in times of peace, Standing Armies without the Consent of our legislatures.
26 |
27 | He has affected to render the Military independent of and superior to the Civil Power.
28 |
29 | He has combined with others to subject us to a jurisdiction foreign to our constitution, and unacknowledged by our laws; giving his Assent to their Acts of pretended Legislation:
30 |
31 | For quartering large bodies of armed troops among us:
32 |
33 | For protecting them, by a mock Trial from punishment for any Murders which they should commit on the Inhabitants of these States:
34 |
35 | For cutting off our Trade with all parts of the world:
36 |
37 | For imposing Taxes on us without our Consent:
38 |
39 | For depriving us in many cases, of the benefit of Trial by Jury:
40 |
41 | For transporting us beyond Seas to be tried for pretended offences:
42 |
43 | For abolishing the free System of English Laws in a neighbouring Province, establishing therein an Arbitrary government, and enlarging its Boundaries so as to render it at once an example and fit instrument for introducing the same absolute rule into these Colonies
44 |
45 | For taking away our Charters, abolishing our most valuable Laws and altering fundamentally the Forms of our Governments:
46 |
47 | For suspending our own Legislatures, and declaring themselves invested with power to legislate for us in all cases whatsoever.
48 |
49 | He has abdicated Government here, by declaring us out of his Protection and waging War against us.
50 |
51 | He has plundered our seas, ravaged our coasts, burnt our towns, and destroyed the lives of our people.
52 |
53 | He is at this time transporting large Armies of foreign Mercenaries to compleat the works of death, desolation, and tyranny, already begun with circumstances of Cruelty & Perfidy scarcely paralleled in the most barbarous ages, and totally unworthy the Head of a civilized nation.
54 |
55 | He has constrained our fellow Citizens taken Captive on the high Seas to bear Arms against their Country, to become the executioners of their friends and Brethren, or to fall themselves by their Hands.
56 |
57 | He has excited domestic insurrections amongst us, and has endeavoured to bring on the inhabitants of our frontiers, the merciless Indian Savages whose known rule of warfare, is an undistinguished destruction of all ages, sexes and conditions.
58 |
59 | In every stage of these Oppressions We have Petitioned for Redress in the most humble terms: Our repeated Petitions have been answered only by repeated injury. A Prince, whose character is thus marked by every act which may define a Tyrant, is unfit to be the ruler of a free people.
60 |
61 | Nor have We been wanting in attentions to our British brethren. We have warned them from time to time of attempts by their legislature to extend an unwarrantable jurisdiction over us. We have reminded them of the circumstances of our emigration and settlement here. We have appealed to their native justice and magnanimity, and we have conjured them by the ties of our common kindred to disavow these usurpations, which would inevitably interrupt our connections and correspondence. They too have been deaf to the voice of justice and of consanguinity. We must, therefore, acquiesce in the necessity, which denounces our Separation, and hold them, as we hold the rest of mankind, Enemies in War, in Peace Friends.
62 |
63 | We, therefore, the Representatives of the united States of America, in General Congress, Assembled, appealing to the Supreme Judge of the world for the rectitude of our intentions, do, in the Name, and by Authority of the good People of these Colonies, solemnly publish and declare, That these united Colonies are, and of Right ought to be Free and Independent States, that they are Absolved from all Allegiance to the British Crown, and that all political connection between them and the State of Great Britain, is and ought to be totally dissolved; and that as Free and Independent States, they have full Power to levy War, conclude Peace, contract Alliances, establish Commerce, and to do all other Acts and Things which Independent States may of right do. — And for the support of this Declaration, with a firm reliance on the protection of Divine Providence, we mutually pledge to each other our Lives, our Fortunes, and our sacred Honor.
64 |
--------------------------------------------------------------------------------
/testdht.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Test the torrent
3 |
4 | ./Taipei-Torrent -fileDir=testData/downloads -port 63881 -useUPnP=false -useDHT -trackerlessMode=true testData/a.torrent
5 |
--------------------------------------------------------------------------------
/testswarm.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Test the swarm
3 | set -e
4 | echo "Cleaning.."
5 | go clean
6 | echo "Building..."
7 | # Use "install" so Taipei-Torrent gets placed on the command path, so that it
8 | # can be run as part of the test. Not very hermetic.
9 | go install -race ./...
10 | echo "Running unit tests..."
11 | cd torrent
12 | go test -race
13 | cd ..
14 | cd tracker
15 | go test -race
16 | cd ..
17 | echo "Done"
--------------------------------------------------------------------------------
/testtracker.bash:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Test the tracker
3 | set -e
4 | echo "Building..."
5 | go clean
6 | go build -race
7 | echo "Running unit tests..."
8 | cd torrent
9 | go test -race
10 | cd ..
11 | cd tracker
12 | go test -race
13 | cd ..
14 | echo "Running a tracker. Type control-C to exit"
15 | ./Taipei-Torrent -createTracker -port=63881 testData/a.torrent
16 |
--------------------------------------------------------------------------------
/torrent/accumulator.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import "time"
4 |
5 | // An accumulator that keeps track of the rate of increase.
6 | type Accumulator struct {
7 | maxRatePeriod time.Duration
8 | rateSince time.Time
9 | last time.Time
10 | rate float64
11 | total int64
12 | }
13 |
14 | func NewAccumulator(now time.Time, maxRatePeriod time.Duration) (acc *Accumulator) {
15 | acc = &Accumulator{}
16 | acc.maxRatePeriod = maxRatePeriod
17 | acc.rateSince = now.Add(time.Second * -1)
18 | acc.last = acc.rateSince
19 | acc.rate = 0.0
20 | acc.total = 0
21 | return acc
22 | }
23 |
24 | func (a *Accumulator) Add(now time.Time, amount int64) {
25 | a.total += amount
26 | a.rate = (a.rate*float64(a.last.Sub(a.rateSince)) + float64(amount)) /
27 | float64(now.Sub(a.rateSince))
28 | a.last = now
29 | newRateSince := now.Add(-a.maxRatePeriod)
30 | if a.rateSince.Before(newRateSince) {
31 | a.rateSince = newRateSince
32 | }
33 | }
34 |
35 | func (a *Accumulator) GetRate(now time.Time) float64 {
36 | a.Add(now, 0)
37 | return a.GetRateNoUpdate()
38 | }
39 |
40 | func (a *Accumulator) GetRateNoUpdate() float64 {
41 | return a.rate * float64(time.Second)
42 | }
43 |
44 | func (a *Accumulator) DurationUntilRate(now time.Time, newRate float64) time.Duration {
45 | rate := a.rate
46 | if rate <= newRate {
47 | return time.Duration(0)
48 | }
49 | dt := float64(now.Sub(a.rateSince))
50 | return time.Duration(((rate * dt) / newRate) - dt)
51 | }
52 |
53 | func (a *Accumulator) getTotal() int64 {
54 | return a.total
55 | }
56 |
--------------------------------------------------------------------------------
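
A short illustrative sketch (same package, made-up byte counts) of how the Accumulator above is driven to report a download rate in bytes per second:

    package torrent

    import (
    	"fmt"
    	"time"
    )

    func ExampleAccumulator() {
    	start := time.Now()
    	// Track downloaded bytes, averaging over at most the last 30 seconds.
    	acc := NewAccumulator(start, 30*time.Second)

    	acc.Add(start, 16384)                    // 16 KiB received now
    	acc.Add(start.Add(2*time.Second), 32768) // 32 KiB two seconds later

    	// GetRate reports the smoothed rate, in bytes per second, as of the given time.
    	fmt.Printf("%.0f bytes/sec\n", acc.GetRate(start.Add(2*time.Second)))
    }
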
/torrent/accumulator_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "testing"
5 | "time"
6 | )
7 |
8 | func TestAccumulator(t *testing.T) {
9 | start := time.Now()
10 | a := NewAccumulator(start, time.Minute)
11 | checkAcc(t, a, 0)
12 | a.Add(start, 1)
13 | checkAcc(t, a, 1)
14 | a.Add(start, 10)
15 | checkAcc(t, a, 11)
16 | checkRate(t, start, a, 11)
17 | middle := start.Add(time.Second)
18 | checkRate(t, middle, a, 5.5)
19 | a.Add(middle, 10)
20 | checkAcc(t, a, 21)
21 | checkRate(t, middle, a, 10.5)
22 | }
23 |
24 | func checkAcc(t *testing.T, a *Accumulator, expectedTotal int64) {
25 | total := a.getTotal()
26 | if total != expectedTotal {
27 | t.Errorf("Expected total %d actual %d", expectedTotal, total)
28 | }
29 | }
30 |
31 | func checkRate(t *testing.T, now time.Time, a *Accumulator, expectedRate float64) {
32 | rate := a.GetRate(now)
33 | if rate != expectedRate {
34 | t.Errorf("a.GetRate(%v) = %g. Expected %g", now, rate, expectedRate)
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
/torrent/bitset.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "fmt"
5 | )
6 |
7 | // As defined by the bittorrent protocol, this bitset is big-endian, such that
8 | // the high bit of the first byte is piece 0.
9 |
10 | type Bitset struct {
11 | b []byte
12 | n int
13 | endIndex int
14 | endMask byte // Which bits of the last byte are valid
15 | }
16 |
17 | func NewBitset(n int) *Bitset {
18 | endIndex, endOffset := n>>3, n&7
19 | endMask := ^byte(255 >> byte(endOffset))
20 | if endOffset == 0 {
21 | endIndex = -1
22 | }
23 | return &Bitset{make([]byte, (n+7)>>3), n, endIndex, endMask}
24 | }
25 |
26 | // Creates a new bitset from a given byte stream. Returns nil if the
27 | // data is invalid in some way.
28 | func NewBitsetFromBytes(n int, data []byte) *Bitset {
29 | bitset := NewBitset(n)
30 | if len(bitset.b) != len(data) {
31 | return nil
32 | }
33 | copy(bitset.b, data)
34 | if bitset.endIndex >= 0 && bitset.b[bitset.endIndex]&(^bitset.endMask) != 0 {
35 | return nil
36 | }
37 | return bitset
38 | }
39 |
40 | func (b *Bitset) Set(index int) {
41 | b.checkRange(index)
42 | b.b[index>>3] |= byte(128 >> byte(index&7))
43 | }
44 |
45 | func (b *Bitset) Clear(index int) {
46 | b.checkRange(index)
47 | b.b[index>>3] &= ^byte(128 >> byte(index&7))
48 | }
49 |
50 | func (b *Bitset) IsSet(index int) bool {
51 | b.checkRange(index)
52 | return (b.b[index>>3] & byte(128>>byte(index&7))) != 0
53 | }
54 |
55 | func (b *Bitset) Len() int {
56 | return b.n
57 | }
58 |
59 | func (b *Bitset) InRange(index int) bool {
60 | return 0 <= index && index < b.n
61 | }
62 |
63 | func (b *Bitset) checkRange(index int) {
64 | if !b.InRange(index) {
65 | panic(fmt.Sprintf("Index %d out of range 0..%d.", index, b.n))
66 | }
67 | }
68 |
69 | func (b *Bitset) AndNot(b2 *Bitset) {
70 | if b.n != b2.n {
71 | panic(fmt.Sprintf("Unequal bitset sizes %d != %d", b.n, b2.n))
72 | }
73 | for i := 0; i < len(b.b); i++ {
74 | b.b[i] = b.b[i] & ^b2.b[i]
75 | }
76 | b.clearEnd()
77 | }
78 |
79 | func (b *Bitset) clearEnd() {
80 | if b.endIndex >= 0 {
81 | b.b[b.endIndex] &= b.endMask
82 | }
83 | }
84 |
85 | func (b *Bitset) IsEndValid() bool {
86 | if b.endIndex >= 0 {
87 | return (b.b[b.endIndex] & b.endMask) == 0
88 | }
89 | return true
90 | }
91 |
92 | // TODO: Make this fast
93 | func (b *Bitset) FindNextSet(index int) int {
94 | for i := index; i < b.n; i++ {
95 | if (b.b[i>>3] & byte(128>>byte(i&7))) != 0 {
96 | return i
97 | }
98 | }
99 | return -1
100 | }
101 |
102 | // TODO: Make this fast
103 | func (b *Bitset) FindNextClear(index int) int {
104 | for i := index; i < b.n; i++ {
105 | if (b.b[i>>3] & byte(128>>byte(i&7))) == 0 {
106 | return i
107 | }
108 | }
109 | return -1
110 | }
111 |
112 | func (b *Bitset) Bytes() []byte {
113 | return b.b
114 | }
115 |
--------------------------------------------------------------------------------
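
A small illustrative sketch of the big-endian layout noted at the top of the file: piece 0 maps to the high bit of the first byte of the backing storage.

    package torrent

    import "fmt"

    func ExampleBitset() {
    	b := NewBitset(10) // 10 pieces -> 2 bytes of backing storage
    	b.Set(0)           // piece 0 is the high bit of byte 0 (0x80)
    	b.Set(9)           // piece 9 is the second-highest bit of byte 1 (0x40)

    	fmt.Printf("% X\n", b.Bytes())
    	// Prints: 80 40
    }
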
/torrent/cache.go:
--------------------------------------------------------------------------------
1 | // cache
2 | package torrent
3 |
4 | import (
5 | "log"
6 | "math"
7 | "sort"
8 | "sync/atomic"
9 | "time"
10 | )
11 |
12 | type CacheProvider interface {
13 | NewCache(infohash string, numPieces int, pieceLength int64, totalSize int64, underlying FileStore) FileStore
14 | }
15 |
16 | type inttuple struct {
17 | a, b int
18 | }
19 |
20 | type accessTime struct {
21 | index int
22 | atime time.Time
23 | }
24 | type byTime []accessTime
25 |
26 | func (a byTime) Len() int { return len(a) }
27 | func (a byTime) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
28 | func (a byTime) Less(i, j int) bool { return a[i].atime.Before(a[j].atime) }
29 |
30 | //This provider creates a RAM cache for each torrent.
31 | //Each time a cache is created or closed, all caches
32 | //are recalculated so they total <= capacity (in MiB).
33 | type RamCacheProvider struct {
34 | capacity int
35 | caches map[string]*RamCache
36 | }
37 |
38 | func NewRamCacheProvider(capacity int) CacheProvider {
39 | rc := &RamCacheProvider{capacity, make(map[string]*RamCache)}
40 | return rc
41 | }
42 |
43 | func (r *RamCacheProvider) NewCache(infohash string, numPieces int, pieceSize int64, torrentLength int64, underlying FileStore) FileStore {
44 | i := uint32(1)
45 | rc := &RamCache{pieceSize: pieceSize, atimes: make([]time.Time, numPieces), store: make([][]byte, numPieces),
46 | torrentLength: torrentLength, cacheProvider: r, capacity: &i, infohash: infohash, underlying: underlying}
47 |
48 | r.caches[infohash] = rc
49 | r.rebalance()
50 | return rc
51 | }
52 |
53 | //Rebalance the cache capacity allocations; has to be called on each cache creation or deletion.
54 | func (r *RamCacheProvider) rebalance() {
55 | //Cache size is a diminishing return thing:
56 | //The more of it a torrent has, the less of a difference additional cache makes.
57 | //Thus, instead of scaling the distribution linearly with torrent size, we'll do it by square root
58 | log.Println("Rebalancing caches...")
59 | var scalingTotal float64
60 | sqrts := make(map[string]float64)
61 | for i, cache := range r.caches {
62 | sqrts[i] = math.Sqrt(float64(cache.torrentLength))
63 | scalingTotal += sqrts[i]
64 | }
65 |
66 | scalingFactor := float64(r.capacity*1024*1024) / scalingTotal
67 | for i, cache := range r.caches {
68 | newCap := int64(math.Floor(scalingFactor * sqrts[i] / float64(cache.pieceSize)))
69 | if newCap == 0 {
70 | newCap = 1 //Something's better than nothing!
71 | }
72 | log.Printf("Setting cache '%x' to new capacity %v (%v MiB)", cache.infohash, newCap, float32(newCap*cache.pieceSize)/float32(1024*1024))
73 | cache.setCapacity(uint32(newCap))
74 | }
75 |
76 | for _, cache := range r.caches {
77 | cache.trim()
78 | }
79 | }
80 |
81 | func (r *RamCacheProvider) cacheClosed(infohash string) {
82 | delete(r.caches, infohash)
83 | r.rebalance()
84 | }
85 |
86 | //'pieceSize' is the size of the average piece
87 | //'capacity' is how many pieces the cache can hold
88 | //'actualUsage' is how many pieces the cache has at the moment
89 | //'atimes' is an array of access times for each stored box
90 | //'store' is an array of "boxes" ([]byte of 1 piece each)
91 | //'torrentLength' is the number of bytes in the torrent
92 | //'cacheProvider' is a pointer to the cacheProvider that created this cache
93 | //'infohash' is the infohash of the torrent
94 | type RamCache struct {
95 | pieceSize int64
96 | capacity *uint32 //Access only through getter/setter
97 | actualUsage int
98 | atimes []time.Time
99 | store [][]byte
100 | torrentLength int64
101 | cacheProvider *RamCacheProvider
102 | infohash string
103 | underlying FileStore
104 | }
105 |
106 | func (r *RamCache) Close() error {
107 | r.cacheProvider.cacheClosed(r.infohash)
108 | r.store = nil
109 | return r.underlying.Close()
110 | }
111 |
112 | func (r *RamCache) ReadAt(p []byte, off int64) (retInt int, retErr error) {
113 | boxI := off / r.pieceSize
114 | boxOff := off % r.pieceSize
115 |
116 | for i := 0; i < len(p); {
117 |
118 | var buffer []byte
119 | if r.store[boxI] != nil { //in cache
120 | buffer = r.store[boxI]
121 | r.atimes[boxI] = time.Now()
122 | } else { //not in cache
123 | bufferLength := r.pieceSize
124 | bufferOffset := boxI * r.pieceSize
125 |
126 | if bufferLength > r.torrentLength-bufferOffset { //do we want the last, smaller than usual piece?
127 | bufferLength = r.torrentLength - bufferOffset
128 | }
129 |
130 | buffer = make([]byte, bufferLength)
131 | r.underlying.ReadAt(buffer, bufferOffset)
132 | r.addBox(buffer, int(boxI))
133 | }
134 |
135 | i += copy(p[i:], buffer[boxOff:])
136 | boxI++
137 | boxOff = 0
138 | }
139 |
140 | retInt = len(p)
141 | return
142 | }
143 |
144 | func (r *RamCache) WritePiece(p []byte, boxI int) (n int, err error) {
145 |
146 | if r.store[boxI] != nil { //box exists, our work is done
147 | log.Println("Got a WritePiece for a piece we should already have:", boxI)
148 | return
149 | }
150 |
151 | r.addBox(p, boxI)
152 |
153 | //TODO: Maybe goroutine the calls to underlying?
154 | return r.underlying.WritePiece(p, boxI)
155 | }
156 |
157 | func (r *RamCache) addBox(p []byte, boxI int) {
158 | r.store[boxI] = p
159 | r.atimes[boxI] = time.Now()
160 | r.actualUsage++
161 | r.trim()
162 | }
163 |
164 | func (r *RamCache) removeBox(boxI int) {
165 | r.store[boxI] = nil
166 | r.actualUsage--
167 | }
168 |
169 | func (r *RamCache) getCapacity() int {
170 | return int(atomic.LoadUint32(r.capacity))
171 | }
172 |
173 | func (r *RamCache) setCapacity(capacity uint32) {
174 | atomic.StoreUint32(r.capacity, capacity)
175 | }
176 |
177 | //Trim excess data.
178 | func (r *RamCache) trim() {
179 | if r.actualUsage <= r.getCapacity() {
180 | return
181 | }
182 |
183 | //Figure out what's oldest and clear that then
184 | tATA := make([]accessTime, 0, r.actualUsage)
185 |
186 | for i, atime := range r.atimes {
187 | if r.store[i] != nil {
188 | tATA = append(tATA, accessTime{i, atime})
189 | }
190 | }
191 |
192 | sort.Sort(byTime(tATA))
193 |
194 | deficit := r.actualUsage - r.getCapacity()
195 | for i := 0; i < deficit; i++ {
196 | deadBox := tATA[i].index
197 | r.removeBox(deadBox)
198 | }
199 | }
200 |
201 | //Simple utility for dumping a []byte to log.
202 | //It skips over sections of '0', unlike encoding/hex.Dump()
203 | func Dump(buff []byte) {
204 | log.Println("Dumping []byte len=", len(buff))
205 | for i := 0; i < len(buff); i += 16 {
206 | skipLine := true
207 | for j := i; j < len(buff) && j < 16+i; j++ {
208 | if buff[j] != 0 {
209 | skipLine = false
210 | break
211 | }
212 | }
213 | if !skipLine {
214 | log.Printf("%X: %X\n", i, buff[i:i+16])
215 | }
216 | }
217 | log.Println("Done Dumping")
218 | }
219 |
--------------------------------------------------------------------------------
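
To make the square-root rebalance above concrete, the standalone sketch below (illustrative numbers only) applies the same arithmetic as rebalance() to two torrents sharing one cache budget; a torrent four times as large ends up with roughly twice the cached pieces, not four times as many:

    package main

    import (
    	"fmt"
    	"math"
    )

    func main() {
    	const capacityMiB = 64
    	const pieceSize = 256 * 1024 // 256 KiB pieces, for illustration

    	// Two torrents: 1 GiB and 4 GiB.
    	lengths := []int64{1 << 30, 4 << 30}

    	var scalingTotal float64
    	sqrts := make([]float64, len(lengths))
    	for i, l := range lengths {
    		sqrts[i] = math.Sqrt(float64(l))
    		scalingTotal += sqrts[i]
    	}

    	scalingFactor := float64(capacityMiB*1024*1024) / scalingTotal
    	for i := range lengths {
    		pieces := int64(math.Floor(scalingFactor * sqrts[i] / float64(pieceSize)))
    		fmt.Printf("torrent %d: %d cached pieces (~%.1f MiB)\n",
    			i, pieces, float64(pieces*pieceSize)/(1024*1024))
    	}
    }
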
/torrent/cache_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "crypto/sha1"
5 | "encoding/hex"
6 | "io/ioutil"
7 | "log"
8 | "testing"
9 | )
10 |
11 | func TestCachedFileStoreRead(t *testing.T) {
12 | rcp := NewRamCacheProvider(2000)
13 | for _, testFile := range tests {
14 | fs, err := mkFileStore(testFile)
15 | orig, _ := ioutil.ReadFile(testFile.path)
16 | numPieces := len(orig) / 512
17 | if len(orig)%512 > 0 {
18 | numPieces++
19 | }
20 | tC := rcp.NewCache("test", numPieces, 512, int64(len(orig)), fs)
21 | tC.WritePiece(orig[:512], 0)
22 | tC.WritePiece(orig[512:1024], 1)
23 |
24 | if err != nil {
25 | t.Fatal(err)
26 | }
27 | ret := make([]byte, testFile.fileLen)
28 | _, err = tC.ReadAt(ret, 0)
29 | if err != nil {
30 | t.Fatal(err)
31 | }
32 | wantedsum := sha1.Sum(orig[:testFile.fileLen])
33 | sum1Str := hex.EncodeToString(wantedsum[0:])
34 | gotsum := sha1.Sum(ret)
35 | sum2Str := hex.EncodeToString(gotsum[0:])
36 | if sum1Str != sum2Str {
37 | t.Errorf("Wanted %v, got %v\n on cache read", sum1Str, sum2Str)
38 | for i := 0; i < len(ret); i++ {
39 | if ret[i] != orig[i] {
40 | log.Println("Found a difference at", i, "wanted", orig[i], "got", ret[i])
41 | break
42 | }
43 | }
44 | }
45 |
46 | ret = make([]byte, testFile.fileLen)
47 | _, err = fs.ReadAt(ret, 0)
48 | if err != nil {
49 | t.Fatal(err)
50 | }
51 | gotsum = sha1.Sum(ret)
52 | sum2Str = hex.EncodeToString(gotsum[0:])
53 | if sum1Str != sum2Str {
54 | t.Errorf("Wanted %v, got %v\n on filestore read", sum1Str, sum2Str)
55 | for i := 0; i < len(ret); i++ {
56 | if ret[i] != orig[i] {
57 | log.Println("Found a difference at", i, "wanted", orig[i], "got", ret[i])
58 | break
59 | }
60 | }
61 | }
62 |
63 | fs.Close()
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/torrent/choker.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "math/rand"
5 | "sort"
6 | )
7 |
8 | // BitTorrent choking policy.
9 |
10 | // The choking policy's view of a peer. For current policies we only care
11 | // about identity and download bandwidth.
12 | type Choker interface {
13 | DownloadBPS() float32 // bps
14 | }
15 |
16 | type ChokePolicy interface {
17 | // Only pass in interested peers.
18 | // mutate the chokers into a list where the first N are to be unchoked.
19 | Choke(chokers []Choker) (unchokeCount int, err error)
20 | }
21 |
22 | // Our naive never-choke policy
23 | type NeverChokePolicy struct{}
24 |
25 | func (n *NeverChokePolicy) Choke(chokers []Choker) (unchokeCount int, err error) {
26 | return len(chokers), nil
27 | }
28 |
29 | // Our interpretation of the classic bittorrent choke policy.
30 | // Expects to be called once every 10 seconds.
31 | // See the section "Choking and optimistic unchoking" in
32 | // https://wiki.theory.org/BitTorrentSpecification
33 | type ClassicChokePolicy struct {
34 | optimisticUnchoker Choker // The choker we unchoked optimistically
35 | counter int // When to choose a new optimisticUnchoker.
36 | }
37 |
38 | type ByDownloadBPS []Choker
39 |
40 | func (a ByDownloadBPS) Len() int {
41 | return len(a)
42 | }
43 |
44 | func (a ByDownloadBPS) Swap(i, j int) {
45 | a[i], a[j] = a[j], a[i]
46 | }
47 |
48 | func (a ByDownloadBPS) Less(i, j int) bool {
49 | return a[i].DownloadBPS() > a[j].DownloadBPS()
50 | }
51 |
52 | const HIGH_BANDWIDTH_SLOTS = 3
53 | const OPTIMISTIC_UNCHOKE_INDEX = HIGH_BANDWIDTH_SLOTS
54 |
55 | // How many cycles of this algorithm before we pick a new optimistic
56 | const OPTIMISTIC_UNCHOKE_COUNT = 3
57 |
58 | func (ccp *ClassicChokePolicy) Choke(chokers []Choker) (unchokeCount int, err error) {
59 | sort.Sort(ByDownloadBPS(chokers))
60 |
61 | optimistIndex := ccp.findOptimist(chokers)
62 | if optimistIndex >= 0 {
63 | if optimistIndex < OPTIMISTIC_UNCHOKE_INDEX {
64 | // Forget optimistic choke
65 | optimistIndex = -1
66 | } else {
67 | ByDownloadBPS(chokers).Swap(OPTIMISTIC_UNCHOKE_INDEX, optimistIndex)
68 | optimistIndex = OPTIMISTIC_UNCHOKE_INDEX
69 | }
70 | }
71 |
72 | if optimistIndex >= 0 {
73 | ccp.counter++
74 | if ccp.counter >= OPTIMISTIC_UNCHOKE_COUNT {
75 | ccp.counter = 0
76 | optimistIndex = -1
77 | }
78 | }
79 |
80 | if optimistIndex < 0 {
81 | candidateCount := len(chokers) - OPTIMISTIC_UNCHOKE_INDEX
82 | if candidateCount > 0 {
83 | candidate := OPTIMISTIC_UNCHOKE_INDEX + rand.Intn(candidateCount)
84 | ByDownloadBPS(chokers).Swap(OPTIMISTIC_UNCHOKE_INDEX, candidate)
85 | ccp.counter = 0
86 | ccp.optimisticUnchoker = chokers[OPTIMISTIC_UNCHOKE_INDEX]
87 | }
88 | }
89 | unchokeCount = OPTIMISTIC_UNCHOKE_INDEX + 1
90 | if unchokeCount > len(chokers) {
91 | unchokeCount = len(chokers)
92 | }
93 | return
94 | }
95 |
96 | func (ccp *ClassicChokePolicy) findOptimist(chokers []Choker) (index int) {
97 | for i, c := range chokers {
98 | if c == ccp.optimisticUnchoker {
99 | return i
100 | }
101 | }
102 | return -1
103 | }
104 |
--------------------------------------------------------------------------------
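
An illustrative sketch (same package, with a stand-in Choker type) of what ClassicChokePolicy.Choke does with a set of interested peers: after the call, the first HIGH_BANDWIDTH_SLOTS entries are the fastest downloaders, the slot at OPTIMISTIC_UNCHOKE_INDEX holds a randomly chosen optimistic unchoke, and the first unchokeCount peers are the ones to unchoke:

    package torrent

    import "fmt"

    // fakePeer is a stand-in Choker used only for this sketch.
    type fakePeer struct {
    	name string
    	bps  float32
    }

    func (p *fakePeer) DownloadBPS() float32 { return p.bps }

    func ExampleClassicChokePolicy() {
    	peers := []Choker{
    		&fakePeer{"a", 100}, &fakePeer{"b", 900}, &fakePeer{"c", 300},
    		&fakePeer{"d", 700}, &fakePeer{"e", 500},
    	}
    	policy := &ClassicChokePolicy{}
    	unchokeCount, _ := policy.Choke(peers)

    	for _, p := range peers[:unchokeCount] {
    		fmt.Println(p.(*fakePeer).name) // b, d, e, plus one optimistic pick
    	}
    }
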
/torrent/choker_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "testing"
7 | )
8 |
9 | type testChoker struct {
10 | name string
11 | downloadBPS float32
12 | }
13 |
14 | func (t *testChoker) DownloadBPS() float32 {
15 | return t.downloadBPS
16 | }
17 |
18 | func (t *testChoker) String() string {
19 | return fmt.Sprintf("{%#v, %g}", t.name, t.downloadBPS)
20 | }
21 |
22 | var chokersSets [][]*testChoker = [][]*testChoker{
23 | []*testChoker{},
24 | []*testChoker{{"a", 0}},
25 | []*testChoker{{"a", 0}, {"b", 1}},
26 | []*testChoker{{"a", 0}, {"b", 1}, {"c", 2}},
27 | []*testChoker{{"a", 0}, {"b", 1}, {"c", 2}, {"d", 3}},
28 | []*testChoker{{"a", 0}, {"b", 1}, {"c", 2}, {"d", 3},
29 | {"e", 4}, {"f", 5}, {"g", 6}},
30 | }
31 |
32 | func toChokerSlice(chokers []*testChoker) (result []Choker) {
33 | result = make([]Choker, len(chokers))
34 | for i, c := range chokers {
35 | result[i] = Choker(c)
36 | }
37 | return
38 | }
39 |
40 | func TestNeverChokePolicy(t *testing.T) {
41 | for _, chokers := range chokersSets {
42 | policy := NeverChokePolicy{}
43 | candidates := toChokerSlice(chokers)
44 | candidatesCopy := append([]Choker{}, candidates...)
45 | unchokeCount, err := policy.Choke(candidates)
46 | if err != nil || unchokeCount != len(candidates) ||
47 | !similar(candidates, candidatesCopy) {
48 | t.Errorf("NeverChokePolicy.Choke(%v) => %v, %d, %v",
49 | candidatesCopy, candidates, unchokeCount, err)
50 | }
51 | }
52 | }
53 |
54 | // Check that a and b both have the same elements (different order).
55 | func similar(a, b []Choker) bool {
56 | if len(a) != len(b) {
57 | return false
58 | }
59 | // O(n^2)
60 | for _, aa := range a {
61 | found := false
62 | for _, bb := range b {
63 | if aa == bb {
64 | found = true
65 | break
66 | }
67 | }
68 | if !found {
69 | return false
70 | }
71 | }
72 | return true
73 | }
74 |
75 | func TestClassicChokePolicy(t *testing.T) {
76 | for _, chokers := range chokersSets {
77 | policy := ClassicChokePolicy{}
78 | candidates := toChokerSlice(chokers)
79 | candidatesCopy := append([]Choker{}, candidates...)
80 | unchokeCount, err := policy.Choke(candidates)
81 | expectedUnchokeCount := len(candidates)
82 | maxUnchokeCount := OPTIMISTIC_UNCHOKE_INDEX + 1
83 | if expectedUnchokeCount > maxUnchokeCount {
84 | expectedUnchokeCount = maxUnchokeCount
85 | }
86 | if err != nil || unchokeCount != expectedUnchokeCount ||
87 | !similar(candidates, candidatesCopy) ||
88 | !verifyClassicSortOrder(candidates, HIGH_BANDWIDTH_SLOTS) {
89 | t.Errorf("ClassicChokePolicy.Choke(%v) => %v, %d, %v",
90 | candidatesCopy, candidates, unchokeCount, err)
91 | }
92 | }
93 | }
94 |
95 | func verifyClassicSortOrder(a []Choker, highBandwidthSlotCount int) bool {
96 | var lowestHighBandwidthSlotBps float32 = float32(math.Inf(0))
97 | for i, aa := range a {
98 | bps := aa.DownloadBPS()
99 | if i < highBandwidthSlotCount {
100 | if bps < lowestHighBandwidthSlotBps {
101 | lowestHighBandwidthSlotBps = bps
102 | }
103 | } else if bps > lowestHighBandwidthSlotBps {
104 | return false
105 | }
106 | }
107 | return true
108 | }
109 |
--------------------------------------------------------------------------------
/torrent/execOnSeeding.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "os/exec"
7 | )
8 |
9 | func (t *TorrentSession) execOnSeeding() {
10 | cmd := exec.Command(t.flags.ExecOnSeeding)
11 | cmd.Env = []string{
12 | fmt.Sprintf("TORRENT_FILE=%s", t.torrentFile),
13 | fmt.Sprintf("TORRENT_INFOHASH=%x", t.M.InfoHash),
14 | }
15 | starterr := cmd.Start()
16 | if starterr != nil {
17 | log.Printf("[ %s ] Error starting '%s': %v\n", t.M.Info.Name, t.flags.ExecOnSeeding, starterr)
18 | return
19 | }
20 |
21 | go func() {
22 | err := cmd.Wait()
23 | if err != nil {
24 | log.Printf("[ %s ] Error while executing '%s': %v\n", t.M.Info.Name, t.flags.ExecOnSeeding, err)
25 | } else {
26 | log.Printf("[ %s ] Executing finished on '%s'\n", t.M.Info.Name, t.flags.ExecOnSeeding)
27 | }
28 | }()
29 | }
30 |
--------------------------------------------------------------------------------
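
execOnSeeding hands context to the hook purely through environment variables (and, because cmd.Env is replaced wholesale, those are the only variables the hook sees). A hypothetical external hook program could read them like this:

    package main

    import (
    	"log"
    	"os"
    )

    func main() {
    	// Set by Taipei-Torrent when the hook is launched; see execOnSeeding above.
    	torrentFile := os.Getenv("TORRENT_FILE")
    	infoHash := os.Getenv("TORRENT_INFOHASH")

    	log.Printf("torrent %s (infohash %s) is now seeding", torrentFile, infoHash)
    	// ...move the completed download, send a notification, etc.
    }
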
/torrent/files.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "errors"
5 | "io"
6 | )
7 |
8 | // Interface for a file.
9 | // Multiple goroutines may access a File at the same time.
10 | type File interface {
11 | io.ReaderAt
12 | io.WriterAt
13 | io.Closer
14 | }
15 |
16 | //Interface for a provider of filesystems.
17 | type FsProvider interface {
18 | NewFS(directory string) (FileSystem, error)
19 | }
20 |
21 | // Interface for a file system. A file system contains files.
22 | type FileSystem interface {
23 | Open(name []string, length int64) (file File, err error)
24 | io.Closer
25 | }
26 |
27 | // A torrent file store.
28 | // WritePiece should be called for full, verified pieces only.
29 | type FileStore interface {
30 | io.ReaderAt
31 | io.Closer
32 | WritePiece(buffer []byte, piece int) (written int, err error)
33 | }
34 |
35 | type fileStore struct {
36 | fileSystem FileSystem
37 | offsets []int64
38 | files []fileEntry // Stored in increasing globalOffset order
39 | pieceSize int64
40 | }
41 |
42 | type fileEntry struct {
43 | length int64
44 | file File
45 | }
46 |
47 | func NewFileStore(info *InfoDict, fileSystem FileSystem) (f FileStore, totalSize int64, err error) {
48 | fs := &fileStore{}
49 | fs.fileSystem = fileSystem
50 | fs.pieceSize = info.PieceLength
51 | numFiles := len(info.Files)
52 | if numFiles == 0 {
53 | // Create dummy Files structure.
54 | info = &InfoDict{Files: []FileDict{FileDict{info.Length, []string{info.Name}, info.Md5sum}}}
55 | numFiles = 1
56 | }
57 | fs.files = make([]fileEntry, numFiles)
58 | fs.offsets = make([]int64, numFiles)
59 | for i, _ := range info.Files {
60 | src := &info.Files[i]
61 | var file File
62 | file, err = fs.fileSystem.Open(src.Path, src.Length)
63 | if err != nil {
64 | // Close all files opened up to now.
65 | for i2 := 0; i2 < i; i2++ {
66 | fs.files[i2].file.Close()
67 | }
68 | return
69 | }
70 | fs.files[i].file = file
71 | fs.files[i].length = src.Length
72 | fs.offsets[i] = totalSize
73 | totalSize += src.Length
74 | }
75 | f = fs
76 | return
77 | }
78 |
79 | func (f *fileStore) find(offset int64) int {
80 | // Binary search
81 | offsets := f.offsets
82 | low := 0
83 | high := len(offsets)
84 | for low < high-1 {
85 | probe := (low + high) / 2
86 | entry := offsets[probe]
87 | if offset < entry {
88 | high = probe
89 | } else {
90 | low = probe
91 | }
92 | }
93 | return low
94 | }
95 |
96 | func (f *fileStore) ReadAt(p []byte, off int64) (n int, err error) {
97 | index := f.find(off)
98 | for len(p) > 0 && index < len(f.offsets) {
99 | chunk := int64(len(p))
100 | entry := &f.files[index]
101 | itemOffset := off - f.offsets[index]
102 | if itemOffset < entry.length {
103 | space := entry.length - itemOffset
104 | if space < chunk {
105 | chunk = space
106 | }
107 | var nThisTime int
108 | nThisTime, err = entry.file.ReadAt(p[0:chunk], itemOffset)
109 | n = n + nThisTime
110 | if err != nil {
111 | return
112 | }
113 | p = p[nThisTime:]
114 | off += int64(nThisTime)
115 | }
116 | index++
117 | }
118 | // At this point if there's anything left to read it means we've run off the
119 | // end of the file store. Read zeros. This is defined by the bittorrent protocol.
120 | for i, _ := range p {
121 | p[i] = 0
122 | }
123 | return
124 | }
125 |
126 | func (f *fileStore) WritePiece(p []byte, piece int) (n int, err error) {
127 | off := int64(piece) * f.pieceSize
128 | index := f.find(off)
129 | for len(p) > 0 && index < len(f.offsets) {
130 | chunk := int64(len(p))
131 | entry := &f.files[index]
132 | itemOffset := off - f.offsets[index]
133 | if itemOffset < entry.length {
134 | space := entry.length - itemOffset
135 | if space < chunk {
136 | chunk = space
137 | }
138 | var nThisTime int
139 | nThisTime, err = entry.file.WriteAt(p[0:chunk], itemOffset)
140 | n += nThisTime
141 | if err != nil {
142 | return
143 | }
144 | p = p[nThisTime:]
145 | off += int64(nThisTime)
146 | }
147 | index++
148 | }
149 | 	// If there's anything left to write at this point, we've run off the end of
150 | 	// the file store. Check that the data is zeros.
151 | 	// This is defined by the BitTorrent protocol.
152 | 	for i := range p {
153 | if p[i] != 0 {
154 | err = errors.New("Unexpected non-zero data at end of store.")
155 | n = n + i
156 | return
157 | }
158 | }
159 | n = n + len(p)
160 | return
161 | }
162 |
163 | func (f *fileStore) Close() (err error) {
164 | for i := range f.files {
165 | f.files[i].file.Close()
166 | }
167 |
168 | if f.fileSystem != nil {
169 | err = f.fileSystem.Close()
170 | }
171 | return
172 | }
173 |
--------------------------------------------------------------------------------
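
The fileStore above stitches any number of backing files into one flat address space: find locates the file containing a global offset by binary search, and ReadAt/WritePiece then walk forward across file boundaries. A minimal usage sketch, assuming the package is imported as github.com/jackpal/Taipei-Torrent/torrent and using the RAM file system defined later in ramfiles.go; the file names, lengths, and piece size are invented for illustration:

// Hypothetical sketch: one piece written across two backing files.
package main

import (
	"fmt"

	"github.com/jackpal/Taipei-Torrent/torrent"
)

func main() {
	fs, _ := torrent.NewRAMFileSystem()
	info := &torrent.InfoDict{
		PieceLength: 8,
		Files: []torrent.FileDict{
			{Length: 5, Path: []string{"a.bin"}},
			{Length: 5, Path: []string{"b.bin"}},
		},
	}
	store, total, err := torrent.NewFileStore(info, fs)
	if err != nil {
		panic(err)
	}
	// Piece 0 spans the end of a.bin and the start of b.bin.
	store.WritePiece([]byte("01234567"), 0)
	buf := make([]byte, 4)
	store.ReadAt(buf, 3) // reads across the file boundary
	fmt.Println(total, string(buf))
}
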
/torrent/files_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "crypto/sha1"
5 | "encoding/hex"
6 | "io/ioutil"
7 | "testing"
8 | )
9 |
10 | type testFile struct {
11 | path string
12 | fileLen int64
13 | // SHA1 of fileLen bytes.
14 | hash string
15 | // SHA1 of the first 25 bytes only.
16 | hashPieceA string
17 | // SHA1 of bytes 25-49
18 | hashPieceB string
19 | }
20 |
21 | var tests []testFile = []testFile{{
22 | "../testData/testFile",
23 | 8054,
24 | // shasum testData/testFile | tr "[a-z]" "[A-Z]"
25 | "BC6314A1D1D36EC6C0888AF9DBD3B5E826612ADA",
26 | // dd if=testData/testFile bs=25 count=1 | shasum | tr "[a-z]" "[A-Z]"
27 | "F072A5A05C7ED8EECFFB6524FBFA89CA725A66C3",
28 | // dd if=testData/testFile bs=25 count=1 skip=1 | shasum | tr "[a-z]" "[A-Z]"
29 | "859CF11E055E61296F42EEB5BB19E598626A5173",
30 | }}
31 |
32 | func mkFileStore(tf testFile) (fs *fileStore, err error) {
33 | f := fileEntry{tf.fileLen, &osFile{tf.path}}
34 | return &fileStore{fileSystem: nil, offsets: []int64{0}, files: []fileEntry{f}, pieceSize: 512}, nil
35 | }
36 |
37 | func TestFileStoreRead(t *testing.T) {
38 | for _, testFile := range tests {
39 | fs, err := mkFileStore(testFile)
40 | if err != nil {
41 | t.Fatal(err)
42 | }
43 | ret := make([]byte, testFile.fileLen)
44 | _, err = fs.ReadAt(ret, 0)
45 | if err != nil {
46 | t.Fatal(err)
47 | }
48 | orig, _ := ioutil.ReadFile(testFile.path)
49 | wantedsum := sha1.Sum(orig[:testFile.fileLen])
50 | sum1Str := hex.EncodeToString(wantedsum[0:])
51 | gotsum := sha1.Sum(ret)
52 | sum2Str := hex.EncodeToString(gotsum[0:])
53 | if sum1Str != sum2Str {
54 | t.Errorf("Wanted %v, got %v\n", sum1Str, sum2Str)
55 | }
56 | }
57 | }
58 |
--------------------------------------------------------------------------------
/torrent/hdcache.go:
--------------------------------------------------------------------------------
1 | // hdcache
2 | package torrent
3 |
4 | import (
5 | "encoding/hex"
6 | "io"
7 | "log"
8 | "math"
9 | "os"
10 | "path/filepath"
11 | "sort"
12 | "strconv"
13 | "sync/atomic"
14 | "time"
15 | )
16 |
17 | //This provider creates an HD cache for each torrent.
18 | //Each time a cache is created or closed, all cache allocations
19 | //are recalculated so they total <= capacity (in MiB).
20 | type HdCacheProvider struct {
21 | capacity int
22 | caches map[string]*HdCache
23 | }
24 |
25 | func NewHdCacheProvider(capacity int) CacheProvider {
26 | os.Mkdir(filepath.FromSlash(os.TempDir()+"/taipeitorrent"), 0777)
27 | rc := &HdCacheProvider{capacity, make(map[string]*HdCache)}
28 | return rc
29 | }
30 |
31 | func (r *HdCacheProvider) NewCache(infohash string, numPieces int, pieceSize int64, torrentLength int64, underlying FileStore) FileStore {
32 | i := uint32(1)
33 | rc := &HdCache{pieceSize: pieceSize, atimes: make([]time.Time, numPieces), boxExists: *NewBitset(numPieces),
34 | boxPrefix: filepath.FromSlash(os.TempDir() + "/taipeitorrent/" + hex.EncodeToString([]byte(infohash)) + "-"),
35 | torrentLength: torrentLength, cacheProvider: r, capacity: &i, infohash: infohash, underlying: underlying}
36 | rc.empty() //clear out any detritus from previous runs
37 | r.caches[infohash] = rc
38 | r.rebalance()
39 | return rc
40 | }
41 |
42 | //Rebalance the cache capacity allocations; has to be called on each cache creation or deletion.
43 | func (r *HdCacheProvider) rebalance() {
44 | 	//Cache size has diminishing returns:
45 | 	//the more of it a torrent has, the less of a difference additional cache makes.
46 | 	//Thus, instead of scaling the distribution linearly with torrent size, we scale it by square root.
47 | log.Println("Rebalancing caches...")
48 | var scalingTotal float64
49 | sqrts := make(map[string]float64)
50 | for i, cache := range r.caches {
51 | sqrts[i] = math.Sqrt(float64(cache.torrentLength))
52 | scalingTotal += sqrts[i]
53 | }
54 |
55 | scalingFactor := float64(r.capacity*1024*1024) / scalingTotal
56 | for i, cache := range r.caches {
57 | newCap := int64(math.Floor(scalingFactor * sqrts[i] / float64(cache.pieceSize)))
58 | if newCap == 0 {
59 | newCap = 1 //Something's better than nothing!
60 | }
61 | log.Printf("Setting cache '%x' to new capacity %v (%v MiB)", cache.infohash, newCap, float32(newCap*cache.pieceSize)/float32(1024*1024))
62 | cache.setCapacity(uint32(newCap))
63 | }
64 |
65 | for _, cache := range r.caches {
66 | cache.trim()
67 | }
68 | }
69 |
70 | func (r *HdCacheProvider) cacheClosed(infohash string) {
71 | delete(r.caches, infohash)
72 | r.rebalance()
73 | }
74 |
75 | //'pieceSize' is the size of the average piece
76 | //'capacity' is how many pieces the cache can hold
77 | //'actualUsage' is how many pieces the cache has at the moment
78 | //'atimes' is an array of access times for each stored box
79 | //'boxExists' indicates whether a box exists in the cache
80 | //'boxPrefix' is the partial path to the boxes.
81 | //'torrentLength' is the number of bytes in the torrent
82 | //'cacheProvider' is a pointer to the cacheProvider that created this cache
83 | //'infohash' is the infohash of the torrent
84 | //'underlying' is the FileStore we're caching
85 | type HdCache struct {
86 | pieceSize int64
87 | capacity *uint32 //Access only through getter/setter
88 | actualUsage int
89 | atimes []time.Time
90 | boxExists Bitset
91 | boxPrefix string
92 | torrentLength int64
93 | cacheProvider *HdCacheProvider
94 | infohash string
95 | underlying FileStore
96 | }
97 |
98 | func (r *HdCache) Close() error {
99 | r.cacheProvider.cacheClosed(r.infohash)
100 | r.empty()
101 | return r.underlying.Close()
102 | }
103 |
104 | func (r *HdCache) empty() {
105 | for i := 0; i < r.boxExists.Len(); i++ {
106 | os.Remove(r.boxPrefix + strconv.Itoa(i))
107 | }
108 | }
109 |
110 | func (r *HdCache) ReadAt(p []byte, off int64) (retInt int, retErr error) {
111 | boxI := int(off / r.pieceSize)
112 | boxOff := off % r.pieceSize
113 |
114 | for i := 0; i < len(p); {
115 | copied := 0
116 | if !r.boxExists.IsSet(boxI) { //not in cache
117 | bufferLength := r.pieceSize
118 | bufferOffset := int64(boxI) * r.pieceSize
119 |
120 | if bufferLength > r.torrentLength-bufferOffset { //do we want the last, smaller than usual piece?
121 | bufferLength = r.torrentLength - bufferOffset
122 | }
123 |
124 | buffer := make([]byte, bufferLength)
125 | r.underlying.ReadAt(buffer, bufferOffset)
126 | copied = copy(p[i:], buffer[boxOff:])
127 | r.addBox(buffer, boxI)
128 |
129 | } else { //in cache
130 | box, err := os.Open(r.boxPrefix + strconv.Itoa(boxI))
131 | if err != nil {
132 | log.Println("Error opening cache item we thought we had:", r.boxPrefix+strconv.Itoa(boxI), "error:", err)
133 | box.Close()
134 | r.removeBox(boxI)
135 | continue //loop around without incrementing 'i', forget this ever happened
136 | }
137 |
138 | copied, err = box.ReadAt(p[i:], boxOff)
139 | box.Close()
140 | r.atimes[boxI] = time.Now()
141 |
142 | if err != nil && err != io.EOF {
143 | log.Println("Error while reading cache item:", r.boxPrefix+strconv.Itoa(boxI), "error:", err)
144 | r.removeBox(boxI)
145 | continue //loop around without incrementing 'i', forget this ever happened
146 | }
147 | }
148 |
149 | i += copied
150 | boxI++
151 | boxOff = 0
152 | }
153 |
154 | retInt = len(p)
155 | return
156 | }
157 |
158 | func (r *HdCache) WritePiece(p []byte, boxI int) (n int, retErr error) {
159 |
160 | if r.boxExists.IsSet(boxI) { //box exists, our work is done
161 | log.Println("Got a WritePiece for a piece we should already have:", boxI)
162 | return
163 | }
164 |
165 | r.addBox(p, boxI)
166 |
167 | //TODO: Maybe goroutine the calls to underlying?
168 | return r.underlying.WritePiece(p, boxI)
169 | }
170 |
171 | func (r *HdCache) addBox(p []byte, boxI int) {
172 |
173 | box, err := os.Create(r.boxPrefix + strconv.Itoa(boxI))
174 | if err != nil {
175 | log.Println("Couldn't create cache file:", err)
176 | } else {
177 | box.Truncate(int64(len(p)))
178 | r.actualUsage++
179 | //TODO: Maybe goroutine the calls to box?
180 | _, err = box.WriteAt(p, 0)
181 | if err != nil {
182 | log.Println("Error at write cache box:", box.Name(), "error:", err)
183 | } else {
184 | r.atimes[boxI] = time.Now()
185 | r.boxExists.Set(boxI)
186 | }
187 | box.Close()
188 | }
189 |
190 | r.trim()
191 | }
192 |
193 | func (r *HdCache) removeBox(boxI int) {
194 | r.boxExists.Clear(boxI)
195 | err := os.Remove(r.boxPrefix + strconv.Itoa(boxI))
196 | if err != nil {
197 | log.Println("Error removing cache box:", err)
198 | } else {
199 | r.actualUsage--
200 | }
201 | }
202 |
203 | func (r *HdCache) getCapacity() int {
204 | return int(atomic.LoadUint32(r.capacity))
205 | }
206 |
207 | func (r *HdCache) setCapacity(capacity uint32) {
208 | atomic.StoreUint32(r.capacity, capacity)
209 | }
210 |
211 | //Trim excess data.
212 | func (r *HdCache) trim() {
213 | if r.actualUsage <= r.getCapacity() {
214 | return
215 | }
216 |
217 | //Figure out what's oldest and clear that then
218 | tATA := make([]accessTime, 0, r.actualUsage)
219 |
220 | for i, atime := range r.atimes {
221 | if r.boxExists.IsSet(i) {
222 | tATA = append(tATA, accessTime{i, atime})
223 | }
224 | }
225 |
226 | sort.Sort(byTime(tATA))
227 |
228 | deficit := r.actualUsage - r.getCapacity()
229 | for i := 0; i < deficit; i++ {
230 | deadBox := tATA[i].index
231 | r.removeBox(deadBox)
232 | }
233 | }
234 |
--------------------------------------------------------------------------------
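
rebalance splits the configured capacity across torrents in proportion to the square root of each torrent's length, then converts each share into a number of cacheable pieces. A standalone sketch of that arithmetic, not repository code, with made-up torrent sizes, piece size, and a 100 MiB total:

// Standalone sketch of the square-root capacity split used by rebalance().
package main

import (
	"fmt"
	"math"
)

func main() {
	capacityMiB := 100
	pieceSize := int64(256 * 1024)
	torrentLengths := map[string]int64{
		"small": 50 << 20, // 50 MiB
		"large": 5 << 30,  // 5 GiB
	}

	var scalingTotal float64
	sqrts := make(map[string]float64)
	for name, length := range torrentLengths {
		sqrts[name] = math.Sqrt(float64(length))
		scalingTotal += sqrts[name]
	}

	scalingFactor := float64(capacityMiB*1024*1024) / scalingTotal
	for name := range torrentLengths {
		pieces := int64(math.Floor(scalingFactor * sqrts[name] / float64(pieceSize)))
		if pieces == 0 {
			pieces = 1 // something's better than nothing
		}
		fmt.Printf("%s: %d cached pieces (%.1f MiB)\n",
			name, pieces, float64(pieces*pieceSize)/(1<<20))
	}
}
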
/torrent/hdcache_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "crypto/sha1"
5 | "encoding/hex"
6 | "io/ioutil"
7 | "log"
8 | "testing"
9 | )
10 |
11 | func TestHDCachedFileStoreRead(t *testing.T) {
12 | rcp := NewHdCacheProvider(2000)
13 | for _, testFile := range tests {
14 | fs, err := mkFileStore(testFile)
15 | orig, _ := ioutil.ReadFile(testFile.path)
16 | numPieces := len(orig) / 512
17 | if len(orig)%512 > 0 {
18 | numPieces++
19 | }
20 | tC := rcp.NewCache("test", numPieces, 512, int64(len(orig)), fs)
21 | tC.WritePiece(orig[:512], 0)
22 | tC.WritePiece(orig[512:1024], 1)
23 |
24 | if err != nil {
25 | t.Fatal(err)
26 | }
27 | ret := make([]byte, testFile.fileLen)
28 | _, err = tC.ReadAt(ret, 0)
29 | if err != nil {
30 | t.Fatal(err)
31 | }
32 | wantedsum := sha1.Sum(orig[:testFile.fileLen])
33 | sum1Str := hex.EncodeToString(wantedsum[0:])
34 | gotsum := sha1.Sum(ret)
35 | sum2Str := hex.EncodeToString(gotsum[0:])
36 | if sum1Str != sum2Str {
37 | t.Errorf("Wanted %v, got %v\n on cache read", sum1Str, sum2Str)
38 | for i := 0; i < len(ret); i++ {
39 | if ret[i] != orig[i] {
40 | log.Println("Found a difference at", i, "wanted", orig[i], "got", ret[i])
41 | break
42 | }
43 | }
44 | }
45 |
46 | ret = make([]byte, testFile.fileLen)
47 | _, err = fs.ReadAt(ret, 0)
48 | if err != nil {
49 | t.Fatal(err)
50 | }
51 | gotsum = sha1.Sum(ret)
52 | sum2Str = hex.EncodeToString(gotsum[0:])
53 | if sum1Str != sum2Str {
54 | t.Errorf("Wanted %v, got %v\n on filestore read", sum1Str, sum2Str)
55 | for i := 0; i < len(ret); i++ {
56 | if ret[i] != orig[i] {
57 | log.Println("Found a difference at", i, "wanted", orig[i], "got", ret[i])
58 | break
59 | }
60 | }
61 | }
62 |
63 | fs.Close()
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/torrent/listen.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "fmt"
5 | "github.com/jackpal/gateway"
6 | "log"
7 | "net"
8 | "strconv"
9 | )
10 |
11 | // BtConn wraps an incoming network connection and contains metadata that helps
12 | // identify which active torrentSession it's relevant for.
13 | type BtConn struct {
14 | conn net.Conn
15 | RemoteAddr net.Addr
16 | header []byte
17 | Infohash string
18 | id string
19 | }
20 |
21 | // ListenForPeerConnections listens on a TCP port for incoming connections and
22 | // demuxes them to the appropriate active torrentSession based on the InfoHash
23 | // in the header.
24 | func ListenForPeerConnections(flags *TorrentFlags) (conChan chan *BtConn, listenPort int, err error) {
25 | listener, listenPort, err := CreateListener(flags)
26 | if err != nil {
27 | return
28 | }
29 | conChan = make(chan *BtConn)
30 | _, portstring, err := net.SplitHostPort(listener.Addr().String())
31 | if err != nil {
32 | log.Printf("Listener failed while finding the host/port for %v: %v", portstring, err)
33 | return
34 | }
35 | go func() {
36 | for {
37 | var conn net.Conn
38 | conn, err := listener.Accept()
39 | if err != nil {
40 | log.Println("Listener accept failed:", err)
41 | continue
42 | }
43 | header, err := readHeader(conn)
44 | if err != nil {
45 | log.Println("Error reading header: ", err)
46 | continue
47 | }
48 | peersInfoHash := string(header[8:28])
49 | id := string(header[28:48])
50 | conChan <- &BtConn{
51 | header: header,
52 | Infohash: peersInfoHash,
53 | id: id,
54 | conn: conn,
55 | RemoteAddr: conn.RemoteAddr(),
56 | }
57 | }
58 | }()
59 | return
60 | }
61 |
62 | func CreateListener(flags *TorrentFlags) (listener net.Listener, externalPort int, err error) {
63 | nat, err := CreatePortMapping(flags)
64 | if err != nil {
65 | err = fmt.Errorf("Unable to create NAT: %v", err)
66 | return
67 | }
68 | listenPort := flags.Port
69 | if nat != nil {
70 | var external net.IP
71 | if external, err = nat.GetExternalAddress(); err != nil {
72 | err = fmt.Errorf("Unable to get external IP address from NAT: %v", err)
73 | return
74 | }
75 | log.Println("External ip address: ", external)
76 | if listenPort, err = chooseListenPort(nat, listenPort); err != nil {
77 | log.Println("Could not choose listen port.", err)
78 | log.Println("Peer connectivity will be affected.")
79 | }
80 | }
81 | listener, err = net.ListenTCP("tcp", &net.TCPAddr{Port: listenPort})
82 | if err != nil {
83 | log.Fatal("Listen failed:", err)
84 | }
85 | if listenPort == 0 {
86 | _, portString, _ := net.SplitHostPort(listener.Addr().String())
87 | listenPort, _ = strconv.Atoi(portString)
88 | flags.Port = listenPort
89 | }
90 | log.Println("Listening for peers on port:", listenPort)
91 | externalPort = listenPort
92 | return
93 | }
94 |
95 | // CreatePortMapping creates a NAT port mapping, or returns nil if none is requested or found.
96 | func CreatePortMapping(flags *TorrentFlags) (nat NAT, err error) {
97 | if flags.UseUPnP && flags.UseNATPMP {
98 | err = fmt.Errorf("Cannot specify both -useUPnP and -useNATPMP")
99 | return
100 | }
101 | if flags.UseUPnP {
102 | log.Println("Using UPnP to open port.")
103 | nat, err = Discover()
104 | }
105 | if flags.UseNATPMP {
106 | var gatewayIP net.IP
107 | if flags.Gateway == "" {
108 | log.Printf("useNATPMP but gateway not provided, trying discovery")
109 | gatewayIP, err = gateway.DiscoverGateway()
110 | if err != nil {
111 | return
112 | }
113 | log.Printf("...discovered gateway IP: %s", gatewayIP)
114 | } else {
115 | gatewayIP = net.ParseIP(flags.Gateway)
116 | }
117 | log.Println("Using NAT-PMP to open port.")
118 | if gatewayIP == nil {
119 | err = fmt.Errorf("Could not parse gateway %q", flags.Gateway)
120 | }
121 | nat = NewNatPMP(gatewayIP)
122 | }
123 | return
124 | }
125 |
126 | func chooseListenPort(nat NAT, externalPort int) (listenPort int, err error) {
127 | // TODO: Unmap port when exiting. (Right now we never exit cleanly.)
128 | // TODO: Defend the port, remap when router reboots
129 | listenPort, err = nat.AddPortMapping("tcp", externalPort, externalPort,
130 | "Taipei-Torrent port "+strconv.Itoa(externalPort), 360000)
131 | if err != nil {
132 | return
133 | }
134 | return
135 | }
136 |
137 | func readHeader(conn net.Conn) (h []byte, err error) {
138 | header := make([]byte, 68)
139 | _, err = conn.Read(header[0:1])
140 | if err != nil {
141 | err = fmt.Errorf("Couldn't read 1st byte: %v", err)
142 | return
143 | }
144 | if header[0] != 19 {
145 | err = fmt.Errorf("First byte is not 19")
146 | return
147 | }
148 | _, err = conn.Read(header[1:20])
149 | if err != nil {
150 | err = fmt.Errorf("Couldn't read magic string: %v", err)
151 | return
152 | }
153 | if string(header[1:20]) != "BitTorrent protocol" {
154 | err = fmt.Errorf("Magic string is not correct: %v", string(header[1:20]))
155 | return
156 | }
157 | // Read rest of header
158 | _, err = conn.Read(header[20:])
159 | if err != nil {
160 | err = fmt.Errorf("Couldn't read rest of header")
161 | return
162 | }
163 |
164 | h = make([]byte, 48)
165 | copy(h, header[20:])
166 | return
167 | }
168 |
--------------------------------------------------------------------------------
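
readHeader expects the standard 68-byte BitTorrent handshake: a length byte of 19, the string "BitTorrent protocol", 8 reserved bytes, a 20-byte infohash, and a 20-byte peer id. It returns the final 48 bytes, which is why ListenForPeerConnections slices the infohash from header[8:28] and the id from header[28:48]. A sketch that builds such a handshake, with placeholder infohash and peer-id values:

// Sketch of the 68-byte handshake layout readHeader parses.
package main

import "fmt"

func buildHandshake(infohash, peerID [20]byte) []byte {
	h := make([]byte, 0, 68)
	h = append(h, 19)
	h = append(h, "BitTorrent protocol"...)
	h = append(h, make([]byte, 8)...) // reserved/extension bits
	h = append(h, infohash[:]...)
	h = append(h, peerID[:]...)
	return h
}

func main() {
	var ih, id [20]byte
	copy(ih[:], "aaaaaaaaaaaaaaaaaaaa") // placeholder infohash
	copy(id[:], "-TT0001-abcdefghijkl") // placeholder peer id
	h := buildHandshake(ih, id)
	fmt.Println(len(h), string(h[1:20]))
}
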
/torrent/lpd.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "log"
8 | "net"
9 | "net/http"
10 | "time"
11 | )
12 |
13 | var (
14 | request_template = "BT-SEARCH * HTTP/1.1\r\n" +
15 | "Host: 239.192.152.143:6771\r\n" +
16 | 		"Port: %d\r\n" +
17 | "Infohash: %X\r\n\r\n"
18 | )
19 |
20 | type Announce struct {
21 | Peer string
22 | Infohash string
23 | }
24 |
25 | type Announcer struct {
26 | btPort uint16
27 | addr *net.UDPAddr
28 | conn *net.UDPConn
29 |
30 | Announces chan *Announce
31 | activeAnnounces map[string]*time.Ticker
32 | }
33 |
34 | func NewAnnouncer(listenPort uint16) (lpd *Announcer, err error) {
35 | addr, err := net.ResolveUDPAddr("udp4", "239.192.152.143:6771")
36 | if err != nil {
37 | return
38 | }
39 |
40 | conn, err := net.ListenMulticastUDP("udp4", nil, addr)
41 | if err != nil {
42 | return
43 | }
44 |
45 | activeAnnounces := make(map[string]*time.Ticker)
46 | lpd = &Announcer{
47 | btPort: listenPort,
48 | addr: addr,
49 | conn: conn,
50 | Announces: make(chan *Announce),
51 | activeAnnounces: activeAnnounces,
52 | }
53 |
54 | go lpd.run()
55 | return
56 | }
57 |
58 | func (lpd *Announcer) run() {
59 | for {
60 | answer := make([]byte, 256)
61 | _, from, err := lpd.conn.ReadFromUDP(answer)
62 | if err != nil {
63 | log.Println("Error reading from UDP: ", err)
64 | continue
65 | }
66 |
67 | req, err := http.ReadRequest(bufio.NewReader(bytes.NewReader(answer)))
68 | if err != nil {
69 | log.Println("Error reading HTTP request from UDP: ", err)
70 | continue
71 | }
72 |
73 | if req.Method != "BT-SEARCH" {
74 | log.Println("Invalid method: ", req.Method)
75 | }
76 |
77 | ih := req.Header.Get("Infohash")
78 | if ih == "" {
79 | log.Println("No Infohash")
80 | continue
81 | }
82 |
83 | port := req.Header.Get("Port")
84 | if port == "" {
85 | log.Println("No port")
86 | continue
87 | }
88 |
89 | addr, err := net.ResolveTCPAddr("tcp4", from.IP.String()+":"+port)
90 | if err != nil {
91 | log.Println(err)
92 | continue
93 | }
94 | lpd.Announces <- &Announce{addr.String(), ih}
95 | }
96 | }
97 |
98 | func (lpd *Announcer) Announce(ih string) {
99 | go func() {
100 | requestMessage := []byte(fmt.Sprintf(request_template, lpd.btPort,
101 | ih))
102 |
103 | // Announce at launch, then every 5 minutes
104 | _, err := lpd.conn.WriteToUDP(requestMessage, lpd.addr)
105 | if err != nil {
106 | log.Println(err)
107 | }
108 |
109 | ticker := time.NewTicker(5 * time.Minute)
110 | lpd.activeAnnounces[ih] = ticker
111 |
112 | 		for range ticker.C {
113 | _, err := lpd.conn.WriteToUDP(requestMessage, lpd.addr)
114 | if err != nil {
115 | log.Println(err)
116 | }
117 | }
118 | }()
119 | }
120 |
121 | func (lpd *Announcer) StopAnnouncing(ih string) {
122 | if ticker, ok := lpd.activeAnnounces[ih]; ok {
123 | ticker.Stop()
124 | delete(lpd.activeAnnounces, ih)
125 | }
126 | }
127 |
--------------------------------------------------------------------------------
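
Announce multicasts a BT-SEARCH datagram, an HTTP-style request carried over UDP, to 239.192.152.143:6771 and repeats it every five minutes; run parses the same format on receipt. A sketch that renders the message with placeholder port and infohash values:

// Sketch of the BT-SEARCH datagram Announce() multicasts.
package main

import "fmt"

func main() {
	const template = "BT-SEARCH * HTTP/1.1\r\n" +
		"Host: 239.192.152.143:6771\r\n" +
		"Port: %d\r\n" +
		"Infohash: %X\r\n\r\n"
	infohash := []byte("aaaaaaaaaaaaaaaaaaaa") // 20 raw bytes in practice
	msg := fmt.Sprintf(template, 6881, infohash)
	fmt.Print(msg)
}
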
/torrent/metainfo.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bytes"
5 | "crypto/md5"
6 | "crypto/sha1"
7 | "encoding/hex"
8 | "errors"
9 | "fmt"
10 | "io"
11 | "io/ioutil"
12 | "log"
13 | "os"
14 | "path"
15 | "strings"
16 |
17 | "golang.org/x/net/proxy"
18 |
19 | bencode "github.com/jackpal/bencode-go"
20 | "github.com/nictuku/dht"
21 | )
22 |
23 | type FileDict struct {
24 | Length int64
25 | Path []string
26 | Md5sum string
27 | }
28 |
29 | type InfoDict struct {
30 | PieceLength int64 `bencode:"piece length"`
31 | Pieces string
32 | Private int64
33 | Name string
34 | // Single File Mode
35 | Length int64
36 | Md5sum string
37 | // Multiple File mode
38 | Files []FileDict
39 | }
40 |
41 | type MetaInfo struct {
42 | Info InfoDict
43 | InfoHash string
44 | Announce string
45 | AnnounceList [][]string `bencode:"announce-list"`
46 | CreationDate string `bencode:"creation date"`
47 | Comment string
48 | CreatedBy string `bencode:"created by"`
49 | Encoding string
50 | }
51 |
52 | func getString(m map[string]interface{}, k string) string {
53 | if v, ok := m[k]; ok {
54 | if s, ok := v.(string); ok {
55 | return s
56 | }
57 | }
58 | return ""
59 | }
60 |
61 | // Parse a list-of-lists-of-strings structure, filtering out anything that is
62 | // not a string and dropping empty lists. May return nil.
63 | func getSliceSliceString(m map[string]interface{}, k string) (aas [][]string) {
64 | if a, ok := m[k]; ok {
65 | if b, ok := a.([]interface{}); ok {
66 | for _, c := range b {
67 | if d, ok := c.([]interface{}); ok {
68 | var sliceOfStrings []string
69 | for _, e := range d {
70 | if f, ok := e.(string); ok {
71 | sliceOfStrings = append(sliceOfStrings, f)
72 | }
73 | }
74 | if len(sliceOfStrings) > 0 {
75 | aas = append(aas, sliceOfStrings)
76 | }
77 | }
78 | }
79 | }
80 | }
81 | return
82 | }
83 |
84 | func GetMetaInfo(dialer proxy.Dialer, torrent string) (metaInfo *MetaInfo, err error) {
85 | var input io.ReadCloser
86 | if strings.HasPrefix(torrent, "http:") {
87 | r, err := proxyHttpGet(dialer, torrent)
88 | if err != nil {
89 | return nil, err
90 | }
91 | input = r.Body
92 | } else if strings.HasPrefix(torrent, "magnet:") {
93 | magnet, err := parseMagnet(torrent)
94 | if err != nil {
95 | log.Println("Couldn't parse magnet: ", err)
96 | return nil, err
97 | }
98 |
99 | ih, err := dht.DecodeInfoHash(magnet.InfoHashes[0])
100 | if err != nil {
101 | return nil, err
102 | }
103 |
104 | metaInfo = &MetaInfo{InfoHash: string(ih), AnnounceList: magnet.Trackers}
105 |
106 | 	//Gives us something to call the torrent until metadata can be procured
107 | metaInfo.Info.Name = hex.EncodeToString([]byte(ih))
108 |
109 | return metaInfo, err
110 |
111 | } else {
112 | if input, err = os.Open(torrent); err != nil {
113 | return
114 | }
115 | }
116 |
117 | 	// We need to calculate the sha1 of the Info map, including every value in the
118 | // map. The easiest way to do this is to read the data using the Decode
119 | // API, and then pick through it manually.
120 | var m interface{}
121 | m, err = bencode.Decode(input)
122 | input.Close()
123 | if err != nil {
124 | err = errors.New("Couldn't parse torrent file phase 1: " + err.Error())
125 | return
126 | }
127 |
128 | topMap, ok := m.(map[string]interface{})
129 | if !ok {
130 | err = errors.New("Couldn't parse torrent file phase 2.")
131 | return
132 | }
133 |
134 | infoMap, ok := topMap["info"]
135 | if !ok {
136 | 		err = errors.New("Couldn't parse torrent file: missing info dict")
137 | return
138 | }
139 | var b bytes.Buffer
140 | if err = bencode.Marshal(&b, infoMap); err != nil {
141 | return
142 | }
143 | hash := sha1.New()
144 | hash.Write(b.Bytes())
145 |
146 | var m2 MetaInfo
147 | err = bencode.Unmarshal(&b, &m2.Info)
148 | if err != nil {
149 | return
150 | }
151 |
152 | m2.InfoHash = string(hash.Sum(nil))
153 | m2.Announce = getString(topMap, "announce")
154 | m2.AnnounceList = getSliceSliceString(topMap, "announce-list")
155 | m2.CreationDate = getString(topMap, "creation date")
156 | m2.Comment = getString(topMap, "comment")
157 | m2.CreatedBy = getString(topMap, "created by")
158 | m2.Encoding = strings.ToUpper(getString(topMap, "encoding"))
159 |
160 | metaInfo = &m2
161 | return
162 | }
163 |
164 | type MetaInfoFileSystem interface {
165 | Open(name string) (MetaInfoFile, error)
166 | Stat(name string) (os.FileInfo, error)
167 | }
168 |
169 | type MetaInfoFile interface {
170 | io.Closer
171 | io.Reader
172 | io.ReaderAt
173 | Readdirnames(n int) (names []string, err error)
174 | Stat() (os.FileInfo, error)
175 | }
176 |
177 | type OSMetaInfoFileSystem struct {
178 | dir string
179 | }
180 |
181 | func (o *OSMetaInfoFileSystem) Open(name string) (MetaInfoFile, error) {
182 | return os.Open(path.Join(o.dir, name))
183 | }
184 |
185 | func (o *OSMetaInfoFileSystem) Stat(name string) (os.FileInfo, error) {
186 | return os.Stat(path.Join(o.dir, name))
187 | }
188 |
189 | // Adapt a MetaInfoFileSystem into a torrent file store FileSystem
190 | type FileStoreFileSystemAdapter struct {
191 | m MetaInfoFileSystem
192 | }
193 |
194 | type FileStoreFileAdapter struct {
195 | f MetaInfoFile
196 | }
197 |
198 | func (f *FileStoreFileSystemAdapter) Open(name []string, length int64) (file File, err error) {
199 | var ff MetaInfoFile
200 | ff, err = f.m.Open(path.Join(name...))
201 | if err != nil {
202 | return
203 | }
204 | stat, err := ff.Stat()
205 | if err != nil {
206 | return
207 | }
208 | actualSize := stat.Size()
209 | if actualSize != length {
210 | err = fmt.Errorf("Unexpected file size %v. Expected %v", actualSize, length)
211 | return
212 | }
213 | file = &FileStoreFileAdapter{ff}
214 | return
215 | }
216 |
217 | func (f *FileStoreFileSystemAdapter) Close() error {
218 | return nil
219 | }
220 |
221 | func (f *FileStoreFileAdapter) ReadAt(p []byte, off int64) (n int, err error) {
222 | return f.f.ReadAt(p, off)
223 | }
224 |
225 | func (f *FileStoreFileAdapter) WriteAt(p []byte, off int64) (n int, err error) {
226 | // Writes must match existing data exactly.
227 | q := make([]byte, len(p))
228 | _, err = f.ReadAt(q, off)
229 | if err != nil {
230 | return
231 | }
232 | 	if !bytes.Equal(p, q) {
233 | err = fmt.Errorf("New data does not match original data.")
234 | }
235 | return
236 | }
237 |
238 | func (f *FileStoreFileAdapter) Close() (err error) {
239 | return f.f.Close()
240 | }
241 |
242 | // Create a MetaInfo for a given file and file system.
243 | // If fs is nil then the OSMetaInfoFileSystem will be used.
244 | // If pieceLength is 0 then an optimal piece length will be chosen.
245 | func CreateMetaInfoFromFileSystem(fs MetaInfoFileSystem, root, tracker string, pieceLength int64, wantMD5Sum bool) (metaInfo *MetaInfo, err error) {
246 | if fs == nil {
247 | dir, file := path.Split(root)
248 | fs = &OSMetaInfoFileSystem{dir}
249 | root = file
250 | }
251 | var m *MetaInfo = &MetaInfo{}
252 | var fileInfo os.FileInfo
253 | fileInfo, err = fs.Stat(root)
254 | if err != nil {
255 | return
256 | }
257 | var totalLength int64
258 | if fileInfo.IsDir() {
259 | err = m.addFiles(fs, root)
260 | if err != nil {
261 | return
262 | }
263 | for i := range m.Info.Files {
264 | totalLength += m.Info.Files[i].Length
265 | }
266 | if wantMD5Sum {
267 | for i := range m.Info.Files {
268 | fd := &m.Info.Files[i]
269 | fd.Md5sum, err = md5Sum(fs, path.Join(fd.Path...))
270 | if err != nil {
271 | return
272 | }
273 | }
274 | }
275 | } else {
276 | m.Info.Name = path.Base(root)
277 | totalLength = fileInfo.Size()
278 | m.Info.Length = totalLength
279 | if wantMD5Sum {
280 | m.Info.Md5sum, err = md5Sum(fs, root)
281 | if err != nil {
282 | return
283 | }
284 | }
285 | }
286 | if pieceLength == 0 {
287 | pieceLength = choosePieceLength(totalLength)
288 | }
289 | m.Info.PieceLength = int64(pieceLength)
290 | fileStoreFS := &FileStoreFileSystemAdapter{fs}
291 | var fileStore FileStore
292 | var fileStoreLength int64
293 | fileStore, fileStoreLength, err = NewFileStore(&m.Info, fileStoreFS)
294 | if err != nil {
295 | return
296 | }
297 | if fileStoreLength != totalLength {
298 | err = fmt.Errorf("Filestore total length %v, expected %v", fileStoreLength, totalLength)
299 | return
300 | }
301 | var sums []byte
302 | sums, err = computeSums(fileStore, totalLength, int64(pieceLength))
303 | if err != nil {
304 | return
305 | }
306 | m.Info.Pieces = string(sums)
307 | m.UpdateInfoHash(metaInfo)
308 | if tracker != "" {
309 | m.Announce = "http://" + tracker + "/announce"
310 | }
311 | metaInfo = m
312 | return
313 | }
314 |
315 | const MinimumPieceLength = 16 * 1024
316 | const TargetPieceCountLog2 = 10
317 | const TargetPieceCountMin = 1 << TargetPieceCountLog2
318 |
319 | // Target piece count should be < TargetPieceCountMax
320 | const TargetPieceCountMax = TargetPieceCountMin << 1
321 |
322 | // Choose a good piece length.
323 | func choosePieceLength(totalLength int64) (pieceLength int64) {
324 | // Must be a power of 2.
325 | // Must be a multiple of 16KB
326 | // Prefer to provide around 1024..2048 pieces.
327 | pieceLength = MinimumPieceLength
328 | pieces := totalLength / pieceLength
329 | for pieces >= TargetPieceCountMax {
330 | pieceLength <<= 1
331 | pieces >>= 1
332 | }
333 | return
334 | }
335 |
336 | func roundUpToPowerOfTwo(v uint64) uint64 {
337 | v--
338 | v |= v >> 1
339 | v |= v >> 2
340 | v |= v >> 4
341 | v |= v >> 8
342 | v |= v >> 16
343 | v |= v >> 32
344 | v++
345 | return v
346 | }
347 |
348 | func WriteMetaInfoBytes(root, tracker string, w io.Writer) (err error) {
349 | var m *MetaInfo
350 | m, err = CreateMetaInfoFromFileSystem(nil, root, tracker, 0, true)
351 | if err != nil {
352 | return
353 | }
354 | // log.Printf("Metainfo: %#v", m)
355 | err = m.Bencode(w)
356 | if err != nil {
357 | return
358 | }
359 | return
360 | }
361 |
362 | func md5Sum(fs MetaInfoFileSystem, file string) (sum string, err error) {
363 | var f MetaInfoFile
364 | f, err = fs.Open(file)
365 | if err != nil {
366 | return
367 | }
368 | defer f.Close()
369 | hash := md5.New()
370 | _, err = io.Copy(hash, f)
371 | if err != nil {
372 | return
373 | }
374 | sum = string(hash.Sum(nil))
375 | return
376 | }
377 |
378 | func (m *MetaInfo) addFiles(fs MetaInfoFileSystem, file string) (err error) {
379 | var fileInfo os.FileInfo
380 | fileInfo, err = fs.Stat(file)
381 | if err != nil {
382 | return
383 | }
384 | if fileInfo.IsDir() {
385 | var f MetaInfoFile
386 | f, err = fs.Open(file)
387 | if err != nil {
388 | return
389 | }
390 | var fi []string
391 | fi, err = f.Readdirnames(0)
392 | if err != nil {
393 | return
394 | }
395 | for _, name := range fi {
396 | err = m.addFiles(fs, path.Join(file, name))
397 | if err != nil {
398 | return
399 | }
400 | }
401 | } else {
402 | fileDict := FileDict{Length: fileInfo.Size()}
403 | cleanFile := path.Clean(file)
404 | parts := strings.Split(cleanFile, string(os.PathSeparator))
405 | fileDict.Path = parts
406 | m.Info.Files = append(m.Info.Files, fileDict)
407 | }
408 | return
409 | }
410 |
411 | // Updates the InfoHash field. Call this after manually changing the Info data.
412 | func (m *MetaInfo) UpdateInfoHash(metaInfo *MetaInfo) (err error) {
413 | var b bytes.Buffer
414 | infoMap := m.Info.toMap()
415 | if len(infoMap) > 0 {
416 | err = bencode.Marshal(&b, infoMap)
417 | if err != nil {
418 | return
419 | }
420 | }
421 | hash := sha1.New()
422 | hash.Write(b.Bytes())
423 |
424 | m.InfoHash = string(hash.Sum(nil))
425 | return
426 | }
427 |
428 | // Copy the non-default values from an InfoDict to a map.
429 | func (i *InfoDict) toMap() (m map[string]interface{}) {
430 | id := map[string]interface{}{}
431 | // InfoDict
432 | if i.PieceLength != 0 {
433 | id["piece length"] = i.PieceLength
434 | }
435 | if i.Pieces != "" {
436 | id["pieces"] = i.Pieces
437 | }
438 | if i.Private != 0 {
439 | id["private"] = i.Private
440 | }
441 | if i.Name != "" {
442 | id["name"] = i.Name
443 | }
444 | if i.Length != 0 {
445 | id["length"] = i.Length
446 | }
447 | if i.Md5sum != "" {
448 | id["md5sum"] = i.Md5sum
449 | }
450 | if len(i.Files) > 0 {
451 | var fi []map[string]interface{}
452 | for ii := range i.Files {
453 | f := &i.Files[ii]
454 | fd := map[string]interface{}{}
455 | if f.Length > 0 {
456 | fd["length"] = f.Length
457 | }
458 | if len(f.Path) > 0 {
459 | fd["path"] = f.Path
460 | }
461 | if f.Md5sum != "" {
462 | fd["md5sum"] = f.Md5sum
463 | }
464 | if len(fd) > 0 {
465 | fi = append(fi, fd)
466 | }
467 | }
468 | if len(fi) > 0 {
469 | id["files"] = fi
470 | }
471 | }
472 | if len(id) > 0 {
473 | m = id
474 | }
475 | return
476 | }
477 |
478 | // Encode to Bencode, but only encode non-default values.
479 | func (m *MetaInfo) Bencode(w io.Writer) (err error) {
480 | var mi map[string]interface{} = map[string]interface{}{}
481 | id := m.Info.toMap()
482 | if len(id) > 0 {
483 | mi["info"] = id
484 | }
485 | // Do not encode InfoHash. Clients are supposed to calculate it themselves.
486 | if m.Announce != "" {
487 | mi["announce"] = m.Announce
488 | }
489 | if len(m.AnnounceList) > 0 {
490 | mi["announce-list"] = m.AnnounceList
491 | }
492 | if m.CreationDate != "" {
493 | mi["creation date"] = m.CreationDate
494 | }
495 | if m.Comment != "" {
496 | mi["comment"] = m.Comment
497 | }
498 | if m.CreatedBy != "" {
499 | mi["created by"] = m.CreatedBy
500 | }
501 | if m.Encoding != "" {
502 | mi["encoding"] = m.Encoding
503 | }
504 | 	err = bencode.Marshal(w, mi)
505 | return
506 | }
507 |
508 | type TrackerResponse struct {
509 | FailureReason string `bencode:"failure reason"`
510 | WarningMessage string `bencode:"warning message"`
511 | Interval uint
512 | MinInterval uint `bencode:"min interval"`
513 | TrackerId string `bencode:"tracker id"`
514 | Complete uint
515 | Incomplete uint
516 | Peers string
517 | Peers6 string
518 | }
519 |
520 | type SessionInfo struct {
521 | PeerID string
522 | Port uint16
523 | OurAddresses map[string]bool //List of addresses that resolve to ourselves.
524 | Uploaded uint64
525 | Downloaded uint64
526 | Left uint64
527 |
528 | UseDHT bool
529 | FromMagnet bool
530 | HaveTorrent bool
531 |
532 | OurExtensions map[int]string
533 | ME *MetaDataExchange
534 | }
535 |
536 | type MetaDataExchange struct {
537 | Transferring bool
538 | Pieces [][]byte
539 | }
540 |
541 | func getTrackerInfo(dialer proxy.Dialer, url string) (tr *TrackerResponse, err error) {
542 | r, err := proxyHttpGet(dialer, url)
543 | if err != nil {
544 | return
545 | }
546 | defer r.Body.Close()
547 | if r.StatusCode >= 400 {
548 | data, _ := ioutil.ReadAll(r.Body)
549 | reason := "Bad Request " + string(data)
550 | log.Println(reason)
551 | err = errors.New(reason)
552 | return
553 | }
554 | var tr2 TrackerResponse
555 | err = bencode.Unmarshal(r.Body, &tr2)
556 | r.Body.Close()
557 | if err != nil {
558 | return
559 | }
560 | tr = &tr2
561 | return
562 | }
563 |
564 | func saveMetaInfo(metadata string) (err error) {
565 | var info InfoDict
566 | err = bencode.Unmarshal(bytes.NewReader([]byte(metadata)), &info)
567 | if err != nil {
568 | return
569 | }
570 |
571 | f, err := os.Create(info.Name + ".torrent")
572 | if err != nil {
573 | log.Println("Error when opening file for creation: ", err)
574 | return
575 | }
576 | defer f.Close()
577 |
578 | _, err = f.WriteString(metadata)
579 |
580 | return
581 | }
582 |
--------------------------------------------------------------------------------
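
GetMetaInfo derives the infohash by bencoding only the top-level "info" dictionary and hashing those bytes with SHA-1, which is why it decodes the torrent generically first and re-marshals the info value. A sketch of that computation against the sample torrent under testData/, assuming it is run from the repository root:

// Sketch of the infohash computation performed in GetMetaInfo.
package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"os"

	bencode "github.com/jackpal/bencode-go"
)

func main() {
	f, err := os.Open("testData/a.torrent")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	m, err := bencode.Decode(f)
	if err != nil {
		panic(err)
	}
	topMap := m.(map[string]interface{})

	// Re-bencode just the info dictionary and SHA-1 the result.
	var b bytes.Buffer
	if err := bencode.Marshal(&b, topMap["info"]); err != nil {
		panic(err)
	}
	sum := sha1.Sum(b.Bytes())
	fmt.Printf("infohash: %X\n", sum[:])
}
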
/torrent/metainfo_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "testing"
5 | )
6 |
7 | func TestCreateMetaInfo(t *testing.T) {
8 |
9 | }
10 |
11 | func TestChoosePieceLength(t *testing.T) {
12 | for i := uint(0); i < 63; i++ {
13 | var totalLength int64 = 1 << i
14 | testOnePieceLength(t, totalLength)
15 | testOnePieceLength(t, totalLength*3/2)
16 | }
17 | }
18 |
19 | func testOnePieceLength(t *testing.T, totalLength int64) {
20 | pieceLength := choosePieceLength(totalLength)
21 | nextPowerOfTwoLength := int64(roundUpToPowerOfTwo(uint64(pieceLength)))
22 | if pieceLength < MinimumPieceLength {
23 | t.Errorf("choosePieceLen(%v) = %v. < %v", totalLength, pieceLength, MinimumPieceLength)
24 | return
25 | }
26 | if pieceLength != nextPowerOfTwoLength {
27 | t.Errorf("choosePieceLen(%v) = %v. Not Power of Two %v", totalLength, pieceLength, nextPowerOfTwoLength)
28 | return
29 | }
30 | if totalLength >= MinimumPieceLength*TargetPieceCountMin {
31 | pieces := (totalLength + pieceLength - 1) / pieceLength
32 | if pieces < TargetPieceCountMin || pieces >= TargetPieceCountMax {
33 | t.Errorf("choosePieceLen(%v) = %v. Pieces: %v. expected %v <= pieces < %v", totalLength, pieceLength,
34 | pieces, TargetPieceCountMin, TargetPieceCountMax)
35 | return
36 | }
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/torrent/nat.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "net"
5 | )
6 |
7 | // protocol is either "udp" or "tcp"
8 | type NAT interface {
9 | GetExternalAddress() (addr net.IP, err error)
10 | AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error)
11 | DeletePortMapping(protocol string, externalPort, internalPort int) (err error)
12 | }
13 |
--------------------------------------------------------------------------------
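
Both the UPnP and NAT-PMP back ends satisfy this small interface, so other implementations can be dropped in. A hypothetical no-op stub, useful when exercising listener code without real NAT hardware; this is a sketch, not part of the repository:

// nopNAT is a hypothetical stub that satisfies the NAT interface.
package torrent

import "net"

type nopNAT struct{ external net.IP }

var _ NAT = nopNAT{} // compile-time interface check

func (n nopNAT) GetExternalAddress() (net.IP, error) { return n.external, nil }

func (n nopNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (int, error) {
	return externalPort, nil // pretend the requested port was granted
}

func (n nopNAT) DeletePortMapping(protocol string, externalPort, internalPort int) error {
	return nil
}
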
/torrent/natpmp.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "fmt"
5 | "net"
6 |
7 | natpmp "github.com/jackpal/go-nat-pmp"
8 | )
9 |
10 | // Adapt the NAT-PMP protocol to the NAT interface
11 |
12 | // TODO:
13 | // + Register for changes to the external address.
14 | // + Re-register port mapping when router reboots.
15 | // + A mechanism for keeping a port mapping registered.
16 |
17 | type natPMPClient struct {
18 | client *natpmp.Client
19 | }
20 |
21 | func NewNatPMP(gateway net.IP) (nat NAT) {
22 | return &natPMPClient{natpmp.NewClient(gateway)}
23 | }
24 |
25 | func (n *natPMPClient) GetExternalAddress() (addr net.IP, err error) {
26 | response, err := n.client.GetExternalAddress()
27 | if err != nil {
28 | return
29 | }
30 | ip := response.ExternalIPAddress
31 | addr = net.IPv4(ip[0], ip[1], ip[2], ip[3])
32 | return
33 | }
34 |
35 | func (n *natPMPClient) AddPortMapping(protocol string, externalPort, internalPort int,
36 | description string, timeout int) (mappedExternalPort int, err error) {
37 | if timeout <= 0 {
38 | err = fmt.Errorf("timeout must not be <= 0")
39 | return
40 | }
41 | // Note order of port arguments is switched between our AddPortMapping and the client's AddPortMapping.
42 | response, err := n.client.AddPortMapping(protocol, internalPort, externalPort, timeout)
43 | if err != nil {
44 | return
45 | }
46 | mappedExternalPort = int(response.MappedExternalPort)
47 | return
48 | }
49 |
50 | func (n *natPMPClient) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
51 | // To destroy a mapping, send an add-port with
52 | // an internalPort of the internal port to destroy, an external port of zero and a time of zero.
53 | _, err = n.client.AddPortMapping(protocol, internalPort, 0, 0)
54 | return
55 | }
56 |
--------------------------------------------------------------------------------
/torrent/osfiles.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "errors"
5 | "os"
6 | "path"
7 | "strings"
8 | )
9 |
10 | // a torrent FileSystem that is backed by real OS files
11 | type osFileSystem struct {
12 | storePath string
13 | }
14 |
15 | // A torrent File that is backed by an OS file
16 | type osFile struct {
17 | filePath string
18 | }
19 |
20 | type OsFsProvider struct{}
21 |
22 | func (o OsFsProvider) NewFS(directory string) (fs FileSystem, err error) {
23 | return &osFileSystem{directory}, nil
24 | }
25 |
26 | func (o *osFileSystem) Open(name []string, length int64) (file File, err error) {
27 | // Clean the source path before appending to the storePath. This
28 | // ensures that source paths that start with ".." can't escape.
29 | cleanSrcPath := path.Clean("/" + path.Join(name...))[1:]
30 | fullPath := path.Join(o.storePath, cleanSrcPath)
31 | err = ensureDirectory(fullPath)
32 | if err != nil {
33 | return
34 | }
35 | osfile := &osFile{fullPath}
36 | file = osfile
37 | err = osfile.ensureExists(length)
38 | return
39 | }
40 |
41 | func (o *osFileSystem) Close() error {
42 | return nil
43 | }
44 |
45 | func (o *osFile) Close() (err error) {
46 | return
47 | }
48 |
49 | func ensureDirectory(fullPath string) (err error) {
50 | fullPath = path.Clean(fullPath)
51 | if !strings.HasPrefix(fullPath, "/") {
52 | // Transform into absolute path.
53 | var cwd string
54 | if cwd, err = os.Getwd(); err != nil {
55 | return
56 | }
57 | fullPath = cwd + "/" + fullPath
58 | }
59 | base, _ := path.Split(fullPath)
60 | if base == "" {
61 | panic("Programming error: could not find base directory for absolute path " + fullPath)
62 | }
63 | err = os.MkdirAll(base, 0755)
64 | return
65 | }
66 |
67 | func (o *osFile) ensureExists(length int64) (err error) {
68 | name := o.filePath
69 | st, err := os.Stat(name)
70 | if err != nil && os.IsNotExist(err) {
71 | 		f, err := os.Create(name)
72 | 		if err != nil {
73 | 			return err
74 | 		}
75 | 		f.Close()
76 | 	} else if err != nil {
77 | 		return
78 | 	} else if st.Size() == length {
79 | 		return
80 | 	}
81 | err = os.Truncate(name, length)
82 | if err != nil {
83 | err = errors.New("Could not truncate file.")
84 | return
85 | }
86 | return
87 | }
88 |
89 | func (o *osFile) ReadAt(p []byte, off int64) (n int, err error) {
90 | file, err := os.OpenFile(o.filePath, os.O_RDWR, 0600)
91 | if err != nil {
92 | return
93 | }
94 | defer file.Close()
95 | return file.ReadAt(p, off)
96 | }
97 |
98 | func (o *osFile) WriteAt(p []byte, off int64) (n int, err error) {
99 | file, err := os.OpenFile(o.filePath, os.O_RDWR, 0600)
100 | if err != nil {
101 | return
102 | }
103 | defer file.Close()
104 | return file.WriteAt(p, off)
105 | }
106 |
--------------------------------------------------------------------------------
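
osFileSystem.Open defends against path traversal by joining the path segments, prefixing a "/", cleaning the result, and stripping the leading "/" again, so any "../" segments are resolved away before the path is appended to storePath. A sketch of that transformation with one benign and one hostile path; the download directory is a placeholder:

// Sketch of the path sanitizing trick used by osFileSystem.Open.
package main

import (
	"fmt"
	"path"
)

func main() {
	for _, name := range [][]string{
		{"dir", "file.bin"},
		{"..", "..", "etc", "passwd"},
	} {
		cleaned := path.Clean("/"+path.Join(name...))[1:]
		fmt.Println(path.Join("/downloads", cleaned))
	}
	// Both outputs stay under /downloads; the "../.." is cleaned away.
}
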
/torrent/peer.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "io"
7 | "log"
8 | "net"
9 | "time"
10 |
11 | bencode "github.com/jackpal/bencode-go"
12 | )
13 |
14 | const MAX_OUR_REQUESTS = 2
15 | const MAX_PEER_REQUESTS = 10
16 | const STANDARD_BLOCK_LENGTH = 16 * 1024
17 |
18 | type peerMessage struct {
19 | peer *peerState
20 | message []byte // nil means an error occurred
21 | }
22 |
23 | type peerState struct {
24 | address string
25 | id string
26 | writeChan chan []byte
27 | writeChan2 chan []byte
28 | lastReadTime time.Time
29 | have *Bitset // What the peer has told us it has
30 | conn net.Conn
31 | am_choking bool // this client is choking the peer
32 | am_interested bool // this client is interested in the peer
33 | peer_choking bool // peer is choking this client
34 | peer_interested bool // peer is interested in this client
35 | peer_requests map[uint64]bool
36 | our_requests map[uint64]time.Time // What we requested, when we requested it
37 |
38 | // This field tells if the peer can send a bitfield or not
39 | can_receive_bitfield bool
40 |
41 | theirExtensions map[string]int
42 |
43 | downloaded Accumulator
44 | }
45 |
46 | func (p *peerState) creditDownload(length int64) {
47 | p.downloaded.Add(time.Now(), length)
48 | }
49 |
50 | func (p *peerState) computeDownloadRate() {
51 | // Has the side effect of computing the download rate.
52 | p.downloaded.GetRate(time.Now())
53 | }
54 |
55 | func (p *peerState) DownloadBPS() float32 {
56 | return float32(p.downloaded.GetRateNoUpdate())
57 | }
58 |
59 | func queueingWriter(in, out chan []byte) {
60 | queue := make(map[int][]byte)
61 | head, tail := 0, 0
62 | L:
63 | for {
64 | if head == tail {
65 | select {
66 | case m, ok := <-in:
67 | if !ok {
68 | break L
69 | }
70 | queue[head] = m
71 | head++
72 | }
73 | } else {
74 | select {
75 | case m, ok := <-in:
76 | if !ok {
77 | break L
78 | }
79 | queue[head] = m
80 | head++
81 | case out <- queue[tail]:
82 | delete(queue, tail)
83 | tail++
84 | }
85 | }
86 | }
87 | // We throw away any messages waiting to be sent, including the
88 | // nil message that is automatically sent when the in channel is closed
89 | close(out)
90 | }
91 |
92 | func NewPeerState(conn net.Conn) *peerState {
93 | writeChan := make(chan []byte)
94 | writeChan2 := make(chan []byte)
95 | go queueingWriter(writeChan, writeChan2)
96 | return &peerState{writeChan: writeChan, writeChan2: writeChan2, conn: conn,
97 | am_choking: true, peer_choking: true,
98 | peer_requests: make(map[uint64]bool, MAX_PEER_REQUESTS),
99 | our_requests: make(map[uint64]time.Time, MAX_OUR_REQUESTS),
100 | can_receive_bitfield: true}
101 | }
102 |
103 | func (p *peerState) Close() {
104 | //log.Println("Closing connection to", p.address)
105 | p.conn.Close()
106 | // No need to close p.writeChan. Further writes to p.conn will just fail.
107 | }
108 |
109 | func (p *peerState) AddRequest(index, begin, length uint32) {
110 | if !p.am_choking && len(p.peer_requests) < MAX_PEER_REQUESTS {
111 | offset := (uint64(index) << 32) | uint64(begin)
112 | p.peer_requests[offset] = true
113 | }
114 | }
115 |
116 | func (p *peerState) CancelRequest(index, begin, length uint32) {
117 | offset := (uint64(index) << 32) | uint64(begin)
118 | if _, ok := p.peer_requests[offset]; ok {
119 | delete(p.peer_requests, offset)
120 | }
121 | }
122 |
123 | func (p *peerState) RemoveRequest() (index, begin, length uint32, ok bool) {
124 | 	for k := range p.peer_requests {
125 | index, begin = uint32(k>>32), uint32(k)
126 | length = STANDARD_BLOCK_LENGTH
127 | ok = true
128 | return
129 | }
130 | return
131 | }
132 |
133 | func (p *peerState) SetChoke(choke bool) {
134 | if choke != p.am_choking {
135 | p.am_choking = choke
136 | b := byte(UNCHOKE)
137 | if choke {
138 | b = CHOKE
139 | p.peer_requests = make(map[uint64]bool, MAX_PEER_REQUESTS)
140 | }
141 | p.sendOneCharMessage(b)
142 | }
143 | }
144 |
145 | func (p *peerState) SetInterested(interested bool) {
146 | if interested != p.am_interested {
147 | // log.Println("SetInterested", interested, p.address)
148 | p.am_interested = interested
149 | b := byte(NOT_INTERESTED)
150 | if interested {
151 | b = INTERESTED
152 | }
153 | p.sendOneCharMessage(b)
154 | }
155 | }
156 |
157 | func (p *peerState) SendBitfield(bs *Bitset) {
158 | msg := make([]byte, len(bs.Bytes())+1)
159 | msg[0] = BITFIELD
160 | copy(msg[1:], bs.Bytes())
161 | p.sendMessage(msg)
162 | }
163 |
164 | func (p *peerState) SendExtensions(port uint16) {
165 |
166 | handshake := map[string]interface{}{
167 | "m": map[string]int{
168 | "ut_metadata": 1,
169 | },
170 | "v": "Taipei-Torrent dev",
171 | }
172 |
173 | var buf bytes.Buffer
174 | err := bencode.Marshal(&buf, handshake)
175 | if err != nil {
176 | //log.Println("Error when marshalling extension message")
177 | return
178 | }
179 |
180 | msg := make([]byte, 2+buf.Len())
181 | msg[0] = EXTENSION
182 | msg[1] = EXTENSION_HANDSHAKE
183 | copy(msg[2:], buf.Bytes())
184 |
185 | p.sendMessage(msg)
186 | }
187 |
188 | func (p *peerState) sendOneCharMessage(b byte) {
189 | // log.Println("ocm", b, p.address)
190 | p.sendMessage([]byte{b})
191 | }
192 |
193 | func (p *peerState) sendMessage(b []byte) {
194 | p.writeChan <- b
195 | }
196 |
197 | func (p *peerState) keepAlive(now time.Time) {
198 | p.sendMessage([]byte{})
199 | }
200 |
201 | // There are two goroutines per peer: one reads data from the peer, the other
202 | // sends data to the peer.
203 |
204 | func uint32ToBytes(buf []byte, n uint32) {
205 | buf[0] = byte(n >> 24)
206 | buf[1] = byte(n >> 16)
207 | buf[2] = byte(n >> 8)
208 | buf[3] = byte(n)
209 | }
210 |
211 | func writeNBOUint32(conn net.Conn, n uint32) (err error) {
212 | var buf []byte = make([]byte, 4)
213 | uint32ToBytes(buf, n)
214 | _, err = conn.Write(buf[0:])
215 | return
216 | }
217 |
218 | func bytesToUint32(buf []byte) uint32 {
219 | return (uint32(buf[0]) << 24) |
220 | (uint32(buf[1]) << 16) |
221 | (uint32(buf[2]) << 8) | uint32(buf[3])
222 | }
223 |
224 | func readNBOUint32(r io.Reader) (n uint32, err error) {
225 | var buf [4]byte
226 | _, err = io.ReadFull(r, buf[:])
227 | if err != nil {
228 | return
229 | }
230 | n = bytesToUint32(buf[0:])
231 | return
232 | }
233 |
234 | // This func is designed to be run as a goroutine. It
235 | // listens for messages on a channel and sends them to a peer.
236 |
237 | func (p *peerState) peerWriter(errorChan chan peerMessage) {
238 | // log.Println("Writing messages")
239 | var lastWriteTime time.Time
240 |
241 | for msg := range p.writeChan2 {
242 | now := time.Now()
243 | if len(msg) == 0 {
244 | // This is a keep-alive message.
245 | if now.Sub(lastWriteTime) < 2*time.Minute {
246 | // Don't need to send keep-alive because we have recently sent a
247 | // message to this peer.
248 | continue
249 | }
250 | // log.Stderr("Sending keep alive", p)
251 | }
252 | lastWriteTime = now
253 |
254 | // log.Println("Writing", uint32(len(msg)), p.conn.RemoteAddr())
255 | err := writeNBOUint32(p.conn, uint32(len(msg)))
256 | if err != nil {
257 | log.Println(err)
258 | break
259 | }
260 | _, err = p.conn.Write(msg)
261 | if err != nil {
262 | // log.Println("Failed to write a message", p.address, len(msg), msg, err)
263 | break
264 | }
265 | }
266 | // log.Println("peerWriter exiting")
267 | errorChan <- peerMessage{p, nil}
268 | }
269 |
270 | func readPeerMessage(r io.Reader, maxMessageSize int) (buf []byte, err error) {
271 | var n uint32
272 | n, err = readNBOUint32(r)
273 | if err != nil {
274 | return
275 | }
276 | if int(n) > maxMessageSize {
277 | // log.Println("Message size too large: ", n)
278 | err = fmt.Errorf("Message size too large: %d > %d", n, maxMessageSize)
279 | return
280 | }
281 |
282 | buf = make([]byte, n)
283 |
284 | _, err = io.ReadFull(r, buf)
285 | return
286 | }
287 |
288 | // This func is designed to be run as a goroutine. It
289 | // listens for messages from the peer and forwards them to a channel.
290 |
291 | func (p *peerState) peerReader(msgChan chan peerMessage) {
292 | // log.Println("Reading messages")
293 | for {
294 | buf, err := readPeerMessage(p.conn, 130*1024)
295 | if err != nil {
296 | break
297 | }
298 | msgChan <- peerMessage{p, buf}
299 | }
300 |
301 | msgChan <- peerMessage{p, nil}
302 | // log.Println("peerReader exiting")
303 | }
304 |
305 | func (p *peerState) sendMetadataRequest(piece int) {
306 | log.Printf("Sending metadata request for piece %d to %s\n", piece, p.address)
307 |
308 | m := map[string]int{
309 | "msg_type": METADATA_REQUEST,
310 | "piece": piece,
311 | }
312 |
313 | var raw bytes.Buffer
314 | err := bencode.Marshal(&raw, m)
315 | if err != nil {
316 | return
317 | }
318 |
319 | msg := make([]byte, raw.Len()+2)
320 | msg[0] = EXTENSION
321 | msg[1] = byte(p.theirExtensions["ut_metadata"])
322 | copy(msg[2:], raw.Bytes())
323 |
324 | p.sendMessage(msg)
325 | }
326 |
--------------------------------------------------------------------------------
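
Both request maps key outstanding block requests by packing the piece index into the high 32 bits of a uint64 and the block offset into the low 32 bits, as AddRequest and RemoveRequest do. A sketch of the packing and unpacking with arbitrary values:

// Sketch of the request-key packing used for peer_requests and our_requests.
package main

import "fmt"

func packRequest(index, begin uint32) uint64 {
	return (uint64(index) << 32) | uint64(begin)
}

func unpackRequest(key uint64) (index, begin uint32) {
	return uint32(key >> 32), uint32(key)
}

func main() {
	key := packRequest(7, 2*16*1024) // piece 7, third 16 KiB block
	i, b := unpackRequest(key)
	fmt.Println(key, i, b)
}
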
/torrent/peer_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bytes"
5 | "errors"
6 | "io"
7 | "testing"
8 | )
9 |
10 | type readPeerMessageTest struct {
11 | name string
12 | maxSize int
13 | data []byte
14 | buf []byte
15 | err error
16 | }
17 |
18 | func errorsEqual(a, b error) bool {
19 | if a == nil && b == nil {
20 | return true
21 | }
22 | if a == nil || b == nil {
23 | return false
24 | }
25 | return a.Error() == b.Error()
26 | }
27 |
28 | func TestReadPeerMessage(t *testing.T) {
29 | tests := []readPeerMessageTest{
30 | {name: "eof", maxSize: 1024, data: []byte{}, buf: nil, err: io.EOF},
31 | {name: "unexpectedEof", maxSize: 1024, data: []byte{0}, buf: nil, err: io.ErrUnexpectedEOF},
32 | {name: "keepAlive", maxSize: 1024, data: []byte{0, 0, 0, 0}, buf: []byte{}, err: nil},
33 | {name: "normal", maxSize: 2, data: []byte{0, 0, 0, 2, 77, 78},
34 | buf: []byte{77, 78}, err: nil},
35 | {name: "tooLarge", maxSize: 255, data: []byte{0, 0, 1, 0}, buf: nil,
36 | err: errors.New("Message size too large: 256 > 255")},
37 | }
38 | for _, test := range tests {
39 | buffer := bytes.NewBuffer(test.data)
40 | buf, err := readPeerMessage(buffer, test.maxSize)
41 | if !errorsEqual(err, test.err) {
42 | t.Errorf("Test %v readPeerMessage(%v,%v) = %v,%v. expected err: %v",
43 | test.name, test.data, test.maxSize,
44 | buf, err, test.err)
45 | } else if !bytes.Equal(buf, test.buf) {
46 | t.Errorf("Test %v readPeerMessage(%v,%v) = %v,%v. expected buf: %v",
47 | test.name, test.data, test.maxSize,
48 | buf, err,
49 | test.buf)
50 | }
51 | if buf != nil && err == nil {
52 | remainingBytes := buffer.Len()
53 | if remainingBytes != 0 {
54 | t.Errorf("Test %v readPeerMessage(%v,%v) %d bytes remaining in input buffer.",
55 | test.name, test.data, test.maxSize,
56 | remainingBytes)
57 | }
58 | }
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/torrent/pieces.go:
--------------------------------------------------------------------------------
1 | // Compute missing pieces for a torrent.
2 | package torrent
3 |
4 | import (
5 | "crypto/sha1"
6 | "errors"
7 | "fmt"
8 | "runtime"
9 | )
10 |
11 | func checkPieces(fs FileStore, totalLength int64, m *MetaInfo) (good, bad int, goodBits *Bitset, err error) {
12 | pieceLength := m.Info.PieceLength
13 | numPieces := int((totalLength + pieceLength - 1) / pieceLength)
14 | goodBits = NewBitset(int(numPieces))
15 | ref := m.Info.Pieces
16 | if len(ref) != numPieces*sha1.Size {
17 | err = errors.New("Incorrect Info.Pieces length")
18 | return
19 | }
20 | currentSums, err := computeSums(fs, totalLength, m.Info.PieceLength)
21 | if err != nil {
22 | return
23 | }
24 | for i := 0; i < numPieces; i++ {
25 | base := i * sha1.Size
26 | end := base + sha1.Size
27 | if checkEqual([]byte(ref[base:end]), currentSums[base:end]) {
28 | good++
29 | goodBits.Set(int(i))
30 | } else {
31 | bad++
32 | }
33 | }
34 | return
35 | }
36 |
37 | func checkEqual(ref, current []byte) bool {
38 | for i := 0; i < len(current); i++ {
39 | if ref[i] != current[i] {
40 | return false
41 | }
42 | }
43 | return true
44 | }
45 |
46 | type chunk struct {
47 | i int64
48 | data []byte
49 | }
50 |
51 | // computeSums reads the file content and computes the SHA1 hash for each
52 | // piece. Spawns parallel goroutines to compute the hashes, since each
53 | // computation takes ~30ms.
54 | func computeSums(fs FileStore, totalLength int64, pieceLength int64) (sums []byte, err error) {
55 | // Calculate the SHA1 hash for each piece in parallel goroutines.
56 | hashes := make(chan chunk)
57 | results := make(chan chunk, 3)
58 | for i := 0; i < runtime.GOMAXPROCS(0); i++ {
59 | go hashPiece(hashes, results)
60 | }
61 |
62 | 	// Read file content and send it to the hashes channel, keeping order.
63 | numPieces := (totalLength + pieceLength - 1) / pieceLength
64 | go func() {
65 | for i := int64(0); i < numPieces; i++ {
66 | 			piece := make([]byte, pieceLength)
67 | if i == numPieces-1 {
68 | piece = piece[0 : totalLength-i*pieceLength]
69 | }
70 | // Ignore errors.
71 | fs.ReadAt(piece, i*pieceLength)
72 | hashes <- chunk{i: i, data: piece}
73 | }
74 | close(hashes)
75 | }()
76 |
77 | // Merge back the results.
78 | sums = make([]byte, sha1.Size*numPieces)
79 | for i := int64(0); i < numPieces; i++ {
80 | h := <-results
81 | copy(sums[h.i*sha1.Size:], h.data)
82 | }
83 | return
84 | }
85 |
86 | func hashPiece(h chan chunk, result chan chunk) {
87 | hasher := sha1.New()
88 | for piece := range h {
89 | hasher.Reset()
90 | _, err := hasher.Write(piece.data)
91 | if err != nil {
92 | result <- chunk{piece.i, nil}
93 | } else {
94 | result <- chunk{piece.i, hasher.Sum(nil)}
95 | }
96 | }
97 | }
98 |
99 | func checkPiece(piece []byte, m *MetaInfo, pieceIndex int) (good bool, err error) {
100 | ref := m.Info.Pieces
101 | var currentSum []byte
102 | currentSum, err = computePieceSum(piece)
103 | if err != nil {
104 | return
105 | }
106 | base := pieceIndex * sha1.Size
107 | end := base + sha1.Size
108 | refSha1 := []byte(ref[base:end])
109 | good = checkEqual(refSha1, currentSum)
110 | if !good {
111 | err = fmt.Errorf("reference sha1: %v != piece sha1: %v", refSha1, currentSum)
112 | }
113 | return
114 | }
115 |
116 | func computePieceSum(piece []byte) (sum []byte, err error) {
117 | hasher := sha1.New()
118 |
119 | _, err = hasher.Write(piece)
120 | if err != nil {
121 | return
122 | }
123 | sum = hasher.Sum(nil)
124 | return
125 | }
126 |
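As an aside, the fan-out/fan-in pattern used by computeSums above can be shown with a minimal standalone sketch: pieces are fed to a pool of hashing goroutines and the digests are merged back by index. The piece data and names below are illustrative only, not part of the package.

package main

import (
	"crypto/sha1"
	"fmt"
	"runtime"
)

type indexedChunk struct {
	i    int
	data []byte
}

func main() {
	pieces := [][]byte{[]byte("alpha"), []byte("beta"), []byte("gamma")}

	in := make(chan indexedChunk)
	out := make(chan indexedChunk, len(pieces))

	// Fan out: one hashing worker per CPU, as computeSums does.
	for w := 0; w < runtime.GOMAXPROCS(0); w++ {
		go func() {
			for c := range in {
				sum := sha1.Sum(c.data)
				out <- indexedChunk{i: c.i, data: sum[:]}
			}
		}()
	}

	// Feed pieces in order; workers may finish out of order.
	go func() {
		for i, p := range pieces {
			in <- indexedChunk{i: i, data: p}
		}
		close(in)
	}()

	// Fan in: place each digest at its piece index.
	sums := make([][]byte, len(pieces))
	for range pieces {
		c := <-out
		sums[c.i] = c.data
	}
	for i, s := range sums {
		fmt.Printf("piece %d: %x\n", i, s)
	}
}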
--------------------------------------------------------------------------------
/torrent/pieces_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "crypto/sha1"
5 | "fmt"
6 | "testing"
7 | )
8 |
9 | func TestComputeSums(t *testing.T) {
10 | pieceLen := int64(25)
11 | for _, testFile := range tests {
12 | fs, err := mkFileStore(testFile)
13 | if err != nil {
14 | t.Fatal(err)
15 | }
16 | sums, err := computeSums(fs, testFile.fileLen, pieceLen)
17 | if err != nil {
18 | t.Fatal(err)
19 | }
20 | if len(sums) < sha1.Size {
21 | t.Errorf("computeSums got len %d, wanted %d", len(sums), sha1.Size)
22 | }
23 | a := fmt.Sprintf("%X", sums[:sha1.Size])
24 | b := fmt.Sprintf("%X", sums[sha1.Size:sha1.Size*2])
25 | if a != testFile.hashPieceA {
26 | t.Errorf("Piece A Wanted %v, got %v\n", testFile.hashPieceA, a)
27 | }
28 | if b != testFile.hashPieceB {
29 | t.Errorf("Piece B Wanted %v, got %v\n", testFile.hashPieceB, b)
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/torrent/proxy.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "golang.org/x/net/proxy"
5 | "net"
6 | "net/http"
7 | )
8 |
9 | func proxyNetDial(dialer proxy.Dialer, network, address string) (net.Conn, error) {
10 | if dialer != nil {
11 | return dialer.Dial(network, address)
12 | }
13 | return net.Dial(network, address)
14 | }
15 |
16 | func proxyHttpGet(dialer proxy.Dialer, url string) (r *http.Response, e error) {
17 | return proxyHttpClient(dialer).Get(url)
18 | }
19 |
20 | func proxyHttpClient(dialer proxy.Dialer) (client *http.Client) {
21 | if dialer == nil {
22 | dialer = proxy.Direct
23 | }
24 | tr := &http.Transport{Dial: dialer.Dial}
25 | client = &http.Client{Transport: tr}
26 | return
27 | }
28 |
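A minimal sketch of how these helpers might be called from elsewhere in the package, assuming a local SOCKS5 endpoint; the proxy address, function name, and tracker URL below are placeholders, not part of the package.

package torrent

import (
	"log"

	"golang.org/x/net/proxy"
)

// exampleTrackerGet is hypothetical and only illustrates the call pattern.
func exampleTrackerGet() {
	// 127.0.0.1:9050 is an assumed local SOCKS5 proxy (e.g. Tor).
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:9050", nil, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	resp, err := proxyHttpGet(dialer, "http://tracker.example.com/announce")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("tracker responded:", resp.Status)
}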
--------------------------------------------------------------------------------
/torrent/ramfiles.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | type ramFsProvider struct{}
4 |
5 | func (o ramFsProvider) NewFS(directory string) (fs FileSystem, err error) {
6 | return &ramFileSystem{}, nil
7 | }
8 |
9 | // A RAM file system.
10 | type ramFileSystem struct {
11 | }
12 |
13 | type ramFile []byte
14 |
15 | func NewRAMFileSystem() (fs FileSystem, err error) {
16 | fs = &ramFileSystem{}
17 | return
18 | }
19 |
20 | func (r *ramFileSystem) Open(name []string, length int64) (file File, err error) {
21 | file = ramFile(make([]byte, int(length)))
22 | return
23 | }
24 |
25 | func (r *ramFileSystem) Close() error {
26 | return nil
27 | }
28 |
29 | func (r ramFile) ReadAt(p []byte, off int64) (n int, err error) {
30 | n = copy(p, []byte(r)[off:])
31 | return
32 | }
33 |
34 | func (r ramFile) WriteAt(p []byte, off int64) (n int, err error) {
35 | n = copy([]byte(r)[off:], p)
36 | return
37 | }
38 |
39 | func (r ramFile) Close() (err error) {
40 | return
41 | }
42 |
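A minimal, hypothetical sketch (not part of the package) of writing a block through the RAM-backed FileSystem and reading it back:

package torrent

import "log"

func exampleRAMFile() {
	fs, err := NewRAMFileSystem()
	if err != nil {
		log.Fatal(err)
	}
	// Open a 16-byte in-memory "file"; the name is purely illustrative.
	file, err := fs.Open([]string{"demo.bin"}, 16)
	if err != nil {
		log.Fatal(err)
	}
	file.WriteAt([]byte("hello"), 0)
	buf := make([]byte, 5)
	file.ReadAt(buf, 0)
	log.Printf("read back: %s", buf) // "hello"
}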
--------------------------------------------------------------------------------
/torrent/sftp.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "errors"
5 | "github.com/pkg/sftp"
6 | "golang.org/x/crypto/ssh"
7 | "golang.org/x/crypto/ssh/agent"
8 | "log"
9 | "net"
10 | "os"
11 | pathpkg "path"
12 | "path/filepath"
13 | "strings"
14 | )
15 |
16 | type SftpFsProvider struct {
17 | Server string
18 | Username string
19 | Password string
20 | ServerPath string
21 | }
22 |
23 | // Connection string: username:password@example.com:8042/over/there/
24 | func NewSftpFsProvider(connection string) SftpFsProvider {
25 | connSA := strings.Split(connection, "@")
26 | authSA := strings.Split(connSA[0], ":")
27 | serverSA := strings.SplitN(connSA[1], "/", 2)
28 | path := "/"
29 | if len(serverSA) == 2 { // {example.com:8042, over/there/}
30 | path += serverSA[1]
31 | }
32 | sp := SftpFsProvider{
33 | Username: authSA[0],
34 | Password: authSA[1],
35 | Server: serverSA[0],
36 | ServerPath: path,
37 | }
38 | return sp
39 | }
40 |
41 | func (o SftpFsProvider) NewFS(directory string) (fs FileSystem, err error) {
42 | sftpfs := &SftpFileSystem{
43 | sp: o,
44 | torrentDirectory: directory,
45 | closed: true,
46 | }
47 |
48 | return sftpfs, sftpfs.Connect()
49 | }
50 |
51 | type SftpFileSystem struct {
52 | sp SftpFsProvider
53 | torrentDirectory string
54 | sftpClient *sftp.Client
55 | sshClient *ssh.Client
56 | closed bool //false normally, true if closed
57 | }
58 |
59 | func (sfs *SftpFileSystem) Connect() error {
60 | var auths []ssh.AuthMethod
61 | if aconn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK")); err == nil {
62 | auths = append(auths, ssh.PublicKeysCallback(agent.NewClient(aconn).Signers))
63 | }
64 | if len(sfs.sp.Password) != 0 {
65 | auths = append(auths, ssh.Password(sfs.sp.Password))
66 | }
67 |
68 | config := ssh.ClientConfig{
69 | User: sfs.sp.Username,
70 | Auth: auths,
71 | }
72 | var err error
73 | sfs.sshClient, err = ssh.Dial("tcp", sfs.sp.Server, &config)
74 | if err != nil {
75 | log.Printf("unable to connect to [%s]: %v", sfs.sp.Server, err)
76 | return err
77 | }
78 |
79 | sfs.sftpClient, err = sftp.NewClient(sfs.sshClient)
80 | if err != nil {
81 | 		log.Printf("unable to start sftp subsystem: %v", err)
82 | return err
83 | }
84 | return nil
85 | }
86 |
87 | func (sfs *SftpFileSystem) translate(name []string) string {
88 | path := pathpkg.Clean(sfs.torrentDirectory + "/" + pathpkg.Join(name...))
89 | return pathpkg.Clean(filepath.Join(sfs.sp.ServerPath, path))
90 | }
91 |
92 | func (sfs *SftpFileSystem) Open(name []string, length int64) (File, error) {
93 | fullPath := sfs.translate(name)
94 | err := sfs.ensureDirectory(fullPath)
95 | if err != nil {
96 | log.Println("Couldn't ensure directory:", fullPath)
97 | return nil, err
98 | }
99 |
100 | file, err := sfs.sftpClient.OpenFile(fullPath, os.O_RDWR)
101 | if err != nil {
102 | file, err = sfs.sftpClient.Create(fullPath)
103 | if err != nil {
104 | log.Println("Couldn't create file:", fullPath, "error:", err)
105 | return nil, err
106 | }
107 | }
108 | retVal := &SftpFile{file}
109 | err = file.Truncate(length)
110 | return retVal, err
111 | }
112 |
113 | func (sfs *SftpFileSystem) Close() (err error) {
114 | sfs.closed = true
115 | err = sfs.sftpClient.Close()
116 | sfs.sshClient.Close()
117 | if err != nil {
118 | log.Println("Error closing sftp client:", err)
119 | }
120 | return
121 | }
122 |
123 | func (sfs *SftpFileSystem) ensureDirectory(fullPath string) error {
124 | fullPath = filepath.ToSlash(fullPath)
125 | fullPath = pathpkg.Clean(fullPath)
126 | path := strings.Split(fullPath, "/")
127 | path = path[:len(path)-1] //remove filename
128 |
129 | total := ""
130 | for _, str := range path {
131 | total += str + "/"
132 | sfs.sftpClient.Mkdir(total)
133 | 		//We're not too concerned if Mkdir returns an error, since it may just mean the
134 | 		//directory already exists. If not, the Lstat call below will fail anyway.
135 | }
136 | fi, err := sfs.sftpClient.Lstat(total)
137 | if err != nil {
138 | return err
139 | }
140 |
141 | if fi.IsDir() {
142 | return nil
143 | }
144 |
145 | return errors.New("Part of path isn't a directory! path=" + total)
146 | }
147 |
148 | type SftpFile struct {
149 | file *sftp.File
150 | }
151 |
152 | func (sff *SftpFile) ReadAt(p []byte, off int64) (n int, err error) {
153 | sff.file.Seek(off, os.SEEK_SET)
154 | return sff.file.Read(p)
155 | }
156 |
157 | func (sff *SftpFile) WriteAt(p []byte, off int64) (n int, err error) {
158 | sff.file.Seek(off, os.SEEK_SET)
159 | return sff.file.Write(p)
160 | }
161 |
162 | func (sff *SftpFile) Close() error {
163 | return sff.file.Close()
164 | }
165 |
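A brief, hypothetical sketch of how the connection string documented on NewSftpFsProvider translates into an SFTP-backed FileSystem; the host, credentials, and paths below are placeholders.

package torrent

// exampleSftpFS is illustrative only; it assumes an SFTP server reachable
// at sftp.example.com:22 that accepts password authentication.
func exampleSftpFS() (FileSystem, error) {
	provider := NewSftpFsProvider("alice:secret@sftp.example.com:22/srv/torrents")
	// Files opened through the returned FileSystem are resolved under
	// /srv/torrents/downloads on the remote host.
	return provider.NewFS("downloads")
}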
--------------------------------------------------------------------------------
/torrent/torrent.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bytes"
5 | "crypto/sha1"
6 | "encoding/hex"
7 | "errors"
8 | "fmt"
9 | "io"
10 | "io/ioutil"
11 | "log"
12 | "math"
13 | "math/rand"
14 | "net"
15 | "os"
16 | "path"
17 | "path/filepath"
18 | "strconv"
19 | "strings"
20 | "time"
21 |
22 | bencode "github.com/jackpal/bencode-go"
23 | "github.com/nictuku/dht"
24 | "github.com/nictuku/nettools"
25 | )
26 |
27 | const (
28 | MAX_NUM_PEERS = 60
29 | TARGET_NUM_PEERS = 15
30 | )
31 |
32 | // BitTorrent message types. Sources:
33 | // http://bittorrent.org/beps/bep_0003.html
34 | // http://wiki.theory.org/BitTorrentSpecification
35 | const (
36 | CHOKE = iota
37 | UNCHOKE
38 | INTERESTED
39 | NOT_INTERESTED
40 | HAVE
41 | BITFIELD
42 | REQUEST
43 | PIECE
44 | CANCEL
45 | PORT // Not implemented. For DHT support.
46 | EXTENSION = 20
47 | )
48 |
49 | const (
50 | EXTENSION_HANDSHAKE = iota
51 | )
52 |
53 | const (
54 | METADATA_REQUEST = iota
55 | METADATA_DATA
56 | METADATA_REJECT
57 | )
58 |
59 | func peerID() string {
60 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
61 | sid := "-tt" + strconv.Itoa(os.Getpid()) + "_" + strconv.FormatInt(r.Int63(), 10)
62 | return sid[0:20]
63 | }
64 |
65 | var kBitTorrentHeader = []byte{'\x13', 'B', 'i', 't', 'T', 'o', 'r',
66 | 'r', 'e', 'n', 't', ' ', 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l'}
67 |
68 | type ActivePiece struct {
69 | downloaderCount []int // -1 means piece is already downloaded
70 | buffer []byte
71 | }
72 |
73 | func (a *ActivePiece) chooseBlockToDownload(endgame bool) (index int) {
74 | if endgame {
75 | return a.chooseBlockToDownloadEndgame()
76 | }
77 | return a.chooseBlockToDownloadNormal()
78 | }
79 |
80 | func (a *ActivePiece) chooseBlockToDownloadNormal() (index int) {
81 | for i, v := range a.downloaderCount {
82 | if v == 0 {
83 | a.downloaderCount[i]++
84 | return i
85 | }
86 | }
87 | return -1
88 | }
89 |
90 | func (a *ActivePiece) chooseBlockToDownloadEndgame() (index int) {
91 | index, minCount := -1, -1
92 | for i, v := range a.downloaderCount {
93 | if v >= 0 && (minCount == -1 || minCount > v) {
94 | index, minCount = i, v
95 | }
96 | }
97 | if index > -1 {
98 | a.downloaderCount[index]++
99 | }
100 | return
101 | }
102 |
103 | func (a *ActivePiece) recordBlock(index int) (requestCount int) {
104 | requestCount = a.downloaderCount[index]
105 | a.downloaderCount[index] = -1
106 | return
107 | }
108 |
109 | func (a *ActivePiece) isComplete() bool {
110 | for _, v := range a.downloaderCount {
111 | if v != -1 {
112 | return false
113 | }
114 | }
115 | return true
116 | }
117 |
118 | type TorrentSession struct {
119 | flags *TorrentFlags
120 | M *MetaInfo
121 | Session SessionInfo
122 | ti *TrackerResponse
123 | torrentHeader []byte
124 | fileStore FileStore
125 | trackerReportChan chan ClientStatusReport
126 | trackerInfoChan chan *TrackerResponse
127 | hintNewPeerChan chan string
128 | addPeerChan chan *BtConn
129 | peers map[string]*peerState
130 | peerMessageChan chan peerMessage
131 | pieceSet *Bitset // The pieces we have
132 | totalPieces int
133 | totalSize int64
134 | lastPieceLength int
135 | goodPieces int
136 | activePieces map[int]*ActivePiece
137 | maxActivePieces int
138 | heartbeat chan bool
139 | dht *dht.DHT
140 | quit chan bool
141 | ended chan bool
142 | trackerLessMode bool
143 | torrentFile string
144 | chokePolicy ChokePolicy
145 | chokePolicyHeartbeat <-chan time.Time
146 | execOnSeedingDone bool
147 | }
148 |
149 | func NewTorrentSession(flags *TorrentFlags, torrent string, listenPort uint16) (t *TorrentSession, err error) {
150 | ts := &TorrentSession{
151 | flags: flags,
152 | peers: make(map[string]*peerState),
153 | peerMessageChan: make(chan peerMessage),
154 | activePieces: make(map[int]*ActivePiece),
155 | quit: make(chan bool),
156 | ended: make(chan bool),
157 | torrentFile: torrent,
158 | chokePolicy: &ClassicChokePolicy{},
159 | chokePolicyHeartbeat: time.Tick(10 * time.Second),
160 | execOnSeedingDone: len(flags.ExecOnSeeding) == 0,
161 | }
162 | fromMagnet := strings.HasPrefix(torrent, "magnet:")
163 | ts.M, err = GetMetaInfo(flags.Dial, torrent)
164 | if err != nil {
165 | return
166 | }
167 |
168 | if ts.M.Announce == "" && len(ts.M.AnnounceList) == 0 {
169 | ts.trackerLessMode = true
170 | } else {
171 | ts.trackerLessMode = ts.flags.TrackerlessMode
172 | }
173 |
174 | dhtAllowed := flags.UseDHT && ts.M.Info.Private == 0
175 | if flags.UseDHT && !dhtAllowed {
176 | log.Println("[", ts.M.Info.Name, "] Can't use DHT because torrent is marked Private")
177 | }
178 |
179 | ts.Session = SessionInfo{
180 | PeerID: peerID(),
181 | Port: listenPort,
182 | UseDHT: dhtAllowed,
183 | FromMagnet: fromMagnet,
184 | HaveTorrent: false,
185 | ME: &MetaDataExchange{},
186 | OurExtensions: map[int]string{1: "ut_metadata"},
187 | OurAddresses: map[string]bool{"127.0.0.1:" + strconv.Itoa(int(listenPort)): true},
188 | }
189 | ts.setHeader()
190 |
191 | if !ts.Session.FromMagnet {
192 | err = ts.load()
193 | }
194 | return ts, err
195 | }
196 |
197 | func (ts *TorrentSession) reload(metadata string) (err error) {
198 | var info InfoDict
199 | err = bencode.Unmarshal(bytes.NewReader([]byte(metadata)), &info)
200 | if err != nil {
201 | log.Println("[", ts.M.Info.Name, "] Error when reloading torrent: ", err)
202 | return
203 | }
204 |
205 | ts.M.Info = info
206 | err = ts.load()
207 |
208 | if ts.flags.Cacher != nil && ts.fileStore != nil {
209 | ts.fileStore = ts.flags.Cacher.NewCache(ts.M.InfoHash, ts.totalPieces, ts.M.Info.PieceLength, ts.totalSize, ts.fileStore)
210 | }
211 | return
212 | }
213 |
214 | func (ts *TorrentSession) load() (err error) {
215 | log.Printf("[ %s ] Tracker: %v, Comment: %v, InfoHash: %x, Encoding: %v, Private: %v",
216 | ts.M.Info.Name, ts.M.AnnounceList, ts.M.Comment, ts.M.InfoHash, ts.M.Encoding, ts.M.Info.Private)
217 | if e := ts.M.Encoding; e != "" && e != "UTF-8" {
218 | err = fmt.Errorf("Unknown encoding %v", e)
219 | return
220 | }
221 |
222 | ext := ".torrent"
223 | dir := ts.flags.FileDir
224 | if len(ts.M.Info.Files) != 0 {
225 | torrentName := ts.M.Info.Name
226 | if torrentName == "" {
227 | torrentName = filepath.Base(ts.torrentFile)
228 | }
229 | // canonicalize the torrent path and make sure it doesn't start with ".."
230 | torrentName = path.Clean("/" + torrentName)
231 | dir += torrentName
232 | //Remove ".torrent" extension if present
233 | if strings.HasSuffix(strings.ToLower(dir), ext) {
234 | dir = dir[:len(dir)-len(ext)]
235 | }
236 | }
237 |
238 | var fileSystem FileSystem
239 | fileSystem, err = ts.flags.FileSystemProvider.NewFS(dir)
240 | if err != nil {
241 | return
242 | }
243 |
244 | ts.fileStore, ts.totalSize, err = NewFileStore(&ts.M.Info, fileSystem)
245 | if err != nil {
246 | return
247 | }
248 |
249 | if ts.M.Info.PieceLength == 0 {
250 | err = fmt.Errorf("Bad PieceLength: %v", ts.M.Info.PieceLength)
251 | return
252 | }
253 |
254 | ts.totalPieces = int(ts.totalSize / ts.M.Info.PieceLength)
255 | ts.lastPieceLength = int(ts.totalSize % ts.M.Info.PieceLength)
256 | if ts.lastPieceLength == 0 { // last piece is a full piece
257 | ts.lastPieceLength = int(ts.M.Info.PieceLength)
258 | } else {
259 | ts.totalPieces++
260 | }
261 |
262 | if ts.flags.MemoryPerTorrent < 0 {
263 | ts.maxActivePieces = 2147483640
264 | log.Printf("[ %s ] Max Active Pieces set to Unlimited\n", ts.M.Info.Name)
265 | } else {
266 | ts.maxActivePieces = int(int64(ts.flags.MemoryPerTorrent*1024*1024) / ts.M.Info.PieceLength)
267 | if ts.maxActivePieces == 0 {
268 | ts.maxActivePieces++
269 | }
270 |
271 | log.Printf("[ %s ] Max Active Pieces set to %v\n", ts.M.Info.Name, ts.maxActivePieces)
272 | }
273 |
274 | ts.goodPieces = 0
275 | if ts.flags.InitialCheck {
276 | start := time.Now()
277 | ts.goodPieces, _, ts.pieceSet, err = checkPieces(ts.fileStore, ts.totalSize, ts.M)
278 | end := time.Now()
279 | log.Printf("[ %s ] Computed missing pieces (%.2f seconds)\n", ts.M.Info.Name, end.Sub(start).Seconds())
280 | if err != nil {
281 | return
282 | }
283 | } else if ts.flags.QuickResume {
284 | resumeFilePath := "./" + hex.EncodeToString([]byte(ts.M.InfoHash)) + "-haveBitset"
285 | if resumeFile, err := os.Open(resumeFilePath); err == nil {
286 | rfstat, _ := resumeFile.Stat()
287 | tBA := make([]byte, 2*rfstat.Size())
288 | count, _ := resumeFile.Read(tBA)
289 | ts.pieceSet = NewBitsetFromBytes(ts.totalPieces, tBA[:count])
290 | if ts.pieceSet == nil {
291 | return fmt.Errorf("[ %s ] Malformed resume data: %v", ts.M.Info.Name, resumeFilePath)
292 | }
293 |
294 | for i := 0; i < ts.totalPieces; i++ {
295 | if ts.pieceSet.IsSet(i) {
296 | ts.goodPieces++
297 | }
298 | }
299 | log.Printf("[ %s ] Got piece list from haveBitset file.\n", ts.M.Info.Name)
300 | } else {
301 | log.Printf("[ %s ] Couldn't open haveBitset file: %v", ts.M.Info.Name, err)
302 | }
303 | }
304 |
305 | if ts.pieceSet == nil { //Blank slate it is then.
306 | ts.pieceSet = NewBitset(ts.totalPieces)
307 | log.Printf("[ %s ] Starting from scratch.\n", ts.M.Info.Name)
308 | }
309 |
310 | bad := ts.totalPieces - ts.goodPieces
311 | left := uint64(bad) * uint64(ts.M.Info.PieceLength)
312 | if !ts.pieceSet.IsSet(ts.totalPieces - 1) {
313 | left = left - uint64(ts.M.Info.PieceLength) + uint64(ts.lastPieceLength)
314 | }
315 | ts.Session.Left = left
316 |
317 | log.Println("[", ts.M.Info.Name, "] Good pieces:", ts.goodPieces, "Bad pieces:", bad, "Bytes left:", left)
318 |
319 | 	// Enlarge any existing peers' piece bitsets
320 | for _, p := range ts.peers {
321 | if p.have.n != ts.totalPieces {
322 | if p.have.n != 0 {
323 | panic("Expected p.have.n == 0")
324 | }
325 | p.have = NewBitset(ts.totalPieces)
326 | }
327 | }
328 |
329 | ts.Session.HaveTorrent = true
330 | return
331 | }
332 |
333 | func (ts *TorrentSession) pieceLength(piece int) int {
334 | if piece < ts.totalPieces-1 {
335 | return int(ts.M.Info.PieceLength)
336 | }
337 | return ts.lastPieceLength
338 | }
339 |
340 | func (ts *TorrentSession) fetchTrackerInfo(event string) {
341 | m, si := ts.M, ts.Session
342 | log.Println("[", ts.M.Info.Name, "] Stats: Uploaded", si.Uploaded, "Downloaded", si.Downloaded, "Left", si.Left)
343 | ts.trackerReportChan <- ClientStatusReport{
344 | event, m.InfoHash, si.PeerID, si.Port, si.Uploaded, si.Downloaded, si.Left}
345 | }
346 |
347 | func (ts *TorrentSession) setHeader() {
348 | header := make([]byte, 68)
349 | copy(header, kBitTorrentHeader[0:])
350 | if ts.Session.UseDHT {
351 | header[27] = header[27] | 0x01
352 | }
353 | // Support Extension Protocol (BEP-0010)
354 | header[25] |= 0x10
355 | copy(header[28:48], []byte(ts.M.InfoHash))
356 | copy(header[48:68], []byte(ts.Session.PeerID))
357 | ts.torrentHeader = header
358 | }
359 |
360 | func (ts *TorrentSession) Header() (header []byte) {
361 | return ts.torrentHeader
362 | }
363 |
364 | // Try to connect if the peer is not already in our peers.
365 | // Can be called from any goroutine.
366 | func (ts *TorrentSession) HintNewPeer(peer string) {
367 | if len(ts.hintNewPeerChan) < cap(ts.hintNewPeerChan) { //We don't want to block the main loop because a single torrent is having problems
368 | select {
369 | case ts.hintNewPeerChan <- peer:
370 | case <-ts.ended:
371 | }
372 | } else {
373 | // log.Println("[", ts.M.Info.Name, "] New peer hint failed, because DoTorrent() hasn't been clearing out the channel.")
374 | }
375 | }
376 |
377 | func (ts *TorrentSession) tryNewPeer(peer string) bool {
378 | if (ts.Session.HaveTorrent || ts.Session.FromMagnet) && len(ts.peers) < MAX_NUM_PEERS {
379 | if _, ok := ts.Session.OurAddresses[peer]; !ok {
380 | if _, ok := ts.peers[peer]; !ok {
381 | go ts.connectToPeer(peer)
382 | return true
383 | }
384 | } else {
385 | // log.Println("[", ts.M.Info.Name, "] New peer hint rejected, because it's one of our addresses (", peer, ")")
386 | }
387 | }
388 | return false
389 | }
390 |
391 | func (ts *TorrentSession) connectToPeer(peer string) {
392 | conn, err := proxyNetDial(ts.flags.Dial, "tcp", peer)
393 | if err != nil {
394 | // log.Println("[", ts.M.Info.Name, "] Failed to connect to", peer, err)
395 | return
396 | }
397 |
398 | _, err = conn.Write(ts.Header())
399 | if err != nil {
400 | log.Println("[", ts.M.Info.Name, "] Failed to send header to", peer, err)
401 | return
402 | }
403 |
404 | theirheader, err := readHeader(conn)
405 | if err != nil {
406 | return
407 | }
408 |
409 | peersInfoHash := string(theirheader[8:28])
410 | id := string(theirheader[28:48])
411 |
412 | btconn := &BtConn{
413 | header: theirheader,
414 | Infohash: peersInfoHash,
415 | id: id,
416 | conn: conn,
417 | }
418 | // log.Println("[", ts.M.Info.Name, "] Connected to", peer)
419 | ts.AddPeer(btconn)
420 | }
421 |
422 | func (ts *TorrentSession) AcceptNewPeer(btconn *BtConn) {
423 | _, err := btconn.conn.Write(ts.Header())
424 | if err != nil {
425 | return
426 | }
427 | ts.AddPeer(btconn)
428 | }
429 |
430 | // Can be called from any goroutine
431 | func (ts *TorrentSession) AddPeer(btconn *BtConn) {
432 | if len(ts.addPeerChan) < cap(ts.addPeerChan) { //We don't want to block the main loop because a single torrent is having problems
433 | select {
434 | case ts.addPeerChan <- btconn:
435 | case <-ts.ended:
436 | }
437 | } else {
438 | // log.Println("[", ts.M.Info.Name, "] Add peer failed, because DoTorrent() hasn't been clearing out the channel.")
439 | btconn.conn.Close()
440 | }
441 | }
442 |
443 | func (ts *TorrentSession) addPeerImp(btconn *BtConn) {
444 | if !ts.Session.HaveTorrent && !ts.Session.FromMagnet {
445 | log.Println("[", ts.M.Info.Name, "] Rejecting peer because we don't have a torrent yet")
446 | btconn.conn.Close()
447 | return
448 | }
449 |
450 | peer := btconn.conn.RemoteAddr().String()
451 |
452 | if btconn.id == ts.Session.PeerID {
453 | log.Println("[", ts.M.Info.Name, "] Rejecting self-connection:", peer, "<->", btconn.conn.LocalAddr())
454 | ts.Session.OurAddresses[btconn.conn.LocalAddr().String()] = true
455 | ts.Session.OurAddresses[peer] = true
456 | btconn.conn.Close()
457 | return
458 | }
459 |
460 | for _, p := range ts.peers {
461 | if p.id == btconn.id {
462 | log.Println("[", ts.M.Info.Name, "] Rejecting peer because already have a peer with the same id")
463 | btconn.conn.Close()
464 | return
465 | }
466 | }
467 |
468 | // log.Println("[", ts.M.Info.Name, "] Adding peer", peer)
469 | if len(ts.peers) >= MAX_NUM_PEERS {
470 | log.Println("[", ts.M.Info.Name, "] We have enough peers. Rejecting additional peer", peer)
471 | btconn.conn.Close()
472 | return
473 | }
474 |
475 | theirheader := btconn.header
476 |
477 | if ts.Session.UseDHT {
478 | 		// If the low bit of the last reserved byte is set, the peer supports DHT (BEP-0005).
479 | if int(theirheader[7])&0x01 == 0x01 {
480 | // It's OK if we know this node already. The DHT engine will
481 | // ignore it accordingly.
482 | go ts.dht.AddNode(peer)
483 | }
484 | }
485 |
486 | ps := NewPeerState(btconn.conn)
487 | ps.address = peer
488 | ps.id = btconn.id
489 |
490 | // By default, a peer has no pieces. If it has pieces, it should send
491 | // a BITFIELD message as a first message
492 | // If the torrent has not been received yet, ts.totalPieces will be 0, and
493 | // the "have" map will have to be enlarged later when ts.totalPieces is
494 | 	// the "have" bitset will have to be enlarged later when ts.totalPieces is
495 |
496 | ps.have = NewBitset(ts.totalPieces)
497 |
498 | ts.peers[peer] = ps
499 | go ps.peerWriter(ts.peerMessageChan)
500 | go ps.peerReader(ts.peerMessageChan)
501 |
502 | if int(theirheader[5])&0x10 == 0x10 {
503 | ps.SendExtensions(ts.Session.Port)
504 | } else if ts.pieceSet != nil {
505 | ps.SendBitfield(ts.pieceSet)
506 | }
507 | }
508 |
509 | func (ts *TorrentSession) ClosePeer(peer *peerState) {
510 | 	if ts.Session.ME != nil && ts.Session.ME.Transferring { // let another peer resume the metadata transfer
511 | ts.Session.ME.Transferring = false
512 | }
513 |
514 | //log.Println("[", ts.M.Info.Name, "] Closing peer", peer.address)
515 | _ = ts.removeRequests(peer)
516 | peer.Close()
517 | delete(ts.peers, peer.address)
518 | }
519 |
520 | func (ts *TorrentSession) deadlockDetector() {
521 | // Wait for a heartbeat before we start deadlock detection.
522 | 	// This handles the case where it takes a long time to find
523 | // a tracker.
524 | <-ts.heartbeat
525 | lastHeartbeat := time.Now()
526 | for {
527 | select {
528 | case <-ts.heartbeat:
529 | lastHeartbeat = time.Now()
530 | case <-time.After(15 * time.Second):
531 | age := time.Now().Sub(lastHeartbeat)
532 | log.Println("[", ts.M.Info.Name, "] Starvation or deadlock of main thread detected. Look in the stack dump for what DoTorrent() is currently doing")
533 | log.Println("[", ts.M.Info.Name, "] Last heartbeat", age.Seconds(), "seconds ago")
534 | panic("[" + ts.M.Info.Name + "] Killed by deadlock detector")
535 | }
536 | }
537 | }
538 |
539 | func (ts *TorrentSession) Quit() (err error) {
540 | select {
541 | case ts.quit <- true:
542 | case <-ts.ended:
543 | }
544 | return
545 | }
546 |
547 | func (ts *TorrentSession) Shutdown() (err error) {
548 | close(ts.ended)
549 |
550 | if ts.fileStore != nil {
551 | err = ts.fileStore.Close()
552 | if err != nil {
553 | log.Println("[", ts.M.Info.Name, "] Error closing filestore:", err)
554 | }
555 | }
556 |
557 | for _, peer := range ts.peers {
558 | peer.Close()
559 | }
560 |
561 | return
562 | }
563 |
564 | func (ts *TorrentSession) DoTorrent() {
565 | ts.heartbeat = make(chan bool, 1)
566 | if ts.flags.UseDeadlockDetector {
567 | go ts.deadlockDetector()
568 | }
569 |
570 | if ts.flags.Cacher != nil && ts.fileStore != nil {
571 | ts.fileStore = ts.flags.Cacher.NewCache(ts.M.InfoHash, ts.totalPieces, ts.M.Info.PieceLength, ts.totalSize, ts.fileStore)
572 | }
573 |
574 | heartbeatDuration := 1 * time.Second
575 | heartbeatChan := time.Tick(heartbeatDuration)
576 |
577 | keepAliveChan := time.Tick(60 * time.Second)
578 | var retrackerChan <-chan time.Time
579 | ts.hintNewPeerChan = make(chan string, MAX_NUM_PEERS)
580 | ts.addPeerChan = make(chan *BtConn, MAX_NUM_PEERS)
581 | if !ts.trackerLessMode {
582 | // Start out polling tracker every 20 seconds until we get a response.
583 | 		// Maybe use exponential backoff here?
584 | retrackerChan = time.Tick(20 * time.Second)
585 | ts.trackerInfoChan = make(chan *TrackerResponse)
586 | ts.trackerReportChan = make(chan ClientStatusReport)
587 | startTrackerClient(ts.flags.Dial, ts.M.Announce, ts.M.AnnounceList, ts.trackerInfoChan, ts.trackerReportChan)
588 | }
589 |
590 | if ts.Session.UseDHT {
591 | ts.dht.PeersRequest(ts.M.InfoHash, true)
592 | }
593 |
594 | if !ts.trackerLessMode && ts.Session.HaveTorrent {
595 | ts.fetchTrackerInfo("started")
596 | }
597 |
598 | defer ts.Shutdown()
599 |
600 | lastDownloaded := ts.Session.Downloaded
601 |
602 | for {
603 | if !ts.execOnSeedingDone && ts.goodPieces == ts.totalPieces {
604 | ts.execOnSeeding()
605 | ts.execOnSeedingDone = true
606 | }
607 | select {
608 | case <-ts.chokePolicyHeartbeat:
609 | ts.chokePeers()
610 | case hintNewPeer := <-ts.hintNewPeerChan:
611 | ts.tryNewPeer(hintNewPeer)
612 | case btconn := <-ts.addPeerChan:
613 | ts.addPeerImp(btconn)
614 | case <-retrackerChan:
615 | if !ts.trackerLessMode {
616 | ts.fetchTrackerInfo("")
617 | }
618 | case ti := <-ts.trackerInfoChan:
619 | ts.ti = ti
620 | 			log.Println("[", ts.M.Info.Name, "] Torrent has", ts.ti.Complete, "seeders and", ts.ti.Incomplete, "leechers")
621 | if !ts.trackerLessMode {
622 | newPeerCount := 0
623 | {
624 | peers := ts.ti.Peers
625 | if len(peers) > 0 {
626 | const peerLen = 6
627 | log.Println("[", ts.M.Info.Name, "] Tracker gave us", len(peers)/peerLen, "peers")
628 | for i := 0; i < len(peers); i += peerLen {
629 | peer := nettools.BinaryToDottedPort(peers[i : i+peerLen])
630 | if ts.tryNewPeer(peer) {
631 | newPeerCount++
632 | }
633 | }
634 | }
635 | }
636 | {
637 | peers6 := ts.ti.Peers6
638 | if len(peers6) > 0 {
639 | const peerLen = 18
640 | log.Println("[", ts.M.Info.Name, "] Tracker gave us", len(peers6)/peerLen, "IPv6 peers")
641 | for i := 0; i < len(peers6); i += peerLen {
642 | peerEntry := peers6[i : i+peerLen]
643 | host := net.IP(peerEntry[0:16])
644 | port := int((uint(peerEntry[16]) << 8) | uint(peerEntry[17]))
645 | peer := net.JoinHostPort(host.String(), strconv.Itoa(port))
646 | if ts.tryNewPeer(peer) {
647 | newPeerCount++
648 | }
649 | }
650 | }
651 | }
652 | log.Println("[", ts.M.Info.Name, "] Contacting", newPeerCount, "new peers")
653 | }
654 |
655 | interval := ts.ti.Interval
656 | minInterval := uint(120)
657 | maxInterval := uint(24 * 3600)
658 | if interval < minInterval {
659 | interval = minInterval
660 | } else if interval > maxInterval {
661 | interval = maxInterval
662 | }
663 | log.Println("[", ts.M.Info.Name, "] ..checking again in", interval, "seconds")
664 | retrackerChan = time.Tick(time.Duration(interval) * time.Second)
665 |
666 | case pm := <-ts.peerMessageChan:
667 | peer, message := pm.peer, pm.message
668 | peer.lastReadTime = time.Now()
669 | err2 := ts.DoMessage(peer, message)
670 | if err2 != nil {
671 | if err2 != io.EOF {
672 | log.Println("[", ts.M.Info.Name, "] Closing peer", peer.address, "because", err2)
673 | }
674 | ts.ClosePeer(peer)
675 | }
676 | case <-heartbeatChan:
677 | if ts.flags.UseDeadlockDetector {
678 | ts.heartbeat <- true
679 | }
680 | ratio := float64(0.0)
681 | if ts.Session.Downloaded > 0 {
682 | ratio = float64(ts.Session.Uploaded) / float64(ts.Session.Downloaded)
683 | }
684 | speed := humanSize(float64(ts.Session.Downloaded-lastDownloaded) / heartbeatDuration.Seconds())
685 | lastDownloaded = ts.Session.Downloaded
686 | log.Printf("[ %s ] Peers: %d downloaded: %d (%s/s) uploaded: %d ratio: %f pieces: %d/%d\n",
687 | ts.M.Info.Name,
688 | len(ts.peers),
689 | ts.Session.Downloaded,
690 | speed,
691 | ts.Session.Uploaded,
692 | ratio,
693 | ts.goodPieces,
694 | ts.totalPieces)
695 | if ts.totalPieces != 0 && ts.goodPieces == ts.totalPieces && ratio >= ts.flags.SeedRatio {
696 | log.Println("[", ts.M.Info.Name, "] Achieved target seed ratio", ts.flags.SeedRatio)
697 | return
698 | }
699 | if len(ts.peers) < TARGET_NUM_PEERS && (ts.totalPieces == 0 || ts.goodPieces < ts.totalPieces) {
700 | if ts.Session.UseDHT {
701 | go ts.dht.PeersRequest(ts.M.InfoHash, true)
702 | }
703 | if !ts.trackerLessMode {
704 | if ts.ti == nil || ts.ti.Complete > 100 {
705 | ts.fetchTrackerInfo("")
706 | }
707 | }
708 | }
709 | case <-keepAliveChan:
710 | now := time.Now()
711 | for _, peer := range ts.peers {
712 | 				if !peer.lastReadTime.IsZero() && now.Sub(peer.lastReadTime) > 3*time.Minute {
713 | // log.Println("[", ts.M.Info.Name, "] Closing peer", peer.address, "because timed out")
714 | ts.ClosePeer(peer)
715 | continue
716 | }
717 | err2 := ts.doCheckRequests(peer)
718 | if err2 != nil {
719 | if err2 != io.EOF {
720 | log.Println("[", ts.M.Info.Name, "] Closing peer", peer.address, "because", err2)
721 | }
722 | ts.ClosePeer(peer)
723 | continue
724 | }
725 | peer.keepAlive(now)
726 | }
727 |
728 | case <-ts.quit:
729 | log.Println("[", ts.M.Info.Name, "] Quitting torrent session")
730 | ts.fetchTrackerInfo("stopped")
731 | time.Sleep(10 * time.Millisecond)
732 | return
733 | }
734 | }
735 | }
736 |
737 | func (ts *TorrentSession) chokePeers() (err error) {
738 | // log.Printf("[ %s ] Choking peers", ts.M.Info.Name)
739 | peers := ts.peers
740 | chokers := make([]Choker, 0, len(peers))
741 | for _, peer := range peers {
742 | if peer.peer_interested {
743 | peer.computeDownloadRate()
744 | // log.Printf("%s %g bps", peer.address, peer.DownloadBPS())
745 | chokers = append(chokers, Choker(peer))
746 | }
747 | }
748 | var unchokeCount int
749 | unchokeCount, err = ts.chokePolicy.Choke(chokers)
750 | if err != nil {
751 | return
752 | }
753 | for i, c := range chokers {
754 | shouldChoke := i >= unchokeCount
755 | if peer, ok := c.(*peerState); ok {
756 | if shouldChoke != peer.am_choking {
757 | // log.Printf("[ %s ] Changing choke status %v -> %v", ts.M.Info.Name, peer.address, shouldChoke)
758 | peer.SetChoke(shouldChoke)
759 | }
760 | }
761 | }
762 | return
763 | }
764 |
765 | func (ts *TorrentSession) RequestBlock(p *peerState) error {
766 | if !ts.Session.HaveTorrent { // We can't request a block without a torrent
767 | return nil
768 | }
769 |
770 | for k := range ts.activePieces {
771 | if p.have.IsSet(k) {
772 | err := ts.RequestBlock2(p, k, false)
773 | if err != io.EOF {
774 | return err
775 | }
776 | }
777 | }
778 |
779 | if len(ts.activePieces) >= ts.maxActivePieces {
780 | return nil
781 | }
782 |
783 | // No active pieces. (Or no suitable active pieces.) Pick one
784 | piece := ts.ChoosePiece(p)
785 | if piece < 0 {
786 | // No unclaimed pieces. See if we can double-up on an active piece
787 | for k := range ts.activePieces {
788 | if p.have.IsSet(k) {
789 | err := ts.RequestBlock2(p, k, true)
790 | if err != io.EOF {
791 | return err
792 | }
793 | }
794 | }
795 | }
796 |
797 | if piece < 0 {
798 | p.SetInterested(false)
799 | return nil
800 | }
801 | pieceLength := ts.pieceLength(piece)
802 | pieceCount := (pieceLength + STANDARD_BLOCK_LENGTH - 1) / STANDARD_BLOCK_LENGTH
803 | ts.activePieces[piece] = &ActivePiece{make([]int, pieceCount), make([]byte, pieceLength)}
804 | return ts.RequestBlock2(p, piece, false)
805 | }
806 |
807 | func (ts *TorrentSession) ChoosePiece(p *peerState) (piece int) {
808 | n := ts.totalPieces
809 | start := rand.Intn(n)
810 | piece = ts.checkRange(p, start, n)
811 | if piece == -1 {
812 | piece = ts.checkRange(p, 0, start)
813 | }
814 | return
815 | }
816 |
817 | // checkRange returns the first piece in range start..end that is not in the
818 | // torrent's pieceSet but is in the peer's pieceSet.
819 | func (ts *TorrentSession) checkRange(p *peerState, start, end int) (piece int) {
820 | clampedEnd := min(end, min(p.have.n, ts.pieceSet.n))
821 | for i := start; i < clampedEnd; i++ {
822 | if (!ts.pieceSet.IsSet(i)) && p.have.IsSet(i) {
823 | if _, ok := ts.activePieces[i]; !ok {
824 | return i
825 | }
826 | }
827 | }
828 | return -1
829 | }
830 |
831 | func (ts *TorrentSession) RequestBlock2(p *peerState, piece int, endGame bool) (err error) {
832 | v := ts.activePieces[piece]
833 | block := v.chooseBlockToDownload(endGame)
834 | if block >= 0 {
835 | ts.requestBlockImp(p, piece, block, true)
836 | } else {
837 | return io.EOF
838 | }
839 | return
840 | }
841 |
842 | // Request or cancel a block
843 | func (ts *TorrentSession) requestBlockImp(p *peerState, piece int, block int, request bool) {
844 | begin := block * STANDARD_BLOCK_LENGTH
845 | req := make([]byte, 13)
846 | opcode := byte(REQUEST)
847 | if !request {
848 | opcode = byte(CANCEL)
849 | }
850 | length := STANDARD_BLOCK_LENGTH
851 | if piece == ts.totalPieces-1 {
852 | left := ts.lastPieceLength - begin
853 | if left < length {
854 | length = left
855 | }
856 | }
857 | // log.Println("[", ts.M.Info.Name, "] Requesting block", piece, ".", block, length, request)
858 | req[0] = opcode
859 | uint32ToBytes(req[1:5], uint32(piece))
860 | uint32ToBytes(req[5:9], uint32(begin))
861 | uint32ToBytes(req[9:13], uint32(length))
862 | requestIndex := (uint64(piece) << 32) | uint64(begin)
863 | if !request {
864 | delete(p.our_requests, requestIndex)
865 | } else {
866 | p.our_requests[requestIndex] = time.Now()
867 | }
868 | p.sendMessage(req)
869 | return
870 | }
871 |
872 | func (ts *TorrentSession) RecordBlock(p *peerState, piece, begin, length uint32) (err error) {
873 | block := begin / STANDARD_BLOCK_LENGTH
874 | // log.Println("[", ts.M.Info.Name, "] Received block", piece, ".", block)
875 | requestIndex := (uint64(piece) << 32) | uint64(begin)
876 | delete(p.our_requests, requestIndex)
877 | v, ok := ts.activePieces[int(piece)]
878 | if ok {
879 | requestCount := v.recordBlock(int(block))
880 | if requestCount > 1 {
881 | // Someone else has also requested this, so send cancel notices
882 | for _, peer := range ts.peers {
883 | if p != peer {
884 | if _, ok := peer.our_requests[requestIndex]; ok {
885 | ts.requestBlockImp(peer, int(piece), int(block), false)
886 | requestCount--
887 | }
888 | }
889 | }
890 | }
891 | ts.Session.Downloaded += uint64(length)
892 | if v.isComplete() {
893 | delete(ts.activePieces, int(piece))
894 |
895 | ok, err = checkPiece(v.buffer, ts.M, int(piece))
896 | if !ok || err != nil {
897 | log.Println("[", ts.M.Info.Name, "] Closing peer that sent a bad piece", piece, p.id, err)
898 | p.Close()
899 | return
900 | }
901 | ts.fileStore.WritePiece(v.buffer, int(piece))
902 | ts.Session.Left -= uint64(len(v.buffer))
903 | ts.pieceSet.Set(int(piece))
904 | ts.goodPieces++
905 | if ts.flags.QuickResume {
906 | ioutil.WriteFile("./"+hex.EncodeToString([]byte(ts.M.InfoHash))+"-haveBitset", ts.pieceSet.Bytes(), 0777)
907 | }
908 | var percentComplete float32
909 | if ts.totalPieces > 0 {
910 | percentComplete = float32(ts.goodPieces*100) / float32(ts.totalPieces)
911 | }
912 | log.Println("[", ts.M.Info.Name, "] Have", ts.goodPieces, "of", ts.totalPieces,
913 | "pieces", percentComplete, "% complete")
914 | if ts.goodPieces == ts.totalPieces {
915 | if !ts.trackerLessMode {
916 | ts.fetchTrackerInfo("completed")
917 | }
918 | // TODO: Drop connections to all seeders.
919 | }
920 | for _, p := range ts.peers {
921 | if p.have != nil {
922 | if int(piece) < p.have.n && p.have.IsSet(int(piece)) {
923 | // We don't do anything special. We rely on the caller
924 | // to decide if this peer is still interesting.
925 | } else {
926 | // log.Println("[", ts.M.Info.Name, "] ...telling ", p)
927 | haveMsg := make([]byte, 5)
928 | haveMsg[0] = HAVE
929 | uint32ToBytes(haveMsg[1:5], piece)
930 | p.sendMessage(haveMsg)
931 | }
932 | }
933 | }
934 | }
935 | } else {
936 | log.Println("[", ts.M.Info.Name, "] Received a block we already have.", piece, block, p.address)
937 | }
938 | return
939 | }
940 |
941 | func (ts *TorrentSession) doChoke(p *peerState) (err error) {
942 | p.peer_choking = true
943 | err = ts.removeRequests(p)
944 | return
945 | }
946 |
947 | func (ts *TorrentSession) removeRequests(p *peerState) (err error) {
948 | for k := range p.our_requests {
949 | piece := int(k >> 32)
950 | begin := int(k & 0xffffffff)
951 | block := begin / STANDARD_BLOCK_LENGTH
952 | // log.Println("[", ts.M.Info.Name, "] Forgetting we requested block ", piece, ".", block)
953 | ts.removeRequest(piece, block)
954 | }
955 | p.our_requests = make(map[uint64]time.Time, MAX_OUR_REQUESTS)
956 | return
957 | }
958 |
959 | func (ts *TorrentSession) removeRequest(piece, block int) {
960 | v, ok := ts.activePieces[piece]
961 | if ok && v.downloaderCount[block] > 0 {
962 | v.downloaderCount[block]--
963 | }
964 | }
965 |
966 | func (ts *TorrentSession) doCheckRequests(p *peerState) (err error) {
967 | now := time.Now()
968 | for k, v := range p.our_requests {
969 | if now.Sub(v).Seconds() > 30 {
970 | piece := int(k >> 32)
971 | block := int(k&0xffffffff) / STANDARD_BLOCK_LENGTH
972 | // log.Println("[", ts.M.Info.Name, "] timing out request of", piece, ".", block)
973 | ts.removeRequest(piece, block)
974 | }
975 | }
976 | return
977 | }
978 |
979 | func (ts *TorrentSession) DoMessage(p *peerState, message []byte) (err error) {
980 | if message == nil {
981 | return io.EOF // The reader or writer goroutine has exited
982 | }
983 | if len(message) == 0 { // keep alive
984 | return
985 | }
986 |
987 | if ts.Session.HaveTorrent {
988 | err = ts.generalMessage(message, p)
989 | } else {
990 | err = ts.extensionMessage(message, p)
991 | }
992 | return
993 | }
994 |
995 | func (ts *TorrentSession) extensionMessage(message []byte, p *peerState) (err error) {
996 | if message[0] == EXTENSION {
997 | err := ts.DoExtension(message[1:], p)
998 | if err != nil {
999 | log.Printf("[ %s ] Failed extensions for %s: %s\n", ts.M.Info.Name, p.address, err)
1000 | }
1001 | }
1002 | return
1003 | }
1004 |
1005 | func (ts *TorrentSession) generalMessage(message []byte, p *peerState) (err error) {
1006 | messageID := message[0]
1007 |
1008 | switch messageID {
1009 | case CHOKE:
1010 | // log.Println("[", ts.M.Info.Name, "] choke", p.address)
1011 | if len(message) != 1 {
1012 | return errors.New("Unexpected length")
1013 | }
1014 | err = ts.doChoke(p)
1015 | case UNCHOKE:
1016 | // log.Println("[", ts.M.Info.Name, "] unchoke", p.address)
1017 | if len(message) != 1 {
1018 | return errors.New("Unexpected length")
1019 | }
1020 | p.peer_choking = false
1021 | for i := 0; i < MAX_OUR_REQUESTS; i++ {
1022 | err = ts.RequestBlock(p)
1023 | if err != nil {
1024 | return
1025 | }
1026 | }
1027 | case INTERESTED:
1028 | // log.Println("[", ts.M.Info.Name, "] interested", p)
1029 | if len(message) != 1 {
1030 | return errors.New("Unexpected length")
1031 | }
1032 | p.peer_interested = true
1033 | ts.chokePeers()
1034 | case NOT_INTERESTED:
1035 | // log.Println("[", ts.M.Info.Name, "] not interested", p)
1036 | if len(message) != 1 {
1037 | return errors.New("Unexpected length")
1038 | }
1039 | p.peer_interested = false
1040 | ts.chokePeers()
1041 | case HAVE:
1042 | if len(message) != 5 {
1043 | return errors.New("Unexpected length")
1044 | }
1045 | n := bytesToUint32(message[1:])
1046 | if n < uint32(p.have.n) {
1047 | p.have.Set(int(n))
1048 | if !p.am_interested && !ts.pieceSet.IsSet(int(n)) {
1049 | p.SetInterested(true)
1050 | }
1051 | } else {
1052 | return errors.New("have index is out of range")
1053 | }
1054 | case BITFIELD:
1055 | // log.Println("[", ts.M.Info.Name, "] bitfield", p.address)
1056 | if !p.can_receive_bitfield {
1057 | return errors.New("Late bitfield operation")
1058 | }
1059 | p.have = NewBitsetFromBytes(ts.totalPieces, message[1:])
1060 | if p.have == nil {
1061 | return errors.New("Invalid bitfield data")
1062 | }
1063 | ts.checkInteresting(p)
1064 | case REQUEST:
1065 | // log.Println("[", ts.M.Info.Name, "] request", p.address)
1066 | if len(message) != 13 {
1067 | return errors.New("Unexpected message length")
1068 | }
1069 | index := bytesToUint32(message[1:5])
1070 | begin := bytesToUint32(message[5:9])
1071 | length := bytesToUint32(message[9:13])
1072 | if index >= uint32(p.have.n) {
1073 | return errors.New("piece out of range")
1074 | }
1075 | if !ts.pieceSet.IsSet(int(index)) {
1076 | return errors.New("we don't have that piece")
1077 | }
1078 | if int64(begin) >= ts.M.Info.PieceLength {
1079 | return errors.New("begin out of range")
1080 | }
1081 | if int64(begin)+int64(length) > ts.M.Info.PieceLength {
1082 | return errors.New("begin + length out of range")
1083 | }
1084 | // TODO: Asynchronous
1085 | // p.AddRequest(index, begin, length)
1086 | return ts.sendRequest(p, index, begin, length)
1087 | case PIECE:
1088 | // piece
1089 | if len(message) < 9 {
1090 | return errors.New("unexpected message length")
1091 | }
1092 | index := bytesToUint32(message[1:5])
1093 | begin := bytesToUint32(message[5:9])
1094 | length := len(message) - 9
1095 | if index >= uint32(p.have.n) {
1096 | return errors.New("piece out of range")
1097 | }
1098 | if ts.pieceSet.IsSet(int(index)) {
1099 | // We already have that piece, keep going
1100 | break
1101 | }
1102 | if int64(begin) >= ts.M.Info.PieceLength {
1103 | return errors.New("begin out of range")
1104 | }
1105 | if int64(begin)+int64(length) > ts.M.Info.PieceLength {
1106 | return errors.New("begin + length out of range")
1107 | }
1108 | if length > 128*1024 {
1109 | return errors.New("Block length too large")
1110 | }
1111 | v, ok := ts.activePieces[int(index)]
1112 | if !ok {
1113 | return errors.New("Received piece data we weren't expecting")
1114 | }
1115 | copy(v.buffer[begin:], message[9:])
1116 |
1117 | p.creditDownload(int64(length))
1118 | ts.RecordBlock(p, index, begin, uint32(length))
1119 | err = ts.RequestBlock(p)
1120 | case CANCEL:
1121 | // log.Println("[", ts.M.Info.Name, "] cancel")
1122 | if len(message) != 13 {
1123 | return errors.New("Unexpected message length")
1124 | }
1125 | index := bytesToUint32(message[1:5])
1126 | begin := bytesToUint32(message[5:9])
1127 | length := bytesToUint32(message[9:13])
1128 | if index >= uint32(p.have.n) {
1129 | return errors.New("piece out of range")
1130 | }
1131 | if !ts.pieceSet.IsSet(int(index)) {
1132 | return errors.New("we don't have that piece")
1133 | }
1134 | if int64(begin) >= ts.M.Info.PieceLength {
1135 | return errors.New("begin out of range")
1136 | }
1137 | if int64(begin)+int64(length) > ts.M.Info.PieceLength {
1138 | return errors.New("begin + length out of range")
1139 | }
1140 | if length != STANDARD_BLOCK_LENGTH {
1141 | return errors.New("Unexpected block length")
1142 | }
1143 | p.CancelRequest(index, begin, length)
1144 | case PORT:
1145 | // TODO: Implement this message.
1146 | 		// We see peers sending us 16KB messages here, so it seems
1147 | 		// we are misinterpreting this message.
1148 | if len(message) != 3 {
1149 | return fmt.Errorf("Unexpected length for port message: %d", len(message))
1150 | }
1151 | go ts.dht.AddNode(p.address)
1152 | case EXTENSION:
1153 | err := ts.DoExtension(message[1:], p)
1154 | if err != nil {
1155 | log.Printf("[ %s ] Failed extensions for %s: %s\n", ts.M.Info.Name, p.address, err)
1156 | }
1157 |
1158 | if ts.Session.HaveTorrent {
1159 | p.SendBitfield(ts.pieceSet)
1160 | }
1161 | default:
1162 | return fmt.Errorf("Unknown message id: %d\n", messageID)
1163 | }
1164 |
1165 | if messageID != EXTENSION {
1166 | p.can_receive_bitfield = false
1167 | }
1168 |
1169 | return
1170 | }
1171 |
1172 | type ExtensionHandshake struct {
1173 | M map[string]int `bencode:"m"`
1174 | P uint16 `bencode:"p"`
1175 | V string `bencode:"v"`
1176 | Yourip string `bencode:"yourip"`
1177 | Ipv6 string `bencode:"ipv6"`
1178 | Ipv4 string `bencode:"ipv4"`
1179 | Reqq uint16 `bencode:"reqq"`
1180 |
1181 | MetadataSize uint `bencode:"metadata_size"`
1182 | }
1183 |
1184 | func (ts *TorrentSession) DoExtension(msg []byte, p *peerState) (err error) {
1185 |
1186 | var h ExtensionHandshake
1187 | if msg[0] == EXTENSION_HANDSHAKE {
1188 | err = bencode.Unmarshal(bytes.NewReader(msg[1:]), &h)
1189 | if err != nil {
1190 | log.Println("[", ts.M.Info.Name, "] Error when unmarshaling extension handshake")
1191 | return err
1192 | }
1193 |
1194 | p.theirExtensions = make(map[string]int)
1195 | for name, code := range h.M {
1196 | p.theirExtensions[name] = code
1197 | }
1198 |
1199 | if ts.Session.HaveTorrent || ts.Session.ME != nil && ts.Session.ME.Transferring {
1200 | return
1201 | }
1202 |
1203 | // Fill metadata info
1204 | if h.MetadataSize != uint(0) {
1205 | nPieces := uint(math.Ceil(float64(h.MetadataSize) / float64(16*1024)))
1206 | ts.Session.ME.Pieces = make([][]byte, nPieces)
1207 | }
1208 |
1209 | if _, ok := p.theirExtensions["ut_metadata"]; ok {
1210 | ts.Session.ME.Transferring = true
1211 | p.sendMetadataRequest(0)
1212 | }
1213 |
1214 | } else if ext, ok := ts.Session.OurExtensions[int(msg[0])]; ok {
1215 | switch ext {
1216 | case "ut_metadata":
1217 | ts.DoMetadata(msg[1:], p)
1218 | default:
1219 | log.Println("[", ts.M.Info.Name, "] Unknown extension: ", ext)
1220 | }
1221 | } else {
1222 | log.Println("[", ts.M.Info.Name, "] Unknown extension: ", int(msg[0]))
1223 | }
1224 |
1225 | return nil
1226 | }
1227 |
1228 | type MetadataMessage struct {
1229 | MsgType uint8 `bencode:"msg_type"`
1230 | Piece uint `bencode:"piece"`
1231 | TotalSize uint `bencode:"total_size"`
1232 | }
1233 |
1234 | //From bittorrent.org, a bep 9 data message is structured as follows:
1235 | //d8:msg_typei1e5:piecei0e10:total_sizei34256eexxxx
1236 | //xxxx being the piece data
1237 | //So, simplest approach: search for 'ee' as the end of bencoded data
1238 | func getMetadataPiece(msg []byte) ([]byte, error) {
1239 | for i := 0; i < len(msg)-1; i++ {
1240 | if msg[i] == 'e' && msg[i+1] == 'e' {
1241 | return msg[i+2:], nil
1242 | }
1243 | }
1244 | return nil, errors.New("Couldn't find an appropriate end to the bencoded message")
1245 | }
1246 |
1247 | func (ts *TorrentSession) DoMetadata(msg []byte, p *peerState) {
1248 | var message MetadataMessage
1249 | err := bencode.Unmarshal(bytes.NewReader(msg), &message)
1250 | if err != nil {
1251 | log.Println("[", ts.M.Info.Name, "] Error when parsing metadata:", err)
1252 | return
1253 | }
1254 |
1255 | mt := message.MsgType
1256 | switch mt {
1257 | case METADATA_REQUEST:
1258 | //TODO: Answer to metadata request
1259 | case METADATA_DATA:
1260 | if ts.Session.HaveTorrent {
1261 | log.Println("[", ts.M.Info.Name, "] Received metadata we don't need, from", p.address)
1262 | return
1263 | }
1264 |
1265 | piece, err := getMetadataPiece(msg)
1266 | if err != nil {
1267 | log.Println("[", ts.M.Info.Name, "] Error when getting metadata piece: ", err)
1268 | return
1269 | }
1270 | ts.Session.ME.Pieces[message.Piece] = piece
1271 |
1272 | finished := true
1273 | for idx, data := range ts.Session.ME.Pieces {
1274 | if len(data) == 0 {
1275 | p.sendMetadataRequest(idx)
1276 | finished = false
1277 | }
1278 | }
1279 |
1280 | if !finished {
1281 | break
1282 | }
1283 |
1284 | log.Println("[", ts.M.Info.Name, "] Finished downloading metadata!")
1285 | var full bytes.Buffer
1286 | for _, piece := range ts.Session.ME.Pieces {
1287 | full.Write(piece)
1288 | }
1289 | b := full.Bytes()
1290 |
1291 | // Verify sha
1292 | sha := sha1.New()
1293 | sha.Write(b)
1294 | actual := string(sha.Sum(nil))
1295 | if actual != ts.M.InfoHash {
1296 | log.Printf("[ %s ] Invalid metadata; got %x\n", ts.M.Info.Name, actual)
1297 | }
1298 |
1299 | metadata := string(b)
1300 | err = saveMetaInfo(metadata)
1301 | if err != nil {
1302 | return
1303 | }
1304 | ts.reload(metadata)
1305 | case METADATA_REJECT:
1306 | log.Printf("[ %s ] %s didn't want to send piece %d\n", ts.M.Info.Name, p.address, message.Piece)
1307 | default:
1308 | log.Println("[", ts.M.Info.Name, "] Didn't understand metadata extension type: ", mt)
1309 | }
1310 | }
1311 |
1312 | func (ts *TorrentSession) sendRequest(peer *peerState, index, begin, length uint32) (err error) {
1313 | if !peer.am_choking {
1314 | // log.Println("[", ts.M.Info.Name, "] Sending block", index, begin, length)
1315 | buf := make([]byte, length+9)
1316 | buf[0] = PIECE
1317 | uint32ToBytes(buf[1:5], index)
1318 | uint32ToBytes(buf[5:9], begin)
1319 | _, err = ts.fileStore.ReadAt(buf[9:],
1320 | int64(index)*ts.M.Info.PieceLength+int64(begin))
1321 | if err != nil {
1322 | return
1323 | }
1324 | peer.sendMessage(buf)
1325 | ts.Session.Uploaded += uint64(length)
1326 | }
1327 | return
1328 | }
1329 |
1330 | func (ts *TorrentSession) checkInteresting(p *peerState) {
1331 | p.SetInterested(ts.isInteresting(p))
1332 | }
1333 |
1334 | func (ts *TorrentSession) isInteresting(p *peerState) bool {
1335 | for i := 0; i < ts.totalPieces; i++ {
1336 | if !ts.pieceSet.IsSet(i) && p.have.IsSet(i) {
1337 | return true
1338 | }
1339 | }
1340 | return false
1341 | }
1342 |
1343 | func min(a, b int) int {
1344 | if a < b {
1345 | return a
1346 | }
1347 | return b
1348 | }
1349 |
1350 | func humanSize(value float64) string {
1351 | switch {
1352 | case value > 1<<30:
1353 | return fmt.Sprintf("%.2f GB", value/(1<<30))
1354 | case value > 1<<20:
1355 | return fmt.Sprintf("%.2f MB", value/(1<<20))
1356 | case value > 1<<10:
1357 | return fmt.Sprintf("%.2f kB", value/(1<<10))
1358 | }
1359 | return fmt.Sprintf("%.2f B", value)
1360 | }
1361 |
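setHeader above builds the 68-byte BitTorrent handshake: a length-prefixed protocol string, eight reserved bytes (0x10 in byte 25 advertises the extension protocol, 0x01 in byte 27 advertises DHT), then the 20-byte info hash and 20-byte peer ID. A standalone sketch of that layout, using placeholder hash and peer ID values:

package main

import "fmt"

func main() {
	// Placeholders; a real client uses the SHA-1 of the info dictionary
	// and a freshly generated 20-byte peer ID.
	infoHash := make([]byte, 20)
	peerID := []byte("-tt00000_exampleid12")

	header := make([]byte, 68)
	header[0] = 19 // length of the protocol string
	copy(header[1:20], "BitTorrent protocol")
	header[25] |= 0x10 // reserved: extension protocol (BEP-0010)
	header[27] |= 0x01 // reserved: DHT (BEP-0005)
	copy(header[28:48], infoHash)
	copy(header[48:68], peerID)

	fmt.Printf("handshake: % x\n", header)
}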
--------------------------------------------------------------------------------
/torrent/torrentLoop.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "encoding/hex"
5 | "log"
6 | "os"
7 | "os/signal"
8 |
9 | "github.com/nictuku/dht"
10 | "golang.org/x/net/proxy"
11 | )
12 |
13 | type TorrentFlags struct {
14 | Port int
15 | FileDir string
16 | SeedRatio float64
17 | UseDeadlockDetector bool
18 | UseLPD bool
19 | UseDHT bool
20 | UseUPnP bool
21 | UseNATPMP bool
22 | TrackerlessMode bool
23 | ExecOnSeeding string
24 |
25 | // The dial function to use. Nil means use net.Dial
26 | Dial proxy.Dialer
27 |
28 | // IP address of gateway used for NAT-PMP
29 | Gateway string
30 |
31 | //Provides the filesystems added torrents are saved to
32 | FileSystemProvider FsProvider
33 |
34 | //Whether to check file hashes when adding torrents
35 | InitialCheck bool
36 |
37 | //Provides cache to each torrent
38 | Cacher CacheProvider
39 |
40 | //Whether to write and use *.haveBitset resume data
41 | QuickResume bool
42 |
43 | //How many torrents should be active at a time
44 | MaxActive int
45 |
46 | //Maximum amount of memory (in MiB) to use for each torrent's Active Pieces.
47 | //0 means a single Active Piece. Negative means Unlimited Active Pieces.
48 | MemoryPerTorrent int
49 | }
50 |
51 | func RunTorrents(flags *TorrentFlags, torrentFiles []string) (err error) {
52 | conChan, listenPort, err := ListenForPeerConnections(flags)
53 | if err != nil {
54 | 		log.Println("Couldn't listen for peer connections: ", err)
55 | return
56 | }
57 | quitChan := listenSigInt()
58 |
59 | createChan := make(chan string, flags.MaxActive)
60 | startChan := make(chan *TorrentSession, 1)
61 | doneChan := make(chan *TorrentSession, 1)
62 |
63 | var dhtNode dht.DHT
64 | if flags.UseDHT {
65 | dhtNode = *startDHT(flags.Port)
66 | }
67 |
68 | torrentSessions := make(map[string]*TorrentSession)
69 |
70 | go func() {
71 | for torrentFile := range createChan {
72 | ts, err := NewTorrentSession(flags, torrentFile, uint16(listenPort))
73 | if err != nil {
74 | log.Println("Couldn't create torrent session for "+torrentFile+" .", err)
75 | doneChan <- &TorrentSession{}
76 | } else {
77 | log.Printf("Created torrent session for %s", ts.M.Info.Name)
78 | startChan <- ts
79 | }
80 | }
81 | }()
82 |
83 | torrentQueue := []string{}
84 | if len(torrentFiles) > flags.MaxActive {
85 | torrentQueue = torrentFiles[flags.MaxActive:]
86 | }
87 |
88 | for i, torrentFile := range torrentFiles {
89 | if i < flags.MaxActive {
90 | createChan <- torrentFile
91 | } else {
92 | break
93 | }
94 | }
95 |
96 | lpd := &Announcer{}
97 | if flags.UseLPD {
98 | lpd, err = NewAnnouncer(uint16(listenPort))
99 | if err != nil {
100 | log.Println("Couldn't listen for Local Peer Discoveries: ", err)
101 | flags.UseLPD = false
102 | }
103 | }
104 |
105 | theWorldisEnding := false
106 | mainLoop:
107 | for {
108 | select {
109 | case ts := <-startChan:
110 | if !theWorldisEnding {
111 | ts.dht = &dhtNode
112 | if flags.UseLPD {
113 | lpd.Announce(ts.M.InfoHash)
114 | }
115 | torrentSessions[ts.M.InfoHash] = ts
116 | log.Printf("Starting torrent session for %s", ts.M.Info.Name)
117 | go func(t *TorrentSession) {
118 | t.DoTorrent()
119 | doneChan <- t
120 | }(ts)
121 | }
122 | case ts := <-doneChan:
123 | if ts.M != nil {
124 | delete(torrentSessions, ts.M.InfoHash)
125 | if flags.UseLPD {
126 | lpd.StopAnnouncing(ts.M.InfoHash)
127 | }
128 | }
129 | if !theWorldisEnding && len(torrentQueue) > 0 {
130 | createChan <- torrentQueue[0]
131 | torrentQueue = torrentQueue[1:]
132 | continue mainLoop
133 | }
134 |
135 | if len(torrentSessions) == 0 {
136 | break mainLoop
137 | }
138 | case <-quitChan:
139 | theWorldisEnding = true
140 | for _, ts := range torrentSessions {
141 | go ts.Quit()
142 | }
143 | case c := <-conChan:
144 | // log.Printf("New bt connection for ih %x", c.Infohash)
145 | if ts, ok := torrentSessions[c.Infohash]; ok {
146 | ts.AcceptNewPeer(c)
147 | }
148 | case dhtPeers := <-dhtNode.PeersRequestResults:
149 | for key, peers := range dhtPeers {
150 | if ts, ok := torrentSessions[string(key)]; ok {
151 | // log.Printf("Received %d DHT peers for torrent session %x\n", len(peers), []byte(key))
152 | for _, peer := range peers {
153 | peer = dht.DecodePeerAddress(peer)
154 | ts.HintNewPeer(peer)
155 | }
156 | } else {
157 | log.Printf("Received DHT peer for an unknown torrent session %x\n", []byte(key))
158 | }
159 | }
160 | case announce := <-lpd.Announces:
161 | hexhash, err := hex.DecodeString(announce.Infohash)
162 | if err != nil {
163 | log.Println("Err with hex-decoding:", err)
164 | }
165 | if ts, ok := torrentSessions[string(hexhash)]; ok {
166 | // log.Printf("Received LPD announce for ih %s", announce.Infohash)
167 | ts.HintNewPeer(announce.Peer)
168 | }
169 | }
170 | }
171 | if flags.UseDHT {
172 | dhtNode.Stop()
173 | }
174 | return
175 | }
176 |
177 | func listenSigInt() chan os.Signal {
178 | c := make(chan os.Signal, 1)
179 | signal.Notify(c, os.Interrupt, os.Kill)
180 | return c
181 | }
182 |
183 | func startDHT(listenPort int) *dht.DHT {
184 | // TODO: UPnP UDP port mapping.
185 | cfg := dht.NewConfig()
186 | cfg.Port = listenPort
187 | cfg.NumTargetPeers = TARGET_NUM_PEERS
188 | dhtnode, err := dht.New(cfg)
189 | if err != nil {
190 | log.Println("DHT node creation error:", err)
191 | return nil
192 | }
193 |
194 | go dhtnode.Run()
195 |
196 | return dhtnode
197 | }
198 |
--------------------------------------------------------------------------------
/torrent/trackerClient.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "bytes"
5 | "encoding/binary"
6 | "errors"
7 | "fmt"
8 | "log"
9 | "math/rand"
10 | "net"
11 | "net/url"
12 | "strconv"
13 | "time"
14 |
15 | "golang.org/x/net/proxy"
16 | )
17 |
18 | // Code to talk to trackers.
19 | // Implements:
20 | // BEP 12 Multitracker Metadata Extension
21 | // BEP 15 UDP Tracker Protocol
22 |
23 | type ClientStatusReport struct {
24 | Event string
25 | InfoHash string
26 | PeerID string
27 | Port uint16
28 | Uploaded uint64
29 | Downloaded uint64
30 | Left uint64
31 | }
32 |
33 | func startTrackerClient(dialer proxy.Dialer, announce string, announceList [][]string, trackerInfoChan chan *TrackerResponse, reports chan ClientStatusReport) {
34 | if announce != "" && announceList == nil {
35 | // Convert the plain announce into an announceList to simplify logic
36 | announceList = [][]string{[]string{announce}}
37 | }
38 |
39 | if announceList != nil {
40 | announceList = shuffleAnnounceList(announceList)
41 | }
42 |
43 | // Discard old status reports if they are produced more quickly than they
44 | // can be consumed.
45 | recentReports := make(chan ClientStatusReport)
46 | go func() {
47 | outerLoop:
48 | for {
49 | // Wait until we have a report.
50 | recentReport := <-reports
51 | for {
52 | select {
53 | case recentReport = <-reports:
54 | // discard the old report, keep the new one.
55 | continue
56 | case recentReports <- recentReport:
57 | // send the latest report, then wait for new report.
58 | continue outerLoop
59 | }
60 | }
61 | }
62 | }()
63 |
64 | go func() {
65 | for report := range recentReports {
66 | tr := queryTrackers(dialer, announceList, report)
67 | if tr != nil {
68 | trackerInfoChan <- tr
69 | }
70 | }
71 | }()
72 | }
73 |
74 | // Deep copy announcelist and shuffle each level.
75 | func shuffleAnnounceList(announceList [][]string) (result [][]string) {
76 | result = make([][]string, len(announceList))
77 | for i, level := range announceList {
78 | result[i] = shuffleAnnounceListLevel(level)
79 | }
80 | return
81 | }
82 |
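   | // Shuffle the trackers within a single tier using a random permutation.
   | // BEP 12 recommends randomizing the order of trackers inside each tier
   | // before the first announce.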
83 | func shuffleAnnounceListLevel(level []string) (shuffled []string) {
84 | items := len(level)
85 | shuffled = make([]string, items)
86 | perm := rand.Perm(items)
87 | for i, v := range perm {
88 | shuffled[v] = level[i]
89 | }
90 | return
91 | }
92 |
93 | func queryTrackers(dialer proxy.Dialer, announceList [][]string, report ClientStatusReport) (tr *TrackerResponse) {
94 | for _, level := range announceList {
95 | for i, tracker := range level {
96 | var err error
97 | tr, err = queryTracker(dialer, report, tracker)
98 | if err == nil {
99 | // Move successful tracker to front of slice for next announcement
100 | // cycle.
101 | copy(level[1:i+1], level[0:i])
102 | level[0] = tracker
103 | return
104 | }
105 | }
106 | }
107 | log.Println("Error: Did not successfully contact a tracker:", announceList)
108 | return
109 | }
110 |
111 | func queryTracker(dialer proxy.Dialer, report ClientStatusReport, trackerUrl string) (tr *TrackerResponse, err error) {
112 | u, err := url.Parse(trackerUrl)
113 | if err != nil {
114 | log.Println("Error: Invalid announce URL(", trackerUrl, "):", err)
115 | return
116 | }
117 | switch u.Scheme {
118 | case "http":
119 | fallthrough
120 | case "https":
121 | return queryHTTPTracker(dialer, report, u)
122 | case "udp":
123 | return queryUDPTracker(report, u)
124 | default:
125 | errorMessage := fmt.Sprintf("Unknown scheme %v in %v", u.Scheme, trackerUrl)
126 | log.Println(errorMessage)
127 | return nil, errors.New(errorMessage)
128 | }
129 | }
130 |
131 | func queryHTTPTracker(dialer proxy.Dialer, report ClientStatusReport, u *url.URL) (tr *TrackerResponse, err error) {
132 | uq := u.Query()
133 | uq.Add("info_hash", report.InfoHash)
134 | uq.Add("peer_id", report.PeerID)
135 | uq.Add("port", strconv.FormatUint(uint64(report.Port), 10))
136 | uq.Add("uploaded", strconv.FormatUint(report.Uploaded, 10))
137 | uq.Add("downloaded", strconv.FormatUint(report.Downloaded, 10))
138 | uq.Add("left", strconv.FormatUint(report.Left, 10))
139 | uq.Add("compact", "1")
140 |
141 | // Don't report IPv6 address, the user might prefer to keep
142 | // that information private when communicating with IPv4 hosts.
143 | if false {
144 | ipv6Address, err := findLocalIPV6AddressFor(u.Host)
145 | if err == nil {
146 | log.Println("our ipv6", ipv6Address)
147 | uq.Add("ipv6", ipv6Address)
148 | }
149 | }
150 |
151 | if report.Event != "" {
152 | uq.Add("event", report.Event)
153 | }
154 |
155 | // Encoding may reorder the existing query string in the announce URL,
156 | // which could break trackers that don't parse URLs properly.
157 |
158 | u.RawQuery = uq.Encode()
159 |
160 | tr, err = getTrackerInfo(dialer, u.String())
161 | if tr == nil || err != nil {
162 | log.Println("Error: Could not fetch tracker info:", err)
163 | } else if tr.FailureReason != "" {
164 | log.Println("Error: Tracker returned failure reason:", tr.FailureReason)
165 | err = fmt.Errorf("tracker failure %s", tr.FailureReason)
166 | }
167 | return
168 | }
169 |
170 | func findLocalIPV6AddressFor(hostAddr string) (local string, err error) {
171 | // Figure out our IPv6 address to talk to a given host.
172 | host, hostPort, err := net.SplitHostPort(hostAddr)
173 | if err != nil {
174 | host = hostAddr
175 | hostPort = "1234"
176 | }
177 | dummyAddr := net.JoinHostPort(host, hostPort)
178 | log.Println("Looking for host ", dummyAddr)
179 | conn, err := net.Dial("udp6", dummyAddr)
180 | if err != nil {
181 | log.Println("No IPV6 for host ", host, err)
182 | return "", err
183 | }
184 | defer conn.Close()
185 | localAddr := conn.LocalAddr()
186 | local, _, err = net.SplitHostPort(localAddr.String())
187 | if err != nil {
188 | local = localAddr.String()
189 | }
190 | return
191 | }
192 |
193 | func queryUDPTracker(report ClientStatusReport, u *url.URL) (tr *TrackerResponse, err error) {
194 | serverAddr, err := net.ResolveUDPAddr("udp", u.Host)
195 | if err != nil {
196 | return
197 | }
198 | con, err := net.DialUDP("udp", nil, serverAddr)
199 | if err != nil {
200 | return
201 | }
202 | defer func() { con.Close() }()
203 |
204 | var connectionID uint64
205 | for retry := uint(0); retry < uint(8); retry++ {
206 | err = con.SetDeadline(time.Now().Add(15 * (1 << retry) * time.Second))
207 | if err != nil {
208 | return
209 | }
210 |
211 | connectionID, err = connectToUDPTracker(con)
212 | if err == nil {
213 | break
214 | }
215 | if nerr, ok := err.(net.Error); ok && nerr.Timeout() {
216 | continue
217 | }
218 | if err != nil {
219 | return
220 | }
221 | }
222 |
223 | return getAnnouncementFromUDPTracker(con, connectionID, report)
224 | }
225 |
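   | // connectToUDPTracker performs the BEP 15 connect handshake.
   | // Request (16 bytes): protocol id 0x41727101980 (8), action=0 (4), transaction id (4).
   | // Response (16 bytes): action (4), transaction id (4), connection id (8).
   | // All fields are big-endian.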
226 | func connectToUDPTracker(con *net.UDPConn) (connectionID uint64, err error) {
227 | var connectionRequest_connectionID uint64 = 0x41727101980
228 | var action uint32 = 0
229 | transactionID := rand.Uint32()
230 |
231 | connectionRequest := new(bytes.Buffer)
232 | err = binary.Write(connectionRequest, binary.BigEndian, connectionRequest_connectionID)
233 | if err != nil {
234 | return
235 | }
236 | err = binary.Write(connectionRequest, binary.BigEndian, action)
237 | if err != nil {
238 | return
239 | }
240 | err = binary.Write(connectionRequest, binary.BigEndian, transactionID)
241 | if err != nil {
242 | return
243 | }
244 |
245 | _, err = con.Write(connectionRequest.Bytes())
246 | if err != nil {
247 | return
248 | }
249 |
250 | connectionResponseBytes := make([]byte, 16)
251 |
252 | var connectionResponseLen int
253 | connectionResponseLen, err = con.Read(connectionResponseBytes)
254 | if err != nil {
255 | return
256 | }
257 | if connectionResponseLen != 16 {
258 | err = fmt.Errorf("Unexpected response size %d", connectionResponseLen)
259 | return
260 | }
261 | connectionResponse := bytes.NewBuffer(connectionResponseBytes)
262 | var connectionResponseAction uint32
263 | err = binary.Read(connectionResponse, binary.BigEndian, &connectionResponseAction)
264 | if err != nil {
265 | return
266 | }
267 | if connectionResponseAction != 0 {
268 | err = fmt.Errorf("Unexpected response action %d", connectionResponseAction)
269 | return
270 | }
271 | var connectionResponseTransactionID uint32
272 | err = binary.Read(connectionResponse, binary.BigEndian, &connectionResponseTransactionID)
273 | if err != nil {
274 | return
275 | }
276 | if connectionResponseTransactionID != transactionID {
277 | err = fmt.Errorf("Unexpected response transactionID %x != %x",
278 | connectionResponseTransactionID, transactionID)
279 | return
280 | }
281 |
282 | err = binary.Read(connectionResponse, binary.BigEndian, &connectionID)
283 | if err != nil {
284 | return
285 | }
286 | return
287 | }
288 |
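   | // getAnnouncementFromUDPTracker sends a BEP 15 announce (action=1):
   | // connection id (8), action (4), transaction id (4), info_hash (20),
   | // peer_id (20), downloaded (8), left (8), uploaded (8), event (4),
   | // IP (4), key (4), num_want (4), port (2), all big-endian.
   | // The response carries action, transaction id, interval, leechers and
   | // seeders, followed by 6 bytes (IPv4 address + port) per peer.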
289 | func getAnnouncementFromUDPTracker(con *net.UDPConn, connectionID uint64, report ClientStatusReport) (tr *TrackerResponse, err error) {
290 | transactionID := rand.Uint32()
291 |
292 | announcementRequest := new(bytes.Buffer)
293 | err = binary.Write(announcementRequest, binary.BigEndian, connectionID)
294 | if err != nil {
295 | return
296 | }
297 | var action uint32 = 1
298 | err = binary.Write(announcementRequest, binary.BigEndian, action)
299 | if err != nil {
300 | return
301 | }
302 | err = binary.Write(announcementRequest, binary.BigEndian, transactionID)
303 | if err != nil {
304 | return
305 | }
306 | err = binary.Write(announcementRequest, binary.BigEndian, []byte(report.InfoHash))
307 | if err != nil {
308 | return
309 | }
310 | err = binary.Write(announcementRequest, binary.BigEndian, []byte(report.PeerID))
311 | if err != nil {
312 | return
313 | }
314 | err = binary.Write(announcementRequest, binary.BigEndian, report.Downloaded)
315 | if err != nil {
316 | return
317 | }
318 | err = binary.Write(announcementRequest, binary.BigEndian, report.Left)
319 | if err != nil {
320 | return
321 | }
322 | err = binary.Write(announcementRequest, binary.BigEndian, report.Uploaded)
323 | if err != nil {
324 | return
325 | }
326 | var event uint32 = 0
327 | switch report.Event {
328 | case "":
329 | event = 0
330 | case "completed":
331 | event = 1
332 | case "started":
333 | event = 2
334 | case "stopped":
335 | event = 3
336 | default:
337 | err = fmt.Errorf("Unknown event string %v", report.Event)
338 | return
339 | }
340 | err = binary.Write(announcementRequest, binary.BigEndian, event)
341 | if err != nil {
342 | return
343 | }
344 | var ipAddress uint32 = 0
345 | err = binary.Write(announcementRequest, binary.BigEndian, ipAddress)
346 | if err != nil {
347 | return
348 | }
349 | var key uint32 = 0
350 | err = binary.Write(announcementRequest, binary.BigEndian, key)
351 | if err != nil {
352 | return
353 | }
354 |
355 | const peerRequestCount = 10
356 | var numWant uint32 = peerRequestCount
357 | err = binary.Write(announcementRequest, binary.BigEndian, numWant)
358 | if err != nil {
359 | return
360 | }
361 | err = binary.Write(announcementRequest, binary.BigEndian, report.Port)
362 | if err != nil {
363 | return
364 | }
365 |
366 | _, err = con.Write(announcementRequest.Bytes())
367 | if err != nil {
368 | return
369 | }
370 |
371 | const minimumResponseLen = 20
372 | const peerDataSize = 6
373 | expectedResponseLen := minimumResponseLen + peerDataSize*peerRequestCount
374 | responseBytes := make([]byte, expectedResponseLen)
375 |
376 | var responseLen int
377 | responseLen, err = con.Read(responseBytes)
378 | if err != nil {
379 | return
380 | }
381 | if responseLen < minimumResponseLen {
382 | err = fmt.Errorf("Unexpected response size %d", responseLen)
383 | return
384 | }
385 | response := bytes.NewBuffer(responseBytes)
386 | var responseAction uint32
387 | err = binary.Read(response, binary.BigEndian, &responseAction)
388 | if err != nil {
389 | return
390 | }
391 | if responseAction != 1 {
392 | err = fmt.Errorf("Unexpected response action %d", responseAction)
393 | return
394 | }
395 | var responseTransactionID uint32
396 | err = binary.Read(response, binary.BigEndian, &responseTransactionID)
397 | if err != nil {
398 | return
399 | }
400 | if transactionID != responseTransactionID {
401 | err = fmt.Errorf("Unexpected response transactionID %x", responseTransactionID)
402 | return
403 | }
404 | var interval uint32
405 | err = binary.Read(response, binary.BigEndian, &interval)
406 | if err != nil {
407 | return
408 | }
409 | var leechers uint32
410 | err = binary.Read(response, binary.BigEndian, &leechers)
411 | if err != nil {
412 | return
413 | }
414 | var seeders uint32
415 | err = binary.Read(response, binary.BigEndian, &seeders)
416 | if err != nil {
417 | return
418 | }
419 |
420 | peerCount := (responseLen - minimumResponseLen) / peerDataSize
421 | peerDataBytes := make([]byte, peerDataSize*peerCount)
422 | err = binary.Read(response, binary.BigEndian, &peerDataBytes)
423 | if err != nil {
424 | return
425 | }
426 |
427 | tr = &TrackerResponse{
428 | Interval: uint(interval),
429 | Complete: uint(seeders),
430 | Incomplete: uint(leechers),
431 | Peers: string(peerDataBytes)}
432 | return
433 | }
434 |
--------------------------------------------------------------------------------
/torrent/upnp.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | // Just enough UPnP to be able to forward ports
4 | //
5 |
6 | import (
7 | "bytes"
8 | "encoding/xml"
9 | "errors"
10 | "io/ioutil"
11 | "net"
12 | "net/http"
13 | "strconv"
14 | "strings"
15 | "time"
16 | )
17 |
18 | type upnpNAT struct {
19 | serviceURL string
20 | ourIP string
21 | urnDomain string
22 | }
23 |
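   | // Discover finds an Internet Gateway Device via SSDP: it multicasts an
   | // M-SEARCH request to 239.255.255.250:1900, extracts the LOCATION header
   | // from a matching response, fetches the device description XML from that
   | // URL, and resolves the WANIPConnection control URL used for port mapping.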
24 | func Discover() (nat NAT, err error) {
25 | ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900")
26 | if err != nil {
27 | return
28 | }
29 | conn, err := net.ListenPacket("udp4", ":0")
30 | if err != nil {
31 | return
32 | }
33 | socket := conn.(*net.UDPConn)
34 | defer socket.Close()
35 |
36 | err = socket.SetDeadline(time.Now().Add(3 * time.Second))
37 | if err != nil {
38 | return
39 | }
40 |
41 | st := "InternetGatewayDevice:1"
42 |
43 | buf := bytes.NewBufferString(
44 | "M-SEARCH * HTTP/1.1\r\n" +
45 | "HOST: 239.255.255.250:1900\r\n" +
46 | "ST: ssdp:all\r\n" +
47 | "MAN: \"ssdp:discover\"\r\n" +
48 | "MX: 2\r\n\r\n")
49 | message := buf.Bytes()
50 | answerBytes := make([]byte, 1024)
51 | for i := 0; i < 3; i++ {
52 | _, err = socket.WriteToUDP(message, ssdp)
53 | if err != nil {
54 | return
55 | }
56 | var n int
58 | for {
59 | n, _, err = socket.ReadFromUDP(answerBytes)
60 | if err != nil {
61 | break
62 | }
63 | answer := string(answerBytes[0:n])
64 | if strings.Index(answer, st) < 0 {
65 | continue
66 | }
67 | // HTTP header field names are case-insensitive.
68 | // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
69 | locString := "\r\nlocation:"
70 | answer = strings.ToLower(answer)
71 | locIndex := strings.Index(answer, locString)
72 | if locIndex < 0 {
73 | continue
74 | }
75 | loc := answer[locIndex+len(locString):]
76 | endIndex := strings.Index(loc, "\r\n")
77 | if endIndex < 0 {
78 | continue
79 | }
80 | locURL := strings.TrimSpace(loc[0:endIndex])
81 | var serviceURL, urnDomain string
82 | serviceURL, urnDomain, err = getServiceURL(locURL)
83 | if err != nil {
84 | return
85 | }
86 | var ourIP net.IP
87 | ourIP, err = localIPv4()
88 | if err != nil {
89 | return
90 | }
91 | nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain}
92 | return
93 | }
94 | }
95 | err = errors.New("UPnP port discovery failed.")
96 | return
97 | }
98 |
99 | type Envelope struct {
100 | XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
101 | Soap *SoapBody
102 | }
103 | type SoapBody struct {
104 | XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
105 | ExternalIP *ExternalIPAddressResponse
106 | }
107 |
108 | type ExternalIPAddressResponse struct {
109 | XMLName xml.Name `xml:"GetExternalIPAddressResponse"`
110 | IPAddress string `xml:"NewExternalIPAddress"`
111 | }
112 |
113 | type ExternalIPAddress struct {
114 | XMLName xml.Name `xml:"NewExternalIPAddress"`
115 | IP string
116 | }
117 |
118 | type Service struct {
119 | ServiceType string `xml:"serviceType"`
120 | ControlURL string `xml:"controlURL"`
121 | }
122 |
123 | type DeviceList struct {
124 | Device []Device `xml:"device"`
125 | }
126 |
127 | type ServiceList struct {
128 | Service []Service `xml:"service"`
129 | }
130 |
131 | type Device struct {
132 | XMLName xml.Name `xml:"device"`
133 | DeviceType string `xml:"deviceType"`
134 | DeviceList DeviceList `xml:"deviceList"`
135 | ServiceList ServiceList `xml:"serviceList"`
136 | }
137 |
138 | type Root struct {
139 | Device Device
140 | }
141 |
142 | func getChildDevice(d *Device, deviceType string) *Device {
143 | dl := d.DeviceList.Device
144 | for i := 0; i < len(dl); i++ {
145 | if strings.Index(dl[i].DeviceType, deviceType) >= 0 {
146 | return &dl[i]
147 | }
148 | }
149 | return nil
150 | }
151 |
152 | func getChildService(d *Device, serviceType string) *Service {
153 | sl := d.ServiceList.Service
154 | for i := 0; i < len(sl); i++ {
155 | if strings.Index(sl[i].ServiceType, serviceType) >= 0 {
156 | return &sl[i]
157 | }
158 | }
159 | return nil
160 | }
161 |
162 | func localIPv4() (net.IP, error) {
163 | tt, err := net.Interfaces()
164 | if err != nil {
165 | return nil, err
166 | }
167 | for _, t := range tt {
168 | aa, err := t.Addrs()
169 | if err != nil {
170 | return nil, err
171 | }
172 | for _, a := range aa {
173 | ipnet, ok := a.(*net.IPNet)
174 | if !ok {
175 | continue
176 | }
177 | v4 := ipnet.IP.To4()
178 | if v4 == nil || v4[0] == 127 { // loopback address
179 | continue
180 | }
181 | return v4, nil
182 | }
183 | }
184 | return nil, errors.New("cannot find local IP address")
185 | }
186 |
187 | func getServiceURL(rootURL string) (url, urnDomain string, err error) {
188 | r, err := http.Get(rootURL)
189 | if err != nil {
190 | return
191 | }
192 | defer r.Body.Close()
193 | if r.StatusCode >= 400 {
194 | err = errors.New("unexpected status code " + strconv.Itoa(r.StatusCode))
195 | return
196 | }
197 | var root Root
198 | err = xml.NewDecoder(r.Body).Decode(&root)
199 | if err != nil {
200 | return
201 | }
202 | a := &root.Device
203 | if strings.Index(a.DeviceType, "InternetGatewayDevice:1") < 0 {
204 | err = errors.New("No InternetGatewayDevice")
205 | return
206 | }
207 | b := getChildDevice(a, "WANDevice:1")
208 | if b == nil {
209 | err = errors.New("No WANDevice")
210 | return
211 | }
212 | c := getChildDevice(b, "WANConnectionDevice:1")
213 | if c == nil {
214 | err = errors.New("No WANConnectionDevice")
215 | return
216 | }
217 | d := getChildService(c, "WANIPConnection:1")
218 | if d == nil {
219 | // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice,
220 | // instead of under WanConnectionDevice
221 | d = getChildService(b, "WANIPConnection:1")
222 |
223 | if d == nil {
224 | err = errors.New("No WANIPConnection")
225 | return
226 | }
227 | }
228 | // Extract the domain name, which isn't always 'schemas-upnp-org'
229 | urnDomain = strings.Split(d.ServiceType, ":")[1]
230 | url = combineURL(rootURL, d.ControlURL)
231 | return
232 | }
233 |
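   | // combineURL replaces the path of rootURL with subURL, keeping the scheme
   | // and host. For example (hypothetical values):
   | //   combineURL("http://10.0.0.1:49152/rootDesc.xml", "/ctl/IPConn")
   | //   == "http://10.0.0.1:49152/ctl/IPConn"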
234 | func combineURL(rootURL, subURL string) string {
235 | protocolEnd := "://"
236 | protoEndIndex := strings.Index(rootURL, protocolEnd)
237 | a := rootURL[protoEndIndex+len(protocolEnd):]
238 | rootIndex := strings.Index(a, "/")
239 | return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL
240 | }
241 |
242 | func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
243 | fullMessage := "<?xml version=\"1.0\" ?>" +
244 | "<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
245 | "<s:Body>" + message + "</s:Body></s:Envelope>"
246 |
247 | req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
248 | if err != nil {
249 | return nil, err
250 | }
251 | req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
252 | req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
253 | //req.Header.Set("Transfer-Encoding", "chunked")
254 | req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"")
255 | req.Header.Set("Connection", "Close")
256 | req.Header.Set("Cache-Control", "no-cache")
257 | req.Header.Set("Pragma", "no-cache")
258 |
259 | // log.Stderr("soapRequest ", req)
260 |
261 | r, err = http.DefaultClient.Do(req)
262 | if err != nil {
263 | return nil, err
264 | }
265 | /*if r.Body != nil {
266 | defer r.Body.Close()
267 | }*/
268 |
269 | if r.StatusCode >= 400 {
270 | // log.Stderr(function, r.StatusCode)
271 | err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function)
272 | r = nil
273 | return
274 | }
275 | return
276 | }
277 |
278 | type statusInfo struct {
279 | externalIpAddress string
280 | }
281 |
282 | func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
283 |
284 | message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
285 | "</u:GetExternalIPAddress>"
286 |
287 | var response *http.Response
288 | response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
289 | if response != nil {
290 | defer response.Body.Close()
291 | }
292 | if err != nil {
293 | return
294 | }
295 | var envelope Envelope
296 | data, err := ioutil.ReadAll(response.Body)
297 | if err != nil {
298 | return
299 | }
300 | reader := bytes.NewReader(data)
301 | err = xml.NewDecoder(reader).Decode(&envelope)
302 | if err != nil {
303 | return
304 | }
305 | info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
306 | return
307 | }
308 |
309 | func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
310 | info, err := n.getExternalIPAddress()
311 | if err != nil {
312 | return
313 | }
314 | addr = net.ParseIP(info.externalIpAddress)
315 | return
316 | }
317 |
318 | func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
319 | // A single concatenation would break ARM compilation.
320 | message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
321 | "<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)
322 | message += "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>"
323 | message += "<NewInternalPort>" + strconv.Itoa(internalPort) + "</NewInternalPort>" +
324 | "<NewInternalClient>" + n.ourIP + "</NewInternalClient>" +
325 | "<NewEnabled>1</NewEnabled><NewPortMappingDescription>"
326 | message += description +
327 | "</NewPortMappingDescription><NewLeaseDuration>" + strconv.Itoa(timeout) +
328 | "</NewLeaseDuration></u:AddPortMapping>"
329 |
330 | var response *http.Response
331 | response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain)
332 | if response != nil {
333 | defer response.Body.Close()
334 | }
335 | if err != nil {
336 | return
337 | }
338 |
339 | // TODO: check response to see if the port was forwarded
340 | // log.Println(message, response)
341 | mappedExternalPort = externalPort
342 | _ = response
343 | return
344 | }
345 |
346 | func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
347 |
348 | message := "<u:DeletePortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
349 | "<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) +
350 | "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" +
351 | "</u:DeletePortMapping>"
352 |
353 | var response *http.Response
354 | response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain)
355 | if response != nil {
356 | defer response.Body.Close()
357 | }
358 | if err != nil {
359 | return
360 | }
361 |
362 | // TODO: check response to see if the port was deleted
363 | // log.Println(message, response)
364 | _ = response
365 | return
366 | }
367 |
--------------------------------------------------------------------------------
/torrent/uri.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "crypto/sha1"
5 | "fmt"
6 | _ "io"
7 | "net/url"
8 | "strings"
9 |
10 | _ "github.com/nictuku/dht"
11 | )
12 |
13 | type Magnet struct {
14 | InfoHashes []string
15 | Names []string
16 | Trackers [][]string
17 | }
18 |
19 | func parseMagnet(s string) (Magnet, error) {
20 | // References:
21 | // - http://bittorrent.org/beps/bep_0009.html
22 | // - http://en.wikipedia.org/wiki/Magnet_URI_scheme
23 | //
24 | // Example bittorrent magnet link:
25 | //
26 | // => magnet:?xt=urn:btih:bbb6db69965af769f664b6636e7914f8735141b3&dn=Ubuntu-12.04-desktop-i386.iso
27 | //
28 | // xt: exact topic.
29 | // ~ urn: uniform resource name.
30 | // ~ btih: bittorrent infohash.
31 | // dn: display name (optional).
32 | // tr: tracker address (optional).
33 | u, err := url.Parse(s)
34 | if err != nil {
35 | return Magnet{}, err
36 | }
37 | xts, ok := u.Query()["xt"]
38 | if !ok {
39 | return Magnet{}, fmt.Errorf("Magnet URI missing the 'xt' argument: %s", s)
40 | }
41 | infoHashes := make([]string, 0, len(xts))
42 | for _, xt := range xts {
43 | s := strings.Split(xt, "urn:btih:")
44 | if len(s) != 2 {
45 | return Magnet{}, fmt.Errorf("Magnet URI xt parameter missing the 'urn:btih:' prefix. Not a bittorrent hash link?")
46 | }
47 | ih := s[1]
48 | // TODO: support base32 encoded hashes, if they still exist.
49 | if len(ih) != sha1.Size*2 { // hex format.
50 | return Magnet{}, fmt.Errorf("Magnet URI contains infohash with unexpected length. Wanted %d, got %d: %v", sha1.Size*2, len(ih), ih)
51 | }
52 | infoHashes = append(infoHashes, s[1])
53 | }
54 |
55 | var names []string
56 | n, ok := u.Query()["dn"]
57 | if ok {
58 | names = n
59 | }
60 |
61 | var trackers [][]string
62 | tr, ok := u.Query()["tr"]
63 | if ok {
64 | trackers = [][]string{tr}
65 | }
66 | fmt.Println("Trackers: ", trackers)
67 |
68 | return Magnet{InfoHashes: infoHashes, Names: names, Trackers: trackers}, nil
69 | }
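   | // For the example link above, parseMagnet returns
   | //   InfoHashes: ["bbb6db69965af769f664b6636e7914f8735141b3"]
   | //   Names:      ["Ubuntu-12.04-desktop-i386.iso"]
   | // (see uri_test.go).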
70 |
--------------------------------------------------------------------------------
/torrent/uri_test.go:
--------------------------------------------------------------------------------
1 | package torrent
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 |
8 | type magnetTest struct {
9 | uri string
10 | infoHashes []string
11 | }
12 |
13 | func TestParseMagnet(t *testing.T) {
14 | uris := []magnetTest{
15 | {uri: "magnet:?xt=urn:btih:bbb6db69965af769f664b6636e7914f8735141b3&dn=Ubuntu-12.04-desktop-i386.iso&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A80&tr=udp%3A%2F%2Ftracker.publicbt.com%3A80&tr=udp%3A%2F%2Ftracker.istole.it%3A6969&tr=udp%3A%2F%2Ftracker.ccc.de%3A80", infoHashes: []string{"bbb6db69965af769f664b6636e7914f8735141b3"}},
16 | }
17 |
18 | for _, u := range uris {
19 | m, err := parseMagnet(u.uri)
20 | if err != nil {
21 | t.Errorf("ParseMagnet failed for uri %v: %v", u.uri, err)
22 | }
23 | if !reflect.DeepEqual(u.infoHashes, m.InfoHashes) {
24 | t.Errorf("ParseMagnet failed, wanted %v, got %v", u.infoHashes, m.InfoHashes)
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/tracker/tracker.go:
--------------------------------------------------------------------------------
1 | package tracker
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "log"
7 | "math/rand"
8 | "net"
9 | "net/http"
10 | "net/url"
11 | "os"
12 | "os/signal"
13 | "path"
14 | "strconv"
15 | "strings"
16 | "sync"
17 | "time"
18 |
19 | "github.com/jackpal/Taipei-Torrent/torrent"
20 | bencode "github.com/jackpal/bencode-go"
21 | )
22 |
23 | type Tracker struct {
24 | Announce string
25 | Addr string
26 | ID string
27 | done chan struct{}
28 | m sync.Mutex // Protects l and t
29 | l net.Listener
30 | t trackerTorrents
31 | }
32 |
33 | type trackerTorrents map[string]*trackerTorrent
34 |
35 | // Single-threaded implementation.
36 | type trackerTorrent struct {
37 | name string
38 | downloaded uint64
39 | peers trackerPeers
40 | }
41 |
42 | // key is the client's listen address, in the form IP:port
43 | type trackerPeers map[string]*trackerPeer
44 |
45 | type trackerPeer struct {
46 | listenAddr *net.TCPAddr
47 | id string
48 | lastSeen time.Time
49 | uploaded uint64
50 | downloaded uint64
51 | left uint64
52 | }
53 |
54 | type announceParams struct {
55 | infoHash string
56 | peerID string
57 | ip string // optional
58 | port int
59 | uploaded uint64
60 | downloaded uint64
61 | left uint64
62 | compact bool
63 | noPeerID bool
64 | event string
65 | numWant int
66 | trackerID string
67 | }
68 |
69 | type bmap map[string]interface{}
70 |
71 | func getBool(v url.Values, key string) (b bool, err error) {
72 | val := v.Get(key)
73 | if val == "" {
74 | err = fmt.Errorf("Missing query parameter: %v", key)
75 | return
76 | }
77 | return strconv.ParseBool(val)
78 | }
79 |
80 | func getUint64(v url.Values, key string) (i uint64, err error) {
81 | val := v.Get(key)
82 | if val == "" {
83 | err = fmt.Errorf("Missing query parameter: %v", key)
84 | return
85 | }
86 | return strconv.ParseUint(val, 10, 64)
87 | }
88 |
89 | func getUint(v url.Values, key string) (i int, err error) {
90 | var i64 uint64
91 | i64, err = getUint64(v, key)
92 | if err != nil {
93 | return
94 | }
95 | i = int(i64)
96 | return
97 | }
98 |
99 | func (a *announceParams) parse(u *url.URL) (err error) {
100 | q := u.Query()
101 | a.infoHash = q.Get("info_hash")
102 | if a.infoHash == "" {
103 | err = fmt.Errorf("Missing info_hash")
104 | return
105 | }
106 | a.ip = q.Get("ip")
107 | a.peerID = q.Get("peer_id")
108 | a.port, err = getUint(q, "port")
109 | if err != nil {
110 | return
111 | }
112 | a.uploaded, err = getUint64(q, "uploaded")
113 | if err != nil {
114 | return
115 | }
116 | a.downloaded, err = getUint64(q, "downloaded")
117 | if err != nil {
118 | return
119 | }
120 | a.left, err = getUint64(q, "left")
121 | if err != nil {
122 | return
123 | }
124 | if q.Get("compact") != "" {
125 | a.compact, err = getBool(q, "compact")
126 | if err != nil {
127 | return
128 | }
129 | }
130 | if q.Get("no_peer_id") != "" {
131 | a.noPeerID, err = getBool(q, "no_peer_id")
132 | if err != nil {
133 | return
134 | }
135 | }
136 | a.event = q.Get("event")
137 | if numWant := q.Get("numwant"); numWant != "" {
138 | a.numWant, err = strconv.Atoi(numWant)
139 | if err != nil {
140 | return
141 | }
142 | }
143 | a.trackerID = q.Get("trackerid")
144 | return
145 | }
146 |
147 | func randomHexString(n int) string {
148 | return randomString("0123456789abcdef", n)
149 | }
150 |
151 | func randomString(s string, n int) string {
152 | b := make([]byte, n)
153 | slen := len(s)
154 | r := rand.New(rand.NewSource(time.Now().UnixNano()))
155 | for i := 0; i < n; i++ {
156 | b[i] = s[r.Intn(slen)]
157 | }
158 | return string(b)
159 | }
160 |
161 | func newTrackerPeerListenAddress(requestRemoteAddr string, params *announceParams) (addr *net.TCPAddr, err error) {
162 | var host string
163 | if params.ip != "" {
164 | host = params.ip
165 | } else {
166 | host, _, err = net.SplitHostPort(requestRemoteAddr)
167 | if err != nil {
168 | return
169 | }
170 | }
171 | return net.ResolveTCPAddr("tcp", net.JoinHostPort(host, strconv.Itoa(params.port)))
172 | }
173 |
174 | // Start a tracker and run it until interrupted.
175 | func StartTracker(addr string, torrentFiles []string) (err error) {
176 | t := NewTracker()
177 | // TODO(jackpal) Allow caller to choose port number
178 | t.Addr = addr
179 | for _, torrentFile := range torrentFiles {
180 | var metaInfo *torrent.MetaInfo
181 | metaInfo, err = torrent.GetMetaInfo(nil, torrentFile)
182 | if err != nil {
183 | return
184 | }
185 | name := metaInfo.Info.Name
186 | if name == "" {
187 | name = path.Base(torrentFile)
188 | }
189 | err = t.Register(metaInfo.InfoHash, name)
190 | if err != nil {
191 | return
192 | }
193 | }
194 | go func() {
195 | quitChan := listenSigInt()
196 | select {
197 | case <-quitChan:
198 | log.Printf("got control-C")
199 | t.Quit()
200 | }
201 | }()
202 |
203 | err = t.ListenAndServe()
204 | if err != nil {
205 | return
206 | }
207 | return
208 | }
209 |
210 | func listenSigInt() chan os.Signal {
211 | c := make(chan os.Signal, 1)
212 | signal.Notify(c, os.Interrupt, os.Kill)
213 | return c
214 | }
215 |
216 | func NewTracker() *Tracker {
217 | return &Tracker{Announce: "/announce", t: NewTrackerTorrents()}
218 | }
219 |
220 | func (t *Tracker) ListenAndServe() (err error) {
221 | t.done = make(chan struct{})
222 | if t.ID == "" {
223 | t.ID = randomHexString(20)
224 | }
225 | addr := t.Addr
226 | if addr == "" {
227 | addr = ":80"
228 | }
229 | var l net.Listener
230 | l, err = net.Listen("tcp", addr)
231 | if err != nil {
232 | return
233 | }
234 | t.m.Lock()
235 | t.l = l
236 | t.m.Unlock()
237 | serveMux := http.NewServeMux()
238 | announce := t.Announce
239 | if announce == "" {
240 | announce = "/"
241 | }
242 | serveMux.HandleFunc(announce, t.handleAnnounce)
243 | scrape := ScrapePattern(announce)
244 | if scrape != "" {
245 | serveMux.HandleFunc(scrape, t.handleScrape)
246 | }
247 | go t.reaper()
248 | // This call will not return until there is an error or the listener t.l is closed.
249 | err = http.Serve(l, serveMux)
250 | if err != nil {
251 | select {
252 | case <-t.done:
253 | // We're finished. Err is probably a "use of closed network connection" error.
254 | err = nil
255 | default:
256 | // Not finished
257 | }
258 | }
259 | return
260 | }
261 |
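   | // ScrapePattern derives the scrape URL path from the announce path by
   | // replacing the final "announce" path component with "scrape", following
   | // the common tracker scrape convention. For example, "/announce" maps to
   | // "/scrape" and "x/announce?ad#3" maps to "x/scrape?ad#3"; if the last
   | // path component does not start with "announce", scraping is unsupported
   | // and "" is returned.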
262 | func ScrapePattern(announcePattern string) string {
263 | lastSlashIndex := strings.LastIndex(announcePattern, "/")
264 | if lastSlashIndex >= 0 {
265 | firstPart := announcePattern[0 : lastSlashIndex+1]
266 | lastPart := announcePattern[lastSlashIndex+1:]
267 | announce := "announce"
268 | if strings.HasPrefix(lastPart, announce) {
269 | afterAnnounce := lastPart[len(announce):]
270 | return strings.Join([]string{firstPart, "scrape", afterAnnounce}, "")
271 | }
272 | }
273 | return ""
274 | }
275 |
276 | func (t *Tracker) handleAnnounce(w http.ResponseWriter, r *http.Request) {
277 | w.Header().Set("Content-Type", "text/plain")
278 | response := make(bmap)
279 | var params announceParams
280 | var peerListenAddress *net.TCPAddr
281 | err := params.parse(r.URL)
282 | if err == nil {
283 | if params.trackerID != "" && params.trackerID != t.ID {
284 | err = fmt.Errorf("Incorrect tracker ID: %#v", params.trackerID)
285 | }
286 | }
287 | if err == nil {
288 | peerListenAddress, err = newTrackerPeerListenAddress(r.RemoteAddr, ¶ms)
289 | }
290 | if err == nil {
291 | now := time.Now()
292 | t.m.Lock()
293 | err = t.t.handleAnnounce(now, peerListenAddress, ¶ms, response)
294 | t.m.Unlock()
295 | if err == nil {
296 | response["interval"] = int64(30 * 60)
297 | response["tracker id"] = t.ID
298 | }
299 | }
300 | var b bytes.Buffer
301 | if err != nil {
302 | log.Printf("announce from %v failed: %#v", r.RemoteAddr, err.Error())
303 | errorResponse := make(bmap)
304 | errorResponse["failure reason"] = err.Error()
305 | err = bencode.Marshal(&b, errorResponse)
306 | } else {
307 | err = bencode.Marshal(&b, response)
308 | }
309 | if err == nil {
310 | w.Write(b.Bytes())
311 | }
312 | }
313 |
314 | func (t *Tracker) handleScrape(w http.ResponseWriter, r *http.Request) {
315 | w.Header().Set("Content-Type", "text/plain")
316 | infoHashes := r.URL.Query()["info_hash"]
317 | response := make(bmap)
318 | response["files"] = t.t.scrape(infoHashes)
319 | var b bytes.Buffer
320 | err := bencode.Marshal(&b, response)
321 | if err == nil {
322 | w.Write(b.Bytes())
323 | }
324 | }
325 |
326 | func (t *Tracker) Quit() (err error) {
327 | select {
328 | case <-t.done:
329 | err = fmt.Errorf("Already done")
330 | return
331 | default:
332 | }
333 | var l net.Listener
334 | t.m.Lock()
335 | l = t.l
336 | t.m.Unlock()
337 | l.Close()
338 | close(t.done)
339 | return
340 | }
341 |
342 | func (t *Tracker) Register(infoHash, name string) (err error) {
343 | log.Printf("Register(%#v,%#v)", infoHash, name)
344 | t.m.Lock()
345 | defer t.m.Unlock()
346 | err = t.t.register(infoHash, name)
347 | return
348 | }
349 |
350 | func (t *Tracker) Unregister(infoHash string) (err error) {
351 | t.m.Lock()
352 | defer t.m.Unlock()
353 | err = t.t.unregister(infoHash)
354 | return
355 | }
356 |
357 | func (t *Tracker) reaper() {
358 | checkDuration := 30 * time.Minute
359 | reapDuration := 2 * checkDuration
360 | ticker := time.Tick(checkDuration)
361 | for {
362 | select {
363 | case <-t.done:
364 | return
365 | case <-ticker:
366 | t.m.Lock()
367 | deadline := time.Now().Add(-reapDuration)
368 | t.t.reap(deadline)
369 | t.m.Unlock()
370 | }
   | }
   | }
371 |
372 | func NewTrackerTorrents() trackerTorrents {
373 | return make(trackerTorrents)
374 | }
375 |
376 | func (t trackerTorrents) handleAnnounce(now time.Time, peerListenAddress *net.TCPAddr, params *announceParams, response bmap) (err error) {
377 | if tt, ok := t[params.infoHash]; ok {
378 | err = tt.handleAnnounce(now, peerListenAddress, params, response)
379 | } else {
380 | err = fmt.Errorf("Unknown infoHash %#v", params.infoHash)
381 | return
382 | }
383 | return
384 | }
385 |
386 | func (t trackerTorrents) scrape(infoHashes []string) (files bmap) {
387 | files = make(bmap)
388 | if len(infoHashes) > 0 {
389 | for _, infoHash := range infoHashes {
390 | if tt, ok := t[infoHash]; ok {
391 | files[infoHash] = tt.scrape()
392 | }
393 | }
394 | } else {
395 | for infoHash, tt := range t {
396 | files[infoHash] = tt.scrape()
397 | }
398 | }
399 | return
400 | }
401 |
402 | func (t trackerTorrents) register(infoHash, name string) (err error) {
403 | if t2, ok := t[infoHash]; ok {
404 | err = fmt.Errorf("Already have a torrent %#v with infoHash %v", t2.name, infoHash)
405 | return
406 | }
407 | t[infoHash] = &trackerTorrent{name: name, peers: make(trackerPeers)}
408 | return
409 | }
410 |
411 | func (t trackerTorrents) unregister(infoHash string) (err error) {
412 | delete(t, infoHash)
413 | return
414 | }
415 |
416 | func (t *trackerTorrent) countPeers() (complete, incomplete int) {
417 | for _, p := range t.peers {
418 | if p.isComplete() {
419 | complete++
420 | } else {
421 | incomplete++
422 | }
423 | }
424 | return
425 | }
426 |
427 | func (t *trackerTorrent) handleAnnounce(now time.Time, peerListenAddress *net.TCPAddr, params *announceParams, response bmap) (err error) {
428 | peerKey := peerListenAddress.String()
429 | var peer *trackerPeer
430 | var ok bool
431 | if peer, ok = t.peers[peerKey]; ok {
432 | // Does the new peer match the old peer?
433 | if peer.id != params.peerID {
434 | log.Printf("Peer changed ID. %#v != %#v", peer.id, params.peerID)
435 | delete(t.peers, peerKey)
436 | peer = nil
437 | }
438 | }
439 | if peer == nil {
440 | peer = &trackerPeer{
441 | listenAddr: peerListenAddress,
442 | id: params.peerID,
443 | }
444 | t.peers[peerKey] = peer
445 | log.Printf("Peer %s joined", peerKey)
446 | }
447 | peer.lastSeen = now
448 | peer.uploaded = params.uploaded
449 | peer.downloaded = params.downloaded
450 | peer.left = params.left
451 | switch params.event {
452 | default:
453 | // TODO(jackpal):maybe report this as a warning
454 | log.Printf("Peer %s Unknown event %s", peerKey, params.event)
455 | case "":
456 | case "started":
457 | // do nothing
458 | case "completed":
459 | t.downloaded++
460 | log.Printf("Peer %s completed. Total completions %d", peerKey, t.downloaded)
461 | case "stopped":
462 | // This client is reporting that they have stopped. Drop them from the peer table.
463 | // And don't send any peers, since they won't need them.
464 | log.Printf("Peer %s stopped", peerKey)
465 | delete(t.peers, peerKey)
466 | params.numWant = 0
467 | }
468 |
469 | completeCount, incompleteCount := t.countPeers()
470 | response["complete"] = completeCount
471 | response["incomplete"] = incompleteCount
472 |
473 | peerCount := len(t.peers)
474 | numWant := params.numWant
475 | const DEFAULT_PEER_COUNT = 50
476 | if numWant <= 0 || numWant > DEFAULT_PEER_COUNT {
477 | numWant = DEFAULT_PEER_COUNT
478 | }
479 | if numWant > peerCount {
480 | numWant = peerCount
481 | }
482 |
483 | peerKeys := t.peers.pickRandomPeers(peerKey, params.compact, numWant)
484 | if params.compact {
485 | var b bytes.Buffer
486 | err = t.peers.writeCompactPeers(&b, peerKeys)
487 | if err != nil {
488 | return
489 | }
490 | response["peers"] = string(b.Bytes())
491 | } else {
492 | var peers []bmap
493 | noPeerID := params.noPeerID
494 | peers, err = t.peers.getPeers(peerKeys, noPeerID)
495 | if err != nil {
496 | return
497 | }
498 | response["peers"] = peers
499 | }
500 | return
501 | }
502 |
503 | func (t *trackerTorrent) scrape() (response bmap) {
504 | response = make(bmap)
505 | completeCount, incompleteCount := t.countPeers()
506 | response["complete"] = completeCount
507 | response["incomplete"] = incompleteCount
508 | response["downloaded"] = t.downloaded
509 | if t.name != "" {
510 | response["name"] = t.name
511 | }
512 | return
513 | }
514 |
515 | func (t trackerPeers) pickRandomPeers(peerKey string, compact bool, count int) (peers []string) {
516 | // Cheesy approximation to picking randomly from all peers.
517 | // Depends upon the implementation detail that map iteration is pseudoRandom
518 | for k, v := range t {
519 | if k == peerKey {
520 | continue
521 | }
522 | if compact && v.listenAddr.IP.To4() == nil {
523 | continue
524 | }
525 | peers = append(peers, k)
526 | if len(peers) == count {
527 | break
528 | }
529 | }
530 | return
531 | }
532 |
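   | // writeCompactPeers encodes peers in the BEP 23 compact format:
   | // 6 bytes per peer, a 4-byte IPv4 address followed by a 2-byte
   | // big-endian port. IPv6 peers cannot be represented and are an error.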
533 | func (t trackerPeers) writeCompactPeers(b *bytes.Buffer, keys []string) (err error) {
534 | for _, k := range keys {
535 | p := t[k]
536 | la := p.listenAddr
537 | ip4 := la.IP.To4()
538 | if ip4 == nil {
539 | err = fmt.Errorf("Can't write a compact peer for a non-IPv4 peer %v %v", k, p.listenAddr.String())
540 | return
541 | }
542 | _, err = b.Write(ip4)
543 | if err != nil {
544 | return
545 | }
546 | port := la.Port
547 | portBytes := []byte{byte(port >> 8), byte(port)}
548 | _, err = b.Write(portBytes)
549 | if err != nil {
550 | return
551 | }
552 | }
553 | return
554 | }
555 |
556 | func (t trackerPeers) getPeers(keys []string, noPeerID bool) (peers []bmap, err error) {
557 | for _, k := range keys {
558 | p := t[k]
559 | la := p.listenAddr
560 | var peer bmap = make(bmap)
561 | if !noPeerID {
562 | peer["peer id"] = p.id
563 | }
564 | peer["ip"] = la.IP.String()
565 | peer["port"] = strconv.Itoa(la.Port)
566 | peers = append(peers, peer)
567 | }
568 | return
569 | }
570 |
571 | func (t trackerTorrents) reap(deadline time.Time) {
572 | for _, tt := range t {
573 | tt.reap(deadline)
574 | }
575 | }
576 |
577 | func (t *trackerTorrent) reap(deadline time.Time) {
578 | t.peers.reap(deadline)
579 | }
580 |
581 | func (t trackerPeers) reap(deadline time.Time) {
582 | for address, peer := range t {
583 | if deadline.After(peer.lastSeen) {
584 | log.Println("reaping", address)
585 | delete(t, address)
586 | }
587 | }
588 | }
589 |
590 | func (t *trackerPeer) isComplete() bool {
591 | return t.left == 0
592 | }
593 |
--------------------------------------------------------------------------------
/tracker/tracker_test.go:
--------------------------------------------------------------------------------
1 | package tracker
2 |
3 | import (
4 | "bufio"
5 | "bytes"
6 | "fmt"
7 | "io"
8 | "io/ioutil"
9 | "log"
10 | "math"
11 | "os"
12 | "os/exec"
13 | "path"
14 | "strconv"
15 | "testing"
16 | "time"
17 |
18 | "github.com/jackpal/Taipei-Torrent/torrent"
19 | )
20 |
21 | func TestScrapeURL(t *testing.T) {
22 | tests := []struct{ announce, scrape string }{
23 | {"", ""},
24 | {"foo", ""},
25 | {"x/announce", "x/scrape"},
26 | {"x/announce?ad#3", "x/scrape?ad#3"},
27 | {"announce/x", ""},
28 | }
29 | for _, test := range tests {
30 | scrape := ScrapePattern(test.announce)
31 | if scrape != test.scrape {
32 | t.Errorf("ScrapeURL(%#v) = %#v. Expected %#v", test.announce, scrape, test.scrape)
33 | }
34 | }
35 | }
36 |
37 | func TestSwarm1(t *testing.T) {
38 | testSwarm(t, 1)
39 | }
40 |
41 | func TestSwarm10(t *testing.T) {
42 | testSwarm(t, 10)
43 | }
44 |
45 | func TestSwarm20(t *testing.T) {
46 | testSwarm(t, 20)
47 | }
48 |
49 | func TestSwarm50(t *testing.T) {
50 | testSwarm(t, 50)
51 | }
52 |
53 | func TestSwarm100(t *testing.T) {
54 | testSwarm(t, 100)
55 | }
56 |
57 | func testSwarm(t *testing.T, leechCount int) {
58 | err := runSwarm(leechCount)
59 | if err != nil {
60 | t.Fatal("Error running testSwarm", err)
61 | }
62 | }
63 |
64 | type prog struct {
65 | instanceName string
66 | dirName string
67 | cmd *exec.Cmd
68 | }
69 |
70 | func (p *prog) start(doneCh chan *prog) (err error) {
71 | log.Println("starting", p.instanceName)
72 | out := logWriter(p.instanceName)
73 | p.cmd.Stdout = &out
74 | p.cmd.Stderr = &out
75 | err = p.cmd.Start()
76 | if err != nil {
77 | return
78 | }
79 | go func() {
80 | p.cmd.Wait()
81 | doneCh <- p
82 | }()
83 | return
84 | }
85 |
86 | func (p *prog) kill() (err error) {
87 | err = p.cmd.Process.Kill()
88 | return
89 | }
90 |
91 | func newProg(instanceName string, dir string, command string, arg ...string) (p *prog) {
92 | cmd := helperCommands(append([]string{command}, arg...)...)
93 | return &prog{instanceName: instanceName, dirName: dir, cmd: cmd}
94 | }
95 |
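   | // runSwarm exercises a full swarm end to end: it creates a temporary seed
   | // directory and torrent file, starts a tracker on :8080 and a seeding
   | // client as helper processes, then launches leechCount leeching clients
   | // and waits (with a timeout) for each leech to finish, comparing its
   | // downloaded data against the seed data.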
96 | func runSwarm(leechCount int) (err error) {
97 | var rootDir string
98 | rootDir, err = ioutil.TempDir("", "swarm")
99 | if err != nil {
100 | return
101 | }
102 | log.Printf("Temporary directory: %s", rootDir)
103 | seedDir := path.Join(rootDir, "seed")
104 | err = os.Mkdir(seedDir, 0700)
105 | if err != nil {
106 | return
107 | }
108 | seedData := path.Join(seedDir, "data")
109 | err = createDataFile(seedData, 1024*1024)
110 | if err != nil {
111 | return
112 | }
113 | torrentFile := path.Join(rootDir, "testSwarm.torrent")
114 | err = createTorrentFile(torrentFile, seedData, "127.0.0.1:8080/announce")
115 | if err != nil {
116 | return
117 | }
118 |
119 | doneCh := make(chan *prog, 1)
120 |
121 | tracker := newTracker("tracker", ":8080", rootDir, torrentFile)
122 | err = tracker.start(doneCh)
123 | if err != nil {
124 | return
125 | }
126 | defer tracker.kill()
127 | time.Sleep(100 * time.Microsecond)
128 |
129 | var seed, leech *prog
130 | seed = newTorrentClient("seed", 0, torrentFile, seedDir, math.Inf(0))
131 | err = seed.start(doneCh)
132 | if err != nil {
133 | return
134 | }
135 | defer seed.kill()
136 | time.Sleep(50 * time.Microsecond)
137 |
138 | for l := 0; l < leechCount; l++ {
139 | leechDir := path.Join(rootDir, fmt.Sprintf("leech %d", l))
140 | err = os.Mkdir(leechDir, 0700)
141 | if err != nil {
142 | return
143 | }
144 | leech = newTorrentClient(fmt.Sprintf("leech%d", l), 0, torrentFile, leechDir, 0)
145 | err = leech.start(doneCh)
146 | if err != nil {
147 | return
148 | }
149 | defer leech.kill()
150 | }
151 |
152 | timeout := make(chan bool, 1)
153 | go func() {
154 | // It takes about 3.5 seconds to complete the test on my computer.
155 | time.Sleep(50 * time.Second)
156 | timeout <- true
157 | }()
158 |
159 | for doneCount := 0; doneCount < leechCount; doneCount++ {
160 | select {
161 | case <-timeout:
162 | err = fmt.Errorf("Timout exceeded")
163 | case donePeer := <-doneCh:
164 | if donePeer == tracker || donePeer == seed {
165 | err = fmt.Errorf("%v finished before all leeches. Should not have.", donePeer)
166 | } else {
167 | err = compareData(seedData, donePeer.dirName)
   | }
168 | }
169 | if err != nil {
170 | return
171 | }
172 | log.Printf("Done: %d of %d", (doneCount + 1), leechCount)
173 | }
174 | if err != nil {
175 | return
176 | }
177 | // All is good. Clean up
178 | os.RemoveAll(rootDir)
179 |
180 | return
181 | }
182 |
183 | func newTracker(name string, addr string, fileDir string, torrentFile string) (p *prog) {
184 | return newProg(name, fileDir, "tracker", addr, torrentFile)
185 | }
186 |
187 | func newTorrentClient(name string, port int, torrentFile string, fileDir string, ratio float64) (p *prog) {
188 | return newProg(name, fileDir, "client",
189 | fmt.Sprintf("%v", port),
190 | fileDir,
191 | fmt.Sprintf("%v", ratio),
192 | torrentFile)
193 | }
194 |
195 | func createTorrentFile(torrentFileName, root, announcePath string) (err error) {
196 | var metaInfo *torrent.MetaInfo
197 | metaInfo, err = torrent.CreateMetaInfoFromFileSystem(nil, root, "127.0.0.1:8080", 0, false)
198 | if err != nil {
199 | return
200 | }
201 | metaInfo.CreatedBy = "testSwarm"
202 | var torrentFile *os.File
203 | torrentFile, err = os.Create(torrentFileName)
204 | if err != nil {
205 | return
206 | }
207 | defer torrentFile.Close()
208 | err = metaInfo.Bencode(torrentFile)
209 | if err != nil {
210 | return
211 | }
212 | return
213 | }
214 |
215 | func createDataFile(name string, length int64) (err error) {
216 | if (length & 3) != 0 {
217 | return fmt.Errorf("createDataFile only supports length that is a multiple of 4. Not %d", length)
218 | }
219 | var file *os.File
220 | file, err = os.Create(name)
221 | if err != nil {
222 | return
223 | }
224 | defer file.Close()
225 | err = file.Truncate(length)
226 | if err != nil {
227 | return
228 | }
229 | w := bufio.NewWriter(file)
230 | b := make([]byte, 4)
231 | for i := int64(0); i < length; i += 4 {
232 | b[0] = byte(i >> 24)
233 | b[1] = byte(i >> 16)
234 | b[2] = byte(i >> 8)
235 | b[3] = byte(i)
236 | _, err = w.Write(b)
237 | if err != nil {
238 | return
239 | }
240 | }
241 | return
242 | }
243 |
244 | func compareData(sourceName, copyDirName string) (err error) {
245 | _, base := path.Split(sourceName)
246 | copyName := path.Join(copyDirName, base)
247 | err = compare(sourceName, copyName)
248 | return
249 | }
250 |
251 | // Compare two files (or directories) for equality.
252 | func compare(aName, bName string) (err error) {
253 | var aFileInfo, bFileInfo os.FileInfo
254 | aFileInfo, err = os.Stat(aName)
255 | if err != nil {
256 | return
257 | }
258 | bFileInfo, err = os.Stat(bName)
259 | if err != nil {
260 | return
261 | }
262 | aIsDir, bIsDir := aFileInfo.IsDir(), bFileInfo.IsDir()
263 | if aIsDir != bIsDir {
264 | return fmt.Errorf("%s.IsDir() == %v != %s.IsDir() == %v",
265 | aName, aIsDir,
266 | bName, bIsDir)
267 | }
268 | var aFile, bFile *os.File
269 | aFile, err = os.Open(aName)
270 | if err != nil {
271 | return
272 | }
273 | defer aFile.Close()
274 | bFile, err = os.Open(bName)
275 | if err != nil {
276 | return
277 | }
278 | defer bFile.Close()
279 | if !aIsDir {
280 | aSize, bSize := aFileInfo.Size(), bFileInfo.Size()
281 | if aSize != bSize {
282 | return fmt.Errorf("%s.Size() == %v != %s.Size() == %v",
283 | aName, aSize,
284 | bName, bSize)
285 | }
286 | var aBuf, bBuf bytes.Buffer
287 | bufferSize := int64(128 * 1024)
288 | for i := int64(0); i < aSize; i += bufferSize {
289 | toRead := bufferSize
290 | remainder := aSize - i
291 | if toRead > remainder {
292 | toRead = remainder
293 | }
294 | _, err = io.CopyN(&aBuf, aFile, toRead)
295 | if err != nil {
296 | return
297 | }
298 | _, err = io.CopyN(&bBuf, bFile, toRead)
299 | if err != nil {
300 | return
301 | }
302 | aBytes, bBytes := aBuf.Bytes(), bBuf.Bytes()
303 | for j := int64(0); j < toRead; j++ {
304 | a, b := aBytes[j], bBytes[j]
305 | if a != b {
306 | err = fmt.Errorf("%s[%d] %d != %d", aName, i+j, a, b)
307 | return
308 | }
309 | }
310 | aBuf.Reset()
311 | bBuf.Reset()
312 | }
313 | } else {
314 | var aNames, bNames []string
315 | aNames, err = aFile.Readdirnames(0)
316 | if err != nil {
317 | return
318 | }
319 | bNames, err = bFile.Readdirnames(0)
320 | if err != nil {
321 | return
322 | }
323 | if len(aNames) != len(bNames) {
324 | err = fmt.Errorf("Directories %v and %v don't contain same number of files %d != %d",
325 | aName, bName, len(aNames), len(bNames))
326 | }
327 | for _, name := range aNames {
328 | err = compare(path.Join(aName, name), path.Join(bName, name))
329 | if err != nil {
330 | return
331 | }
332 | }
333 | }
334 | return
335 | }
336 |
337 | // type logWriter
338 |
339 | type logWriter string
340 |
341 | func (l logWriter) Write(p []byte) (n int, err error) {
342 | log.Println(l, string(p))
343 | n = len(p)
344 | return
345 | }
346 |
347 | // A test that's used to run multiple processes. From http://golang.org/src/pkg/os/exec/exec_test.go
348 |
349 | func helperCommands(s ...string) *exec.Cmd {
350 | cs := []string{"-test.run=TestHelperProcess", "--"}
351 | cs = append(cs, s...)
352 | cmd := exec.Command(os.Args[0], cs...)
353 | cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
354 | return cmd
355 | }
356 |
357 | func TestHelperProcess(*testing.T) {
358 | if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
359 | return
360 | }
361 |
362 | defer os.Exit(0)
363 |
364 | err := testHelperProcessImp(os.Args)
365 | if err != nil {
366 | fmt.Fprintf(os.Stderr, "error %v\n", err)
367 | os.Exit(3)
368 | }
369 | }
370 |
371 | func testHelperProcessImp(args []string) (err error) {
372 | for len(args) > 0 {
373 | if args[0] == "--" {
374 | args = args[1:]
375 | break
376 | }
377 | args = args[1:]
378 | }
379 |
380 | if len(args) == 0 {
381 | err = fmt.Errorf("No commands\n")
382 | return
383 | }
384 |
385 | cmd, args := args[0], args[1:]
386 | switch cmd {
387 | case "tracker":
388 | if len(args) < 2 {
389 | err = fmt.Errorf("tracker expected 2 or more args\n")
390 | return
391 | }
392 | addr, torrentFiles := args[0], args[1:]
393 |
394 | err = StartTracker(addr, torrentFiles)
395 | if err != nil {
396 | return
397 | }
398 | case "client":
399 | if len(args) < 4 {
400 | err = fmt.Errorf("client expected 4 or more args\n")
401 | return
402 | }
403 | portStr, fileDir, seedRatioStr, torrentFiles :=
404 | args[0], args[1], args[2], args[3:]
405 | var port uint64
406 | port, err = strconv.ParseUint(portStr, 10, 16)
407 | if err != nil {
408 | return
409 | }
410 | var seedRatio float64
411 | seedRatio, err = strconv.ParseFloat(seedRatioStr, 64)
   | if err != nil {
   | return
   | }
412 | torrentFlags := torrent.TorrentFlags{
413 | Port: int(port),
414 | FileDir: fileDir,
415 | SeedRatio: seedRatio,
416 | FileSystemProvider: torrent.OsFsProvider{},
417 | InitialCheck: true,
418 | MaxActive: 1,
419 | ExecOnSeeding: "",
420 | Cacher: torrent.NewRamCacheProvider(1),
421 | MemoryPerTorrent: 4,
422 | }
423 | err = torrent.RunTorrents(&torrentFlags, torrentFiles)
424 | if err != nil {
425 | return
426 | }
427 | default:
428 | err = fmt.Errorf("Unknown command %q\n", cmd)
429 | return
430 | }
431 | return
432 | }
433 |
--------------------------------------------------------------------------------