├── .gitignore ├── .travis.yml ├── AUTHORS ├── COPYING ├── HACKING.md ├── INSTALL ├── Makefile ├── NEWS ├── README.md ├── TEST.md ├── WW3.md ├── bin ├── .gitignore └── etorrentctl.in ├── ct-run.sh ├── documentation ├── Makefile ├── auto │ └── documentation.el ├── bep-12-15-interaction.md ├── configuration_options.txt ├── documentation.tex ├── dynamic_sockets.txt ├── fast_resume.txt ├── git.md ├── new_version.txt └── sup_tree_20110106.m4 ├── erlang.mk ├── etorrent_test.cfg ├── etorrent_test.spec ├── rel ├── dev.config ├── dev.sys.config ├── release.config ├── reltool.config ├── sys.config └── vm.args ├── relx-dev.config ├── relx.config ├── scratch ├── start-dev.sh ├── test ├── etorrent.cover ├── etorrent_SUITE.erl └── etorrent_SUITE_data │ ├── run_opentracker.sh │ ├── run_transmission-cli.sh │ └── transmission │ └── settings.json └── tools └── graph /.gitignore: -------------------------------------------------------------------------------- 1 | .#* 2 | *~ 3 | /apps/etorrent/ebin/etorrent.app 4 | /apps/rlimit/ebin/rlimit.app 5 | /apps/etorrent/.eunit 6 | /apps/etorrent/src/tags 7 | /apps/etorrent/logs 8 | *.beam 9 | deps/* 10 | /dev 11 | /erl_crash.dump 12 | err.*log 13 | /priv/webui/*.log 14 | /rel/etorrent 15 | /rel/vars/etorrent-dev_vars.config 16 | *.swp 17 | tracer.log 18 | webui/*.log 19 | /depgraph.dot 20 | /depgraph.pdf 21 | /depgraph.png 22 | /apps/etorrent/doc 23 | /documentation/sup_tree_20101220.pdf 24 | /documentation/sup_tree_20101220.png 25 | /documentation/sup_tree_20110106.pdf 26 | /documentation/sup_tree_20110106.png 27 | .eunit/ 28 | /etorrent_dialyzer.plt 29 | log/* 30 | # Common tests 31 | /test/etorrent_SUITE_data/autogen 32 | /logs/* 33 | test/etorrent_SUITE_data/log/ 34 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: erlang 2 | branches: 3 | only: 4 | - master 5 | - next 6 | - pu 7 | - utp 8 | 
notifications: 9 | email: false 10 | irc: 11 | - "irc.freenode.org#etorrent" 12 | otp_release: 13 | - R14B04 14 | - R14B03 15 | - R14B02 16 | - R14B01 17 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Adam Wolk 2 | Edward Wang 3 | Jesper Louis Andersen 4 | Magnus Klaar 5 | Maxim Treskin 6 | Peter Lemenkov 7 | Tuncer Ayaz 8 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright (c) 2007-2011 Jesper Louis Andersen 2 | 3 | All rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions are 7 | met: 8 | 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above 13 | copyright notice, this list of conditions and the following 14 | disclaimer in the documentation and/or other materials provided 15 | with the distribution. 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 18 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 19 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 20 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 21 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 22 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 23 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- /HACKING.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | This document describes an architectural overview of the Etorrent 4 | application. It is meant for hackers to be used for doing changes on 5 | the application and bootstrap them to be able to do changes faster. 6 | 7 | The document is currently dated: 8 | 9 | 2010-12-22 10 | 11 | And information herein might need a review if we are much after this 12 | date because the code is in constant motion. This is a warning. Ask 13 | for an update if it is too old! 14 | 15 | # Tips and tricks used throughout the code base 16 | 17 | There are a couple of tricks which are used again and again in the 18 | code base. These could be called the "Design Patterns" of Erlang/OTP. 19 | 20 | ## Monitoring deaths 21 | 22 | A very commonly used scheme is where we have a bookkeeping process *B* 23 | and a client *C*. Usually the bookkeeping process is used because it 24 | can globalize information, the current download rate of processes 25 | say. So *C* stores its rates in an ETS table governed 26 | by *B*. The problem is now: What if *C* dies? *C* itself can't clean 27 | up since it is in an unknown state. 28 | 29 | The trick we use is to monitor *C* from *B*. Then, when a `'DOWN'` 30 | message comes to *B* it uses it to clean up after *C*. 
The trick is 31 | quite simple, but since it is used throughout the code in many places, 32 | I wanted to cover it here. 33 | 34 | ## Init with a timeout of 0 35 | 36 | Some processes have a timeout of 0 in their `init/1` callback. This 37 | means that they time out right away as soon as they are initialized 38 | and can do more work there. Remember, when a newly spawned process 39 | (the spawnee) is running `init` the spawner (calling spawn_link) will 40 | be blocked. A timeout of 0 gets around that problem. 41 | 42 | This means you can spawn a `gen_server` and then politely ask it to 43 | enter a loop based upon incoming events right away. You don't have to 44 | explicitly tell it with a message to do some processing first which 45 | lowers the expectations of the caller. *The spawn of a process is its 46 | initialization* the idiom seems to be, but I will refrain from calling 47 | it SPII. 48 | 49 | ## When a controlling process dies, so does its resource 50 | 51 | If we have an open port or a created ETS table, the crash or stop of 52 | the controlling process will also remove the ETS table or end the open 53 | port. We use this throughout etorrent to avoid having to trap exits 54 | all over the place and make sure that processes are closed down. It is 55 | quite confusing for a newcomer though, hence it is mentioned here. 56 | 57 | ## The ETS table plus governor 58 | 59 | A process is spawned to create and govern an ETS table. The idea is 60 | that lookups don't touch the governor at all, but it is used to 61 | monitor the users of the table - and it is used to serialize access to 62 | the table when needed. 63 | 64 | Suppose for instance that something must only happen once. By sending 65 | a call to the governor, we can serialize the request and thus make 66 | sure only one process will initialize the thing. 
67 | 68 | ## Message is a process 69 | 70 | (Stolen from a stackoverflow answer by jlouis@) 71 | 72 | Let `M` be an Erlang term() which is a *message* we send around in the 73 | system. One obvious way to handle `M` is to build a pipeline of 74 | processes and queues. `M` is processed by the first worker in the 75 | pipeline and then sent on to the next queue. It is then picked up by 76 | the next worker process, processed again and put into a queue. And so 77 | on until the message has been fully processed. 78 | 79 | The perhaps not-so-obvious way is to define a process `P` and then 80 | hand `M` to `P`. We will notate it as `P(M)`. Now the message itself 81 | is a *process* and not a piece of *data*. `P` will do the same job 82 | that the workers did in the queue-solution but it won't have to pay 83 | the overhead of sticking the `M` back into queues and pick it off 84 | again and so on. When the processing `P(M)` is done, the process will 85 | simply end its life. If handed another message `M'` we will simply 86 | spawn `P(M')` and let it handle that message concurrently. If we get a 87 | set of processes, we will do `[P(M) || M <- Set]` and so on. 88 | 89 | If `P` needs to do synchronization or messaging, it can do so without 90 | having to "impersonate" the message, since it *is* the 91 | message. Contrast with the worker-queue approach where a worker has to 92 | take responsibility for a message that comes along it. If `P` has an 93 | error, only the message `P(M)` affected by the error will 94 | crash. Again, contrast with the worker-queue approach where a crash in 95 | the pipeline may affect other messages (mostly if the pipeline is 96 | badly designed). 97 | 98 | So the trick in conclusion: Turn a message into a process that 99 | *becomes* the message. 100 | 101 | The idiom is 'One Process per Message' and is quite common in 102 | Erlang. The price and overhead of making a new process is low enough 103 | that this works. 
You may want some kind of overload protection should 104 | you use the idea however. The reason is that you probably want to put 105 | a limit to the amount of concurrent requests so you *control* the load 106 | of the system rather than blindly let it destroy your servers. One 107 | such implementation is *Jobs*, created by Erlang Solutions, see the 108 | [jobs](https://github.com/esl/jobs) repository at github. Ulf Wiger is 109 | presenting it at 110 | [Erlang Factory Lite (jobs talk)](http://www.erlang-factory.com/conference/ErlangFactoryLiteLA/speakers/UlfWiger). 111 | 112 | As Ulf hints in the talk, we will usually do some preprocessing 113 | outside `P` to parse the message and internalize it to the Erlang 114 | system. But as soon as possible we will make the message `M` into a 115 | *job* by wrapping it in a process (`P(M)`). Thus we get the benefits 116 | of the Erlang Scheduler right away. 117 | 118 | There is another important ramification of this choice: If processing 119 | takes a long time for a message, then the preemptive scheduler of 120 | Erlang will ensure that messages with fewer processing needs still get 121 | handled quickly. If you have a limited amount of worker queues, you 122 | may end up with many of them being clogged, hampering the throughput 123 | of the system. 124 | 125 | # Dependencies 126 | 127 | Etorrent currently uses two dependencies: 128 | 129 | ### Gproc 130 | 131 | GProc is a process table written by Ulf Wiger at Erlang Solutions. It 132 | basically maintains a lookup table in ETS for processes. We use this 133 | throughout etorrent whenever we want to get hold of a process which is 134 | not "close" in the supervisor tree. We do this because it is way 135 | easier than to propagate around process ids all the time. GProc has a 136 | distributed component which we are not using at all. 
137 | 138 | ### riak_err 139 | 140 | The built-in SASL error logger of Erlang has problems when the 141 | messages it tries to log goes beyond a certain size. It manifests 142 | itself by the beam process taking up several gigabytes of memory. The 143 | [riak_err](http://github.com/basho/riak_err) application built by 144 | Basho technologies remedies this problem. It is configured with a 145 | maximal size and will gracefully limit the output so these kinds of 146 | errors does not occur. 147 | 148 | The downside is less configurability. Most notably, etorrent is quite 149 | spammy in its main console. One way around it is to connect to 150 | etorrent via another node, 151 | 152 | erl -name 'foo@127.0.0.1' \ 153 | -remsh 'etorrent@127.0.0.1' \ 154 | -setcookie etorrent 155 | 156 | and then carry out commands from there. 157 | 158 | # General layout of the source code 159 | 160 | This is the hierarchy: 161 | 162 | * [/apps/](https://github.com/jlouis/etorrent/tree/master/apps) - Container for applications 163 | * [/apps/etorrent/](https://github.com/jlouis/etorrent/tree/master/apps/etorrent) - The etorrent application 164 | * /apps/etorrent/doc/ - edoc-generated output for the etorrent app. 165 | * /apps/etorrent/ebin - Where .beam files are compiled to 166 | * [/apps/etorrent/include](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/include) - include (.hrl) files 167 | * [/apps/etorrent/priv](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/priv) - private data for the app. 168 | * [/apps/etorrent/src](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src) - main source code 169 | 170 | The above should not pose any trouble with a seasoned or beginning 171 | erlang hacker. 172 | 173 | * [/apps/etorrent/priv/webui/htdocs/](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/priv/webui/htdocs) 174 | 175 | This directory contains the data used by the Web UI system. 
It is 176 | basically dumping ground for Javascript code and anything static we 177 | want to present to a user of the applications web interface. 178 | 179 | * [/deps/](https://github.com/jlouis/etorrent/tree/master/deps) - dependencies are downloaded to here 180 | * /dev/ - when you generate development embedded VMs they go here 181 | * [/documentation/](https://github.com/jlouis/etorrent/tree/master/documentation) - haphazard general documentation about etorrent 182 | * [/tools/](https://github.com/jlouis/etorrent/tree/master/tools) - small tools for manipulating and developing the source 183 | * [/rel/](https://github.com/jlouis/etorrent/tree/master/rel) - stuff for making releases 184 | 185 | The release stuff follow the general conventions laid bare by *rebar* 186 | so I wont be covering it here. The file 187 | [reltool.config](https://github.com/jlouis/etorrent/tree/master/rel/reltool.config) 188 | is important though. It defines what to pack up in an embedded 189 | etorrent Erlang VM and what applications to start up at boot. 190 | 191 | # Building a development environment 192 | 193 | This is documented in README.md, so follow the path from there. If it 194 | doesn't work, get back to us so we can get the documentation updated. 195 | 196 | # Code walk-through. 197 | 198 | This is the meaty part. I hope it will help a hacker to easily see 199 | what is happening in the application. And make it easier for him or 200 | her to understand the application. If we can give the hacker a 201 | bootstrap-start in understanding, we have succeeded. 202 | 203 | There is a supervisor-structure documented by the date in the 204 | [/documentation](https://github.com/jlouis/etorrent/tree/master/documentation). You 205 | may wish to peruse it and study it while reading the rest of this document. 206 | 207 | ## Top level structure. 
208 | 209 | The two important program entry points are: 210 | 211 | * [etorrent_app.erl](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_app.erl) 212 | * [etorrent_sup.erl](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_sup.erl) 213 | 214 | The `etorrent_app` defines what is needed to make etorrent an 215 | application. When we make a release through the release system, we 216 | arrange that this application will be started automatically. So that 217 | is what makes etorrent start up. The important function is 218 | `etorrent_app:start/2` which does some simple configuration and then 219 | launches the main supervisor, `etorrent_sup`. 220 | 221 | The main etorrent supervisor starts up *lots of stuff*. The things 222 | started fall into three categories: 223 | 224 | * Global book-keepers. These processes keep track of global state 225 | and are not expected to die. They are usually fairly simple 226 | doing very few things by themselves. Most of them 227 | store state in ETS tables, and reading can usually bypass the 228 | governing process for speed. 229 | 230 | * Global processes. Any process in etorrent is either *global*, 231 | *torrent local* or *peer local* depending on whether it is used by 232 | all torrents, by a single torrent or by a single peer 233 | respectively. This split is what gives us fault tolerance. If a 234 | torrent dies, isolation gives us that only that particular torrent 235 | will; if a peer dies, it doesn't affect other peers. 236 | 237 | * Supervisors. There are several. The most important is the one 238 | maintaining a pool of torrents. Other maintain sub-parts of the 239 | protocol which are not relevant to the initial understanding. 240 | 241 | An important supervisor maintains the Directory Watcher. 
This process, 242 | the 243 | [etorrent_dirwatcher](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_dirwatcher.erl) 244 | is a gen_server which is the starting entry point for the life cycle 245 | of a torrent. It periodically watches the directory and when a torrent 246 | is added, it will execute 247 | [etorrent_ctl:start/1](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_ctl.erl) 248 | to actually start the torrent. 249 | 250 | The 251 | [etorrent_ctl](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_ctl.erl) 252 | gen_server is an interface to start and stop torrents for 253 | real. Starting a torrent is very simple. We start up a torrent 254 | supervisor and add it to the pool of currently alive torrents. Nothing 255 | more happens at the top level -- the remaining work is by the torrent 256 | supervisor, found in [etorrent_torrent_sup](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_torrent_sup.erl). 257 | 258 | ### The etorrent module 259 | 260 | The module [etorrent](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent.erl) is an interface to etorrent via the erl 261 | shell. Ask it to give help by running `etorrent:help()`. 262 | 263 | ## Torrent supervisors 264 | 265 | The torrent supervisor is 266 | [etorrent_torrent_sup](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_torrent_sup.erl). 267 | This one will initially spawn supervisors to handle a pool of 268 | filesystem processes and a pool of peers. And finally, it will spawn a 269 | controller process, which will control the torrent in question. 270 | 271 | Initially, the control process will wait in line until it is its turn 272 | to perform a *check* of the torrent for correctness. 
To make the check 273 | fast, there is a global process, the 274 | [fast_resume](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_fast_resume.erl) 275 | process which persists the check data to disk every 5 minutes. If the 276 | fast-resume data is consistent this is used for fast 277 | checking. Otherwise, the control-process will check the torrent for 278 | pieces missing and pieces which we have. It will then spawn a process 279 | in the supervisor, by adding a child, the [tracker_communication](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_tracker_communication.erl) 280 | process. 281 | 282 | Tracker communication will contact the tracker and get a list of 283 | peers. It will report to the tracker that we exist, that we started 284 | downloading the torrent, how many bytes we have 285 | downloaded/uploaded -- and how many bytes there are left to download. The 286 | tracker process lives throughout the life-cycle of a torrent. It 287 | periodically contacts the tracker and it will also make a last contact 288 | to the tracker when the torrent is *stopped*. This final contact is 289 | ensured since the tracker-communicator process traps exits. 290 | 291 | The peers are then sent to a *global* process, the 292 | [peer_mgr](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_peer_mgr.erl), 293 | which manages peers. Usually the peers will be started by adding them 294 | back into the peer pool of the right torrent right away. However, if 295 | we have too many connections to peers they will enter a queue, waiting 296 | in order. Also, peers will be filtered away, if we are already connected to 297 | them. There is little reason to connect to the same peer multiple times. 298 | 299 | **Incoming peer connections** To handle incoming connections, we have 300 | a global supervisor maintaining a small pool of accepting 301 | processes. 
When a peer connects to us, we perform part of the 302 | handshake in the acceptor-process. We confer with several other parts 303 | of the system. As soon as the remote peer transfers the InfoHash, we 304 | look it up locally to see if this is a torrent we are currently 305 | working on. If not, we kill the connection. If the torrent is alive, 306 | we check to see if too many peers are connected, otherwise we allow 307 | it, blindly (there is an opportunity for an optimization here). 308 | 309 | ## Peers 310 | 311 | A peer is governed by a supervisor as well, the 312 | [etorrent_peer_sup](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_peer_sup.erl). It will control three gen_servers: One for 313 | sending messages to the remote peer and keeping a queue of outgoing 314 | messages 315 | ([etorrent_peer_send](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_peer_send.erl)). One 316 | for receiving and decoding incoming messages ([etorrent_peer_recv](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_peer_recv.erl)). And 317 | finally one for controlling the communication and running the peer 318 | wire protocol we use to communicate with peers ([etorrent_peer_control](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_peer_control.erl)). 319 | 320 | The supervisor is configured to die at the *instant* one of the other 321 | processes die. And the peer pool supervisor parent assumes everything 322 | are temporary. This means that an error in a peer will kill all peer 323 | processes and it will remove the peer. There is a *monitor* set by the 324 | `peer_mgr` on the peer, so it may try to connect in more peers when it 325 | registers death of a peer. In turn, this behaviour ensures progress. 326 | 327 | Peers are rather *autonomous* to the rest of the system. But they are 328 | collaborating with several torrent-local and global processes. 
In the 329 | following, we will try to explain what these collaborators do and how 330 | they work. 331 | 332 | ## File system 333 | 334 | The file system code, hanging on [etorrent_torrent_sup](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_torrent_sup.erl)) as a 335 | supervision tree maintains the storage of the file on-disk. Peers 336 | communicate with these processes to read out pieces and store 337 | pieces. The FS processes is split so there is a process per file. And 338 | that file-process governs reading and writing. There is also a 339 | directory-process which knows about how to write and read a piece by 340 | its offsets and lengths into files. For multifile-torrents a single 341 | piece read may span several files and this directory makes it possible 342 | to know whom to ask. 343 | 344 | ## Chunk/Piece Management 345 | 346 | There are two, currently global, processes called the 347 | [etorrent_piece_mgr](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_piece_mgr.erl) 348 | and 349 | ([etorrent_chunk_mgr](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_chunk_mgr.erl). The 350 | first of these, the Piece Manager, keeps track of pieces for torrents 351 | we are downloading. In particular it maps what pieces we have 352 | downloaded and what pieces we are missing. It is used by peer 353 | processes so they can tell remote peers what pieces we have. 354 | 355 | The BitTorrent protocol does not exchange pieces. It exchanges 356 | *slices* of pieces. These are in etorrent terminology called *chunks* 357 | although other clients use *slices* or *sub-pieces* for the same 358 | thing. To keep track of *chunks* we have the Chunk Manager. When a 359 | piece is chosen for downloading, it is "chunked" -- split -- into 360 | smaller 16 Kilobyte chunks. These are then requested from peers. 
Note 361 | we prefer to get all chunks of a piece downloaded as fast as 362 | possible -- so we may mark the piece as done and then exchange it with 363 | other peers. Hence the chunk selecting algorithm prefers already 364 | chunked up pieces. 365 | 366 | Another important part of the chunk manager is to ensure exclusive 367 | access to a chunk. There is little reason to download the same chunk 368 | from multiple peers. We earmark chunks to peers. If the peer dies, a 369 | monitor ensures we get the pieces back into consideration for other 370 | peers. Finally, there is a special *endgame* mode which is triggered 371 | when we are close to having the complete file downloaded. In this 372 | mode, we ignore the exclusivity rule and spam requests to all 373 | connected peers. The current rule is that when there are no more 374 | pieces to chunk up and we can't get exclusive pieces, we "steal" from 375 | other peers. A random shuffle of remaining pieces tries to eliminate 376 | too many multiples. 377 | 378 | **Note:** This part of the code is under rework at the moment. In 379 | particular we seek to make the processes torrent-local rather than 380 | global and maintain the piece maps and chunk maps differently. Talk to 381 | Magnus Klaar for the details. 382 | 383 | ## Choking 384 | 385 | A central point to the BitTorrent protocol is that you keep, perhaps, 386 | 200 connections to peers; yet you only communicate to a few of them, 387 | some 10-20. This in turn avoids congestion problems in TCP/IP. The 388 | process of choosing whom to communicate with is called 389 | *choking/unchoking*. There is a global server process, the 390 | [etorrent_choker](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_choker.erl), 391 | responsible for selecting the peers to communicate with. It wakes up 392 | every 10 seconds and then re-chokes peers. 
393 | 394 | The rules are quite intricate and it is advised to study them in 395 | detail if you want to hack on this part. To make the decision several 396 | parameters are taken into account: 397 | 398 | * The rate of the peer, either in send or receive direction. 399 | * Is the peer choking us? 400 | * Is the peer interested in pieces we have? 401 | * Are we interested in pieces the peer has? 402 | * Is the peer connected to a torrent we seed or leech? 403 | 404 | Furthermore, we have a circular ring of peers used for *optimistic 405 | un-choking*. The ring is moved forward a notch every 30 seconds and 406 | ensures that *all* peers eventually get a chance at being unchoked. If 407 | a peer is better than those we already download from, we will by this 408 | mechanism eventually latch onto it. 409 | 410 | ## UDP tracking 411 | 412 | BEP 15 adds the ability to contact a tracker by UDP packets rather 413 | than use HTTP over TCP as one would normally do. The interface to the 414 | UDP tracking system is the process 415 | [etorrent_udp_tracker_mgr](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_udp_tracker_mgr.erl) 416 | which lets us do announce requests over UDP with timeouts. The UDP 417 | tracking system itself is controlled by a supervisor, 418 | [etorrent_udp_tracker_sup](https://github.com/jlouis/etorrent/tree/master/apps/etorrent/src/etorrent_udp_tracker_sup.erl). 419 | 420 | Under this supervisor, several processes are hanging. First and 421 | foremost, we have a pool supervisor of type `simple_one_for_one`. Each 422 | call to announce will use the *process-per-request* idiom and turn 423 | itself into a process underneath this pool. It will then register 424 | itself as the recipient of messages it sends off to the server. This 425 | registration happens in a local ETS table. 
426 | 427 | Once, a UDP packet arrives back the `etorrent_udp_tracker_proto` 428 | process is a gen_server responsible for decoding of incoming 429 | messages. After a successful decode, it dispatches the incoming 430 | message by looking it up in the ETS table and handing it to the right 431 | process in the pool of handlers. The handler process then either replies back 432 | to the torrent process (with `gen_server:reply/2`) or alters its 433 | internal state, sending off another UDP packet in turn. 434 | 435 | The system has several timeouts at different levels which may 436 | occur. Much of the complexity is due to these, but usually timeouts 437 | are kept locally in handlers. Another important part is that the 438 | handshake is like TCP. Hence, we first obtain a token from the tracker 439 | we then use subsequently for requests until the token times out. Some 440 | of the complexity is due to the reuse of the token. 441 | 442 | ## DHT 443 | 444 | To be written. 445 | 446 | ## WebUI 447 | 448 | The WebUI is a quite simple system. We have a directory of static data 449 | served by the `inets` application. Some of this data is jQuery along 450 | with a couple of helper modules. Upon request of the status page, 451 | async requests are made back to the `inets` application and it feeds 452 | jQuery with the current status of the system. Then the jQuery system 453 | carries out the plotting of the data. The only relevant file on the 454 | Erlang-side is `etorrent_webui.erl`. 455 | 456 | ## Event handling 457 | 458 | To be written. 459 | 460 | # So you want to hack etorrent? Cool! 461 | 462 | I am generally liberal, but there is one thing you should not do: 463 | 464 | Patch Bombs. 465 | 466 | A patch bomb is when a developer sits in his own corner for 4-5 months 467 | and suddenly comes by with a patch of gargantuan size, say several 468 | thousand lines of code changes. I won't accept such a patch - ever. 
469 | 470 | If you have a cool plan, I encourage you to mail me: 471 | jesper.louis.andersen@gmail.com so we can set up a proper mailing list 472 | and document our design discussions in the list archives. That way, we 473 | may be able to break the problem into smaller pieces and we may get a 474 | better solution by discussing it beforehand. 475 | 476 | If you have a bug-fix or smaller patch it is ok to just come with it, 477 | preferably on google code as a regular patch(1) or a git-patch. If you 478 | know it is going to take you a considerable amount of time fixing the 479 | problem, submit an issue first and then hack away. That way, others 480 | may be able to chime in and comment on the problem. 481 | 482 | *Remember to add yourself to the AUTHORS list.* 483 | 484 | ## "I just want to hack, what is there to do?" 485 | 486 | Check the Issue tracker for enhancement requests and bugs. Ask me. I 487 | might have something you would like to do. The TODO list are "Things 488 | we certainly need to do!" and the issue tracker is used for "Things we 489 | may do." Note that people can vote up stuff on the issue tracker. 490 | 491 | -------------------------------------------------------------------------------- /INSTALL: -------------------------------------------------------------------------------- 1 | ** INSTALLATION INSTRUCTIONS ** 2 | 3 | FOR NOW, READ THE README FILE AND FOLLOW THAT. THIS IS HOW I 4 | WOULD LIKE IT TO BE OVER TIME. 5 | 6 | DARK IN HERE, ISN'T IT? 7 | 8 | ------------------------------------------------------------ 9 | Consult the README file for now. 10 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ### Etorrent Makefile 2 | PROJECT = etorrent 3 | 4 | ## Version here is used by the test suite. It should be possible to figure 5 | ## it out automatically, but for now, hardcode. 
6 | version=1.2.1 7 | 8 | dev-release: deps 9 | relx -o dev/$(PROJECT) -c relx-dev.config 10 | 11 | release: deps 12 | relx -o rel/$(PROJECT) 13 | 14 | clean-release: 15 | rm -fr rel/$(PROJECT) 16 | 17 | DEPS = etorrent_core 18 | dep_etorrent_core = git://github.com/jlouis/etorrent_core.git master 19 | 20 | include erlang.mk 21 | 22 | console: 23 | dev/etorrent/bin/etorrent console \ 24 | -pa ../../deps/*/ebin 25 | 26 | 27 | -------------------------------------------------------------------------------- /NEWS: -------------------------------------------------------------------------------- 1 | Version 1.2.0: 2 | 3 | This version sees a number of new highlights: 4 | 5 | Highlights: 6 | * (Magnus Klaar) A complete DHT implementation. This is outstanding 7 | work. Note that it should be disabled if you use private 8 | trackers. We don't heed the private-flag yet (BEP-27) 9 | * (Magnus Klaar) A completely new disk IO layer which is more parallel 10 | than the old one. Needless to say, it is much faster. 11 | * Further work via rebars release handler now lets us build proper releases, 12 | for production and/or development (See the README.md file for 13 | instructions). 14 | * Support for BEP-15: Udp tracker protocol 15 | * Support for BEP-12: Multitracker protocol 16 | * Numerous improvement to the webUI. 17 | * Etorrent now persists the amount of upload and download over sessions. 18 | This fixes correct ratios even across stops/starts of etorrent. 19 | * Use the riak_err error handler. This plugs a bug where death of many 20 | processes would make SASL use too much memory. 21 | 22 | Fixes: 23 | * Remove mnesia from the code base. The new system is running entirely on 24 | ETS. 25 | * Performance improvements in hot parts of the code. 26 | * Begin adding tests to the code, EUnit and Erlang QuickCheck mini. 27 | * Add profiling via eprof as a tunable knob. 28 | * Fix an outstanding bug in the choker code. Etorrent is now much better 29 | at giving back. 
30 | * Fix a race condition on the gproc table. 31 | * Improve supervisor tree shutdown 32 | * Change the bcoding module to be less defensive and more erlang-like 33 | * Change from uwiger/gproc to esl/gproc. 34 | * Numerous fixes to robustness regarding slow disks. 35 | 36 | 37 | Version 1.1.2: 38 | 39 | Build system restructuring, 40 | 41 | * This version adds support for a new build system. The README has 42 | been updated with the information about how to build the software 43 | now. Compared to earlier, we can now build a release which is a 44 | stand-alone version of Erlang/OTP and etorrent bundled up nicely. 45 | * Locations of log files has been vastly improved. 46 | * Magnus Klaars work on DHT has been included. It is not prime-time 47 | ready yet, but it is a start. 48 | 49 | Version 1.1.1: 50 | 51 | Small quick bugfix release, 52 | 53 | * Include etorrent.erl, which was not tracked by the repository. 54 | 55 | Version 1.1.0: 56 | 57 | In this version, we have mostly cleaned up some code paths to make 58 | room for further improvements and adaptations. From v1.0.0 we have the 59 | following important changes: 60 | 61 | * Add boxplots next to the sparklines in the WebUI. Boxplots will 62 | quickly tell you the current speed of a torrent file which is 63 | being processed by etorrent. 64 | * Add backwards compatibility for Erlang R13B-x. 65 | * Change the chunk_mgr ETS table to a bag rather than a set. We used 66 | bag semantics by implementing them as lists anyway and there is a 67 | slight improvement in letting the C-code handle the bag. It also 68 | shaves off about 50 lines of code. 69 | * Use gproc for process registration. This greatly simplifies the 70 | code at the expensive of a dependency on gproc and gen_leader 71 | (gen_leader we don't use for anything, but gproc needs). 72 | * API calls are now again called etorrent:h() and such. Moved from 73 | the application code in etorrent_app. 
74 | * Fix a bug in the WebUI where an icon was referenced incorrectly. 75 | 76 | Furthermore, we now support some new bittorrent protocol extensions: 77 | 78 | * Support for extended messaging (BEP-10). 79 | * Support for multi-tracker torrents (BEP-12). To be really useful, 80 | we also need support for BEP-15 which is UDP tracking. 81 | 82 | Version 1.0.1: 83 | 84 | Minor bug-fix release: 85 | 86 | * (WebUI) Sparklines are reset properly when torrents complete 87 | * (WebUI) Boxplots beside the sparklines. 88 | * (Bug) Tracker is now correctly told about completion. 89 | * (Bug) Up/Downloaded is now reported as the concensus in the BT community. 90 | 91 | Version 1.0: 92 | 93 | First release I admit others might find useful. It is also the first release 94 | in 2 years. Again, we incrementally improve over the latest release by 95 | increasing overall robustness of the application. CPU usage and Memory usage 96 | has in general been improved in this release. 97 | 98 | Highlights: 99 | 100 | (Tuncer Ayaz): Estimate the ETA of torrents. Guess at when they are done. 101 | 102 | When processes are stopping, correctly clean up ETS tables. The solution we 103 | use is to place monitors on key processes. Bookkeeping processes then act on 104 | 'DOWN' messages and remove ETS entries again. 105 | 106 | All module-exported functions now have edoc tags and -spec entries. 107 | 108 | We now use rebar for building the application. To come is a standalone-node 109 | provisioning. For now the 'run' makefile target is to be used. Also adapt the 110 | file layout mandated by rebar. 111 | 112 | Adaptive active/passive sockets. When a peer is slow, we manage the socket 113 | ourselves. This gives fine granularity of its speed. When the peer socket 114 | jumps above a high watermark, it is made active, pushing the overhead to the 115 | C-layer of the VM. 116 | 117 | Use an LRU-replacement scheme on open files. Limit the number of open files 118 | to 128 at once. 
This enables torrents with more than 1024 - OPEN_SOCKETS 119 | files to be downloaded. 120 | 121 | Add a WebUI. The inets server provides a simple RPC-service for a primarily 122 | Javascript-enabled web-frontend. It can only display the basic overview at 123 | the moment, but it should be easy to extend with new functionality. 124 | 125 | Numerous additional code cleanups here and there. It should be even easier to 126 | read and understand now. 127 | 128 | Make GitHub the official repository place: 129 | 130 | http://github.com/jlouis/etorrent 131 | 132 | Use http://github.com/jlouis/etorrent/issues for issues. Issues can be voted 133 | on, so if one prefers one thing over the other, don't hesitate to hint me :) 134 | 135 | Version 0.9: 136 | 137 | This is yet another Tech. Preview release. The torrent client works 138 | to the point where it can be used to download things, but it have 139 | not seen much testing yet and there are numerous places it can be 140 | improved still. Yet, there are so many changes, that it warrants a 141 | new release. 142 | 143 | The current regressions revolve around a high CPU usage at times. We 144 | expect to tackle this problem next with some profiling. We also 145 | expect to tackle fast resume support as the main "new thing". And 146 | some 10 things in the issue tracker and the TODO lists for the 147 | next release. I don't expect it to follow as fast as this one. 148 | 149 | One development methodology change worth mentioning: the git 150 | repository now uses several branches laid out as described in 151 | doc/git.txt. Tracking the 'master' branch ought to provide you with 152 | a system that is stable at all times, while tracking the 'next' 153 | branch gives you the 'cooking pot' of new things that ought to be 154 | tested. We'll try to keep 'next' stable, though it may have problems 155 | at times. 
156 | 157 | Changes: 158 | 159 | - There is a set of new commands for viewing what is currently 160 | cooking in the torrent client. etorrent:l/0, etorrent:s/1, 161 | etorrent:h/0. Implemented with help from Tuncer Ayaz. 162 | 163 | - Changed build infrastructure. No more autoconf. It simplifies the 164 | build structure considerably. Introduce the use of EMakefile for 165 | building the erlang parts of the system. Reinstate all make targets 166 | and add a 'tags' target for building a TAGS file. 167 | 168 | - etorrent now correctly handles the 'min_interval' tracker response 169 | parameter. It is not strictly part of the spec, but everybody uses 170 | it. 171 | 172 | - Add support for installing etorrent. A shell-script, etorrentctl, 173 | is provided to control the etorrent daemon. The installer, while 174 | overly simple, has not seen much testing as of yet and may not 175 | work. It will be tested before v1.0. 176 | 177 | - etorrent no longer pre-fills files it want to download with 178 | junk. It uses the semantics of fseek() to make a file of the right 179 | size initially. 180 | 181 | - several ETS restructurings has brought the memory use 182 | down. Before, etorrent would take some 660 megabytes of memory 183 | running 20 torrents. Now it is more like 50-80 megabytes for 20 184 | torrents. More can be shaved but this is a good start. Also, memory 185 | usage still occasionally spikes because we are doing nothing in certain 186 | situations to limit it. 187 | 188 | - New choking/unchoking algorithm, based on a combination of 189 | BitTornado/BitTorrent/Transmission. This is not the smartest one can 190 | do, but it follows the spec more or less precisely. 191 | 192 | - Event Publisher. A gen_event OTP behaviour one can subscribe to and 193 | get information about the system. A logfile subscriber is there by 194 | default. 195 | 196 | - Rate calculation optimizations. 
etorrent now uses a running 197 | average over a period of up to 20 seconds to measure the rate of a 198 | peer. This yields a more fair measurement of individual peers so we 199 | claim the best peers. While here, change the sockets to be passive 200 | for now. It bumps the CPU-usage, but makes the rate calculation more 201 | precise. There are several optimizations possible revolving around 202 | passive/active sockets. 203 | 204 | - Robustize the supervisor tree. etorrent is now less likely to die 205 | due to a crash somewhere in the tree. It is not entirely safe yet, 206 | but it will be during the next releases. 207 | 208 | Version 0.8: 209 | 210 | First Technology Preview. 211 | 212 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ; -*- Mode: Markdown; -*- 2 | ; vim: filetype=none tw=76 expandtab 3 | 4 | # ETORRENT 5 | 6 | ETORRENT is a bittorrent client written in Erlang. The focus is on 7 | robustness and scalability in number of torrents rather than in pure 8 | speed. ETORRENT is mostly meant for unattended operation, where one 9 | just specifies what files to download and gets a notification when 10 | they are. 11 | 12 | ## Flag days 13 | 14 | Flag days are when you need to do something to your setup 15 | 16 | * *2010-12-10* You will need a rebar with commit 618b292c3d84 as we 17 | got some fixes into rebar. 18 | * *2010-12-06* We now depend on riak_err. You will need to regrab 19 | dependencies. 20 | * *2010-12-02* The fast-resume-file format has been updated. You 21 | may have to delete your fast_resume_file though the system was 22 | configured to do a silent system upgrade. 23 | 24 | ## Build Status 25 | 26 | Currently, we are running build-bots through 27 | [http://travis-ci.org](Travis CI). 
[![Build Status](http://travis-ci.org/jlouis/etorrent.png?branch=master)](http://travis-ci.org//jlouis/etorrent) 28 | 29 | 30 | ## Why 31 | 32 | ETORRENT was mostly conceived as an experiment in how easy it would be 33 | to write a bittorrent client in Erlang. The hypothesis is that the 34 | code will be cleaner and smaller than comparative bittorrent clients. 35 | 36 | ## Maturity 37 | 38 | The code is at this point somewhat mature. It has been used in several 39 | scenarios with a good host of different clients and trackers. The code 40 | base is not known to act as a bad p2p citizen, although it may be 41 | possible that it is. 42 | 43 | The most important missing link currently is that only a few users 44 | have been testing it - so there may still be bugs in the code. The 45 | `master` branch has been quite stable for months now however, so it is 46 | time to get some more users for testing. Please report any bugs, 47 | especially if the client behaves badly. 48 | 49 | ## Currently supported BEPs: 50 | 51 | * BEP 03 - The BitTorrent Protocol Specification. 52 | * BEP 04 - Known Number Allocations. 53 | * BEP 05 - DHT Protocol 54 | * BEP 10 - Extension Protocol 55 | * BEP 12 - Multitracker Metadata Extension. 56 | * BEP 15 - UDP Tracker Protocol 57 | * BEP 23 - Tracker Returns Compact Peer Lists. 58 | 59 | ## Required software: 60 | 61 | * rebar - you need a working rebar installation to build etorrent. 62 | The rebar must have commit 618b292c3d84. It is known that rebar 63 | version `rebar version: 2 date: 20110310_155239 vcs: git 2e1b4da` 64 | works, and later versions probably will too. 65 | * Erlang/OTP R13B04 or R14 - the etorrent system is written in 66 | Erlang and thus requires a working Erlang distribution to 67 | work. It may work with older versions, but has mostly been tested 68 | with newer versions. 69 | 70 | If you have the option, running on R14B02 is preferred. 
The 71 | developers are usually running on fairly recent Erlang/OTP 72 | releases, so you have much better chances with these systems. 73 | * A UNIX-derivative or Windows as the operating system 74 | Support has been tested by mulander and is reported to work with 75 | some manual labor. For details see the Windows getting started section. 76 | 77 | ## GETTING STARTED 78 | 79 | 0. `make deps` - Pull the relevant dependencies into *deps/* 80 | 1. `make compile` - this compiles the source code 81 | 2. `make rel` - this creates an embedded release in *rel/etorrent* which 82 | can subsequently be moved to a location at your leisure. 83 | 3. edit `${EDITOR} rel/etorrent/etc/app.config` - there are a number of directories 84 | which must be set in order to make the system work. 85 | 4. check `${EDITOR} rel/etorrent/etc/vm.args` - Erlang args to supply 86 | 5. be sure to protect the erlang cookie or anybody can connect to 87 | your erlang system! See the Erlang user manual in [distributed operation](http://www.erlang.org/doc/reference_manual/distributed.html) 88 | 6. run `rel/etorrent/bin/etorrent console` 89 | 7. drop a .torrent file in the watched dir and see what happens. 90 | 8. call `etorrent:help()`. from the Erlang CLI to get a list of available 91 | commands. 92 | 9. If you enabled the webui, you can try browsing to its location. By default the location is 'http://localhost:8080'. 93 | 94 | ## GETTING STARTED (Windows) 95 | 0. Obviously get and install erlang from erlang.org. This process was tested with R14B01 release. 96 | You may want to add the bin directory to your PATH in order to reduce the length of the commands you will enter later on. 97 | 1. Install [msysgit](http://code.google.com/p/msysgit/). Tested with [1.7.3.1.msysgit.0](http://code.google.com/p/msysgit/downloads/detail?name=Git-1.7.3.1-preview20101002.exe&can=2&q=). 98 | 2. Install [Win32 OpenSSL](http://www.slproweb.com/products/Win32OpenSSL.html). 
99 | The installer hang on me midway through the process but the libs were properly copied to C:\Windows\system32. 100 | 3. Confirm that your crypto works correctly by running `crypto:start().` from an erlang shell (Start->Erlang OTP R14B01->Erlang). 101 | The shell should respond with `ok`. If you get an error then your openssl libraries are still missing. 102 | 4. Open up a git bash shell and cd to a directory you want to work in. 103 | 5. Clone the rebar repository `git clone https://github.com/basho/rebar.git` 104 | 6. From a regular cmd.exe shell `cd rebar`. Now if you added the erlang bin directory to your PATH then you can simply run `bootstrap.bat`. 105 | If you didn't add Erlang's bin to your path then issue the following command: 106 | 107 | `"C:\Program Files\erl5.8.2\bin\escript.exe" bootstrap` 108 | 109 | Adjust the path to your Erlang installation directory. From now on, use this invocation for escript.exe and erl.exe. I'll assume it's on PATH. 110 | You should now have a `rebar` file created. If you have Erlangs bin dir on your PATH then you may want to also add the rebar directory to your 111 | PATH. This will allow you to use the rebar.bat script which will also reduce the amount of typing you will have to do. 112 | 7. Clone etorrent and copy the `rebar` file into it (unless you have it on path). 113 | 8. Now, we need to satisfy the dependencies. This should be done by rebar itself but I couldn't get it to work correctly with git. 114 | This point describes how to do it manually. 115 | 116 | `mkdir deps` # this is where rebar will look for etorrent dependencies 117 | 118 | `escript.exe rebar check-deps` # this will give you a list of the missing dependencies and their git repos. 
119 | 120 | For each dependency reported by check-deps perform the following command in a git bash shell (be sure to be in the etorrent directory) 121 | 122 | `git clone git://path_to_repo/name.git deps/name` 123 | 124 | Be sure to run `escript.exe rebar check-deps` after cloning each additional repo as new dependencies might be added by them. 125 | 126 | For the time of this writing (2011-02-13) I had to clone the following repositories: 127 | 128 | `git clone git://github.com/esl/gproc.git deps/gproc` 129 | 130 | `git clone git://github.com/esl/edown.git deps/edown` 131 | 132 | `git clone git://github.com/basho/riak_err.git deps/riak_err` 133 | 9. `escript.exe rebar compile` to compile the application from a cmd.exe prompt. 134 | 10. `escript.exe rebar generate` this creates an embedded release in *rel/etorrent* which 135 | can subsequently be moved to a location at your leisure. This command may take some time so be patient. 136 | 11. Edit `rel/etorrent/etc/app.config` - there are a number of directories which must be set in order to make the system work. 137 | Be sure each directory exists before starting etorrent. You also have to change all paths starting with `/` (ie. `/var/lib...`). 138 | When setting paths use forward slashes. For example `{dir, "D:/etorrent/torrents"},`. 139 | 12. Check `rel/etorrent/etc/vm.args` - Erlang args to supply 140 | 13. Be sure to protect the erlang cookie or anybody can connect to 141 | your erlang system! See the Erlang user manual in [distributed operation](http://www.erlang.org/doc/reference_manual/distributed.html) 142 | 14. We can't run the etorrent script because it's written in bash. So in order to start ettorent cd from a cmd.exe shell into the `rel\etorrent` 143 | directory. 
And perform the following command 144 | 145 | `erts-5.8.2\bin\erl.exe -boot release\1.2.1\etorrent -embedded -config etc\app.config -args_file etc\vm.args` 146 | 147 | Be sure to substitute the version number in the release path to the current etorrent release version. Do the same for the erts version. 148 | Allow port communication when/if the Windows firewall asks for it. 149 | 15. drop a .torrent file in the watched dir and see what happens. 150 | 16. call `etorrent:help()`. from the Erlang CLI to get a list of available commands. 151 | 17. If you enabled the webui, you can try browsing to its location. By default the location is 'http://localhost:8080'. 152 | 153 | ## Testing etorrent 154 | 155 | Read the document [etorrent/TEST.md](/jlouis/etorrent/tree/master/TEST.md) 156 | for how to run tests of the system. 157 | 158 | ## Troubleshooting 159 | 160 | If the above commands doesn't work, we want to hear about it. This is 161 | a list of known problems: 162 | 163 | * General: Many distributions are insane and pack erlang in split 164 | packages, so each part of erlang is in its own package. This 165 | *always* leads to build problems due to missing stuff. Be sure 166 | you have all relevant packages installed. And when you find which 167 | packages are needed, please send a patch to this file for the 168 | distribution and version so we can keep it up-to-date. 169 | 170 | * Ubuntu 10.10: Ubuntu has a symlink `/usr/lib/erlang/man -> 171 | /usr/share/man`. This is insane and generates problems when 172 | building a release (spec errors on missing files if a symlink 173 | points to nowhere). The easiest fix is to remove the man symlink 174 | from `/usr/lib/erlang`. A way better fix is to install a recent 175 | Erlang/OTP yourself and use **Begone(tm)** on the supplied version. 
176 | 177 | ### Installing Erlang 178 | 179 | I (jlouis@) use the following commands to install Erlang: 180 | 181 | * Install *stow*, `sudo aptitude install stow` 182 | * Execute `sudo aptitude build-dep erlang` to pull in all build 183 | dependencies we need. 184 | * Execute `git clone git://github.com/erlang/otp.git` 185 | * Get into the directory and fire away `git checkout dev && ./otp_build autoconf` 186 | * Get a useful tag for the otp version you have built `otp_version=$(git describe)` 187 | * `./configure --prefix=/usr/local/stow/${otp_version}` 188 | * `make; make docs` 189 | * `make install install-docs` 190 | 191 | And then I enable it in stow: 192 | 193 | cd /usr/local/stow && stow ${otp_version} 194 | 195 | You may have to use `stow -D` on an old 196 | 197 | Another way is to use [Basho kerl](https://github.com/spawngrid/kerl), a simple shell script: 198 | 199 | ``` bash 200 | $ curl -O https://raw.github.com/spawngrid/kerl/master/kerl; chmod a+x kerl 201 | $ ./kerl build R14B04 r14b04 202 | $ ./kerl install r14b04 /opt/erlang/r14b04 203 | $ . /opt/erlang/r14b04/activate 204 | ``` 205 | 206 | In the above example, kerl installs Erlang R14B04 to directory /opt/erlang/r14b04 and makes it default. For more information, please check [kerl readme](https://github.com/spawngrid/kerl/blob/master/README.md). 207 | 208 | ## QUESTIONS?? 209 | 210 | You can either mail them to `jesper.louis.andersen@gmail.com` or you 211 | can come by on IRC #etorrent/freenode and ask. 212 | 213 | # Development 214 | 215 | ## PATCHES 216 | 217 | To submit patches, we have documentation in `documentation/git.md`, 218 | giving tips to patch submitters. 219 | 220 | ## Setting up a development environment 221 | 222 | When developing for etorrent, you might end up generating a new 223 | environment quite often. So ease the configuration, the build 224 | infrastructure support this. 
225 | 226 | * Create a file `rel/vars/etorrent-dev_vars.config` based upon the file 227 | `rel/vars.config`. 228 | * run `make compile etorrent-dev` 229 | * run `make console` 230 | 231 | Notice that we `-pa` add `../../apps/etorrent/ebin` so you can l(Mod) files 232 | from the shell directly into the running system after having 233 | recompiled them. 234 | 235 | ## Documentation 236 | 237 | Read the HACKING.md file in this directory. For how the git repository 238 | is worked, see `documentation/git.md`. 239 | 240 | ## ISSUES 241 | 242 | Either mail them to `jesper.louis.andersen@gmail.com` (We are 243 | currently lacking a mailing list) or use the [issue tracker](http://github.com/jlouis/etorrent/issues) 244 | 245 | ## Reading material for hacking Etorrent: 246 | 247 | - [Protocol specification - BEP0003](http://www.bittorrent.org/beps/bep_0003.html): 248 | This is the original protocol specification, tracked into the BEP 249 | process. It is worth reading because it explains the general overview 250 | and the precision with which the original protocol was written down. 251 | 252 | - [Bittorrent Enhancement Process - BEP0000](http://www.bittorrent.org/beps/bep_0000.html) 253 | The BEP process is an official process for adding extensions on top of 254 | the BitTorrent protocol. It allows implementors to mix and match the 255 | extensions making sense for their client and it allows people to 256 | discuss extensions publicly in a forum. It also provisions for the 257 | deprecation of certain features in the long run as they prove to be of 258 | less value. 259 | 260 | - [wiki.theory.org](http://wiki.theory.org/Main_Page) 261 | An alternative description of the protocol. This description is in 262 | general much more detailed than the BEP structure. It is worth a read 263 | because it acts somewhat as a historic remark and a side channel. Note 264 | that there are some commentary on these pages which can be disputed 265 | quite a lot. 
266 | -------------------------------------------------------------------------------- /TEST.md: -------------------------------------------------------------------------------- 1 | # How to run unit tests 2 | 3 | ## Prerequisites 4 | 5 | To run the unit tests, you currently need a set of programs installed: 6 | 7 | * [OpenTracker](http://erdgeist.org/arts/software/opentracker/) - 8 | Needed for tracker operations. If you can provide an Erlang-based 9 | tracker, so much the better! 10 | * [QuickCheck Mini](http://www.quviq.com/news100621.html) - The small 11 | version of QuviQ's QuickCheck tool. It should be somewhere in your 12 | Erlang path. 13 | * [Transmission 2.21](www.transmissionbt.com) - To install this, I use 14 | a PPA in Ubuntu, namely `ppa:transmissionbt/ppa` use 15 | `add-apt-repository` to get it in. 16 | 17 | ## Performing tests 18 | 19 | To perform all tests, run 20 | 21 | make distclean test 22 | 23 | There are some important targets you can use for test cases. Note that 24 | dependency management could be improved, so you will have to run these 25 | manually for now: 26 | 27 | * `distclean` - We always run tests from the *release* in *rel* so 28 | when you do changes, you will have to rebuild the release in 29 | rel. The easiest way is to distclean everything and then have the 30 | `test` target build the release as a dependency. 31 | 32 | * `testclean` - The test system creates a number of files for testing 33 | purposes the first time it is run. This target removes these and is 34 | necessary if you alter the generated data already on disk. 35 | 36 | ## What tests are performed? 37 | 38 | ### EUnit 39 | 40 | Internal tests are done with EUnit inside the modules of the etorrent 41 | source code proper (underneath an IFDEF shield). There are both 42 | "normal" unit tests and QuickCheck tests in there and both are run if 43 | you execute `make eunit`. 44 | 45 | ### Common Test 46 | 47 | Common Test is our external test framework. 
It uses the release build 48 | to perform a series of does-it-work tests by trying to run the code 49 | and requiring correctness of the tested files. Currently performed 50 | tests: 51 | 52 | * Start a tracker and two instances of etorrent. Seed from one 53 | instance to the other instance to make sure we can transfer files. 54 | 55 | Output from Common Test is in the top-level directory `logs` by 56 | default. Point your browser to `logs/index.html`. 57 | 58 | 59 | -------------------------------------------------------------------------------- /WW3.md: -------------------------------------------------------------------------------- 1 | # Etorrent coding conventions 2 | 3 | ## Why? 4 | 5 | Every programmer has their own view on what counts as readable code 6 | and they are all wrong. This document is my way of blessing the world 7 | with a set of rules outlining the one true way. 8 | 9 | # Prefix module names 10 | 11 | Module names should always be prefixed with __etorrent___. If the module 12 | can be considered to be a member of a logical group of modules, the name should 13 | also be prefixed with the name of the group. An example of such a grouping is 14 | __etorrent_io___. 15 | 16 | # Suffixing module names 17 | 18 | Module names should always contain a suffix if the behaviour or purpose 19 | of the module falls into a general class of modules. The suffix should 20 | be short and descriptive. 21 | 22 | Examples of such suffixes are ___sup__ and ___proto__. 23 | 24 | # Not suffixing module names 25 | 26 | If the module name does not fall into one of those general classes of modules, 27 | a suffix should not be invented because this makes module suffixes meaningless. 28 | 29 | # Exports 30 | 31 | ## Grouped exports 32 | 33 | Separate export directives should be used to group the type of functions that a 34 | module exports. If a module implements an OTP behaviour the callback functions should 35 | always be grouped into one export directive. 
If a module exports functions for 36 | working with entries in the gproc registry, these should also be grouped into another 37 | export directive. 38 | 39 | ## Vertical exports 40 | 41 | Functions in an export directive should always be separated by line breaks. 42 | This makes it easier to scan through the list of functions while also making 43 | it easier to spot what has been modified when viewing the changes in a commit. 44 | 45 | %% gen_server callbacks 46 | -export([init/1, 47 | handle_call/3, 48 | handle_cast/2, 49 | handle_info/2, 50 | terminate/2, 51 | code_change/3]). 52 | 53 | %% gproc registry entries 54 | -export([register_chunk_server/1, 55 | unregister_chunk_server/1, 56 | lookup_chunk_server/1]). 57 | 58 | # Function specifications 59 | 60 | All functions should be annotated with a type specification. This helps 61 | communicate what values a function expects and returns. Maximizing the 62 | chances of dialyzer finding embarrassing bugs is always a good thing. 63 | Function specifications should also be provided for internal functions. 64 | 65 | # Records 66 | ## Defining records 67 | 68 | Fields in record definitions should be separated by line breaks. All fields 69 | in a record definition should include a type specification. Default values 70 | should be omitted if the record is only constructed in a single function; 71 | this is the common case with state records. 72 | 73 | Omitting the default values in the definition and assigning each field a value 74 | when creating a record saves readers from having to go back to the record 75 | definition to find out what the final result is. 76 | 77 | ## Matching on records 78 | 79 | Fields and variable bindings should be separated by line breaks when a record 80 | is used on the left side of the match operator. If only one or two fields are 81 | matched, a one-liner may be used. 
82 | 83 | #state{ 84 | info_hash=Infohash, 85 | remote_pieces=Pieceset} = State, 86 | 87 | #state{info_hash=InfoHash, remote_pieces=Pieceset} = State, 88 | 89 | ## Updating fields in a record 90 | 91 | The same rules as matches on records apply. 92 | 93 | NewState = State#state{ 94 | info_hash=NewInfohash, 95 | remote_pieces=NewPieceset}, 96 | 97 | NewState = State#state{info_hash=NewInfohash, remote_pieces=NewPieceset}, 98 | 99 | 100 | ## Records in function heads 101 | 102 | Matches on records in function heads should be avoided if it's possible. 103 | In the case of gen_server state, the first expression should be to unpack 104 | only the values in the state record that are necessary for the clause. 105 | 106 | ## When to use records 107 | 108 | A record should be used for all composite data types. For small and short 109 | lived groupings of values, such as returning two values from a function or 110 | gen_server calls, use of records is discouraged. Use of other data structures 111 | where a record would have sufficed is discouraged. 112 | 113 | ## Including records 114 | 115 | A record definition should never be shared by two modules. To prevent cases of 116 | this from slipping through the cracks, records should never be defined in header 117 | files. 118 | 119 | # Variable names 120 | 121 | ## Camelcase 122 | 123 | Variable names should not be more camelcased than is absolutely necessary. 124 | 125 | __Infohash__ over __InfoHash__ 126 | __NewState__ over __Newstate__ 127 | 128 | Never use underscores in the middle of variable names. 129 | 130 | ## Abbreviations 131 | 132 | Variable names should not be abbreviated more than necessary. Ensuring 133 | that lines don't get too long by using cryptic variable names is almost 134 | always the wrong way to adhere to the line length limit. 
135 | 136 | __State__ over __S__ 137 | __Index__ over __Idx__ 138 | __Infohash__ over __Hash__ 139 | 140 | # Recursive functions 141 | 142 | Use of recursive functions where a list comprehension or a foldr would 143 | have sufficed is discouraged. Recursive functions with multiple parameters 144 | that are updated on each call is also discouraged, try to find a more straight 145 | forward way to implement the same function. 146 | 147 | # Explicit return values 148 | 149 | If the return value of a function is the result of complex expression or the 150 | return value is tagged the returned value should be assigned to a variable 151 | prior to the last expression. This makes it easier for readers to visually 152 | determine where the function body ends. 153 | 154 | 155 | InitState = #state{ 156 | torrent=TorrentID, 157 | ... 158 | files_max=MaxFiles}, 159 | InitState. 160 | 161 | InitState = #state{ 162 | torrent=TorrentID, 163 | ... 164 | files_max=MaxFiles}, 165 | {ok, InitState}. 166 | 167 | # Nested case expressions 168 | 169 | Case expressions should not be nested if it isn't absolutely necessary. 170 | Assigning the result of a case expression and later matching on that 171 | is preferrable since it helps communicate the purpose of the case statement. 172 | 173 | # Do not be clever 174 | 175 | Saving lines of code by nesting expressions or being clever 176 | with formatting will make anyone who is reading your code have to second 177 | guess the final values of function parameters and results of expressions. 178 | 179 | # Polymorphic functions 180 | 181 | Functions that accepts values of multiple types are a pointless abstraction 182 | if they are too generic. An example could be amodule encapsulating the persistent 183 | state of etorrent. 184 | 185 | -type persistent_entity() :: {infohash(), bitfield()} | {dht_nodes, list()}. 186 | -spec etorrent_persistent:save(persistent_entity()) -> ok. 
187 | -spec etorrent_persistent:query(entity_match()) -> [persistent_entity()]. 188 | 189 | This type of interface scatters the operations that are performed on the persistent 190 | state all over the codebase. A better alternative is to explicitly export these 191 | operations from the etorrent_persistent module 192 | 193 | -spec etorrent_persistent:save_valid_pieces(infohash(), bitfield()) -> ok. 194 | -spec etorrent_persistent:get_valid_pieces(infohash()) -> {ok, bitfield()}. 195 | 196 | -spec etorrent_persistent:save_dht_nodes(list()) -> ok. 197 | -spec etorrent_persistent:get_dht_nodes() -> list(). 198 | 199 | # Domain specific wrappers 200 | 201 | Although the datatypes provided by the standard library is sufficient to support 202 | the implementation of any non trivial program it is often a good idea to wrap them 203 | in a module that provides a more specific interface. If none of the datatypes 204 | in the standard library provides the necessary functionality, consider using a 205 | wrapper module to compose two or more of them. 206 | 207 | # Module summaries 208 | 209 | A summary of the purpose and implementation of a module should be included 210 | if the module is anything non trivial. Not doing so is the same as wasting 211 | the time spent designing the module. 212 | 213 | # Inline EUnit tests 214 | 215 | I prefer separating modules containg unit tests from the module implementing the 216 | interface that is being tested because it emphasises that unit tests should 217 | cover the public interface of a software component. 218 | 219 | A reasonable tradeoff is to include the unit tests in the module implementing 220 | the interface but restrict the tests to making fully qualified calls. 221 | 222 | In these cases it's good to provide a short module name alias over hardcoding 223 | the module name or using the ?MODULE macro because it makes the test code 224 | look less contrived. 
225 | 226 | # False test coverage 227 | 228 | Unit tests should contain as many assertions as possible without going overboard. 229 | This is to avoid situations where a part of the code is covered by a test but 230 | the effect that the code has on the result is never verified to be correct. 231 | 232 | # Reasonable tests 233 | 234 | It's a known fact that unit tests are more prone to be deprecated by design 235 | changes than integration tests or system tests. The conclusion that should 236 | be drawn from this isn't that unit tests are a waste of time to write. 237 | 238 | A unit test that is quick and dirty is always better than one that would have 239 | adhered to all of the rules if it was ever written. 240 | 241 | # Line lengths 242 | 243 | Lines should not exceed 80 characters. This limit is imposed because 244 | it is difficult to read *wide* code, therefore it makes sense to make 245 | exceptions to this rule if the alternative is more difficult to read. 246 | 247 | # Separating functions and function clauses 248 | 249 | * Two blank lines should be used to separate function defenitions. 250 | * One blank line should be used to separate function clauses. 251 | 252 | # Header files 253 | 254 | The use of header files is discouraged because it burdens the reader with 255 | having to keep multiple files in mind when reading a module. 
256 | -------------------------------------------------------------------------------- /bin/.gitignore: -------------------------------------------------------------------------------- 1 | etorrentctl 2 | -------------------------------------------------------------------------------- /bin/etorrentctl.in: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Most of this file was taken from the ejabberd project and then changed 4 | # until it matched what we had as a goal 5 | 6 | NODE=etorrent 7 | HOST=localhost 8 | 9 | BEAMDIR=%%%BEAMDIR%%% 10 | CONFIGFILE=%%%CONFIGFILE%%% 11 | ERL_FLAGS=%%%ERL_FLAGS%%% 12 | 13 | start () { 14 | erl ${ERL_FLAGS} -sname ${NODE}@${HOST} -pa ${BEAMDIR} \ 15 | -config ${CONFIGFILE} -noshell -noinput -detached \ 16 | -s etorrent 17 | } 18 | 19 | debug () { 20 | erl -sname debug${NODE}@${HOST} \ 21 | -pa ${BEAMDIR} \ 22 | -remsh ${NODE}@${HOST} 23 | } 24 | 25 | ctl () { 26 | erl -noinput -sname etorrentctl@${HOST} \ 27 | -pa ${BEAMDIR} \ 28 | -s etorrent_ctl start ${NODE}@${HOST} $@ 29 | } 30 | 31 | usage () { 32 | echo "Usage: $0 {start|stop|debug}" 33 | } 34 | 35 | [ $# -lt 1 ] && usage 36 | 37 | case $1 in 38 | start) start;; 39 | debug) debug;; 40 | *) ctl $@;; 41 | esac 42 | -------------------------------------------------------------------------------- /ct-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | cd `dirname $0` 3 | ct_run -verbosity=50 -spec etorrent_test.spec -pa $PWD/ebin edit $PWD/deps/*/ebin 4 | 5 | -------------------------------------------------------------------------------- /documentation/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all dotfiles 2 | 3 | all: dotfiles 4 | 5 | dotfiles: sup_tree_20110106.png sup_tree_20110106.pdf 6 | 7 | clean: 8 | rm -f sup_tree_20110106.png 9 | rm -f sup_tree_20110106.pdf 10 | rm -f sup_tree_20110106.dot 11 | 12 | 
%.dot: %.m4 13 | m4 $< > $@ 14 | 15 | %.png: %.dot 16 | dot -Tpng $< > $@ 17 | 18 | %.pdf: %.dot 19 | dot -Tpdf $< > $@ 20 | -------------------------------------------------------------------------------- /documentation/auto/documentation.el: -------------------------------------------------------------------------------- 1 | (TeX-add-style-hook "documentation" 2 | (lambda () 3 | (LaTeX-add-environments 4 | "theorem" 5 | "lemma" 6 | "example" 7 | "remark") 8 | (LaTeX-add-labels 9 | "chap:requirements") 10 | (TeX-run-style-hooks 11 | "amssymb" 12 | "amsmath" 13 | "microtype" 14 | "charter" 15 | "fontenc" 16 | "T1" 17 | "latex2e" 18 | "memoir10" 19 | "memoir" 20 | "a4paper"))) 21 | 22 | -------------------------------------------------------------------------------- /documentation/bep-12-15-interaction.md: -------------------------------------------------------------------------------- 1 | # Handling both BEP-12 and BEP-15 simultaneously 2 | 3 | ## Abstract 4 | 5 | The Bittorrent Enhancement Process (BEP) contain two proposals, BEP-12 6 | and BEP-15. These two proposals are used in conjunction with each 7 | other. This document describes how to handle them simultaneously, 8 | based on what different clients do. 9 | 10 | ## Rationale 11 | 12 | In the bittorrent system, it is beneficial to handle multiple trackers 13 | at once. This proposal is BEP-12. Another beneficial proposal, BEP-15, 14 | is to handle tracker requests as UDP traffic, rather than relying on 15 | HTTP/TCP-traffic. The former provides redundancy, while the latter 16 | provides efficient bandwidth utilization. BEP-15 does not, however, 17 | describe how it is supposed that the client recognizes a tracker as 18 | being UDP capable. 19 | 20 | This proposal aims to remedy this shortcoming. It describes how some 21 | clients *currently* handle the situation by a clever interplay between 22 | BEP-12 and BEP-15. 
The goal is not to provide nor enforce a new 23 | solution -- rather we opt for describing the de-facto method of 24 | getting BEP-12 and 15 to interplay. 25 | 26 | ## Approach 27 | 28 | BEP-12 contains the concept of multiple announcement URLs. These are 29 | arranged in tiers, where each tier is a list of announcement URLs. We extend 30 | the usual http://domain.com/foo/announce announce url schema with a new one: 31 | 32 | udp://domain.com 33 | 34 | This schema designates an UDP capable tracker running at 35 | domain.com. Note the deliberate omission of the path-component from 36 | the URL. There is none for the UDP method. A torrent file MAY choose 37 | to supply both the http:// and udp:// schemas at the same time for 38 | a particular tracker. 39 | 40 | The udp:// schema MAY be used in any tier in a BEP-12 multi-tracker 41 | designation anywhere in the lists of individual tiers. 42 | 43 | #### Equivalence 44 | 45 | An http:// and udp:// schema are considered to be equivalent, provided 46 | they have the same domain name. Otherwise they are considered to be 47 | different. The domain names are to be evaulated as (lexicographic) 48 | string equality, not by DNS lookup of an IP-address. 49 | 50 | ## Client handling 51 | 52 | A client SHOULD ignore *any* schemas it doesn't know about, the udp:// 53 | schema included. Thus, a client not supporting BEP-15 SHOULD ignore 54 | the udp:// schema, and strip it from the BEP-12 tracker lists. 55 | 56 | Clients contact udp:// schema trackers as described in BEP-15. A 57 | failure to answer is regarded as if the tracker in the tier is 58 | unreachable and the usual BEP-12 rules apply for finding the next 59 | candidate tracker to try. 60 | 61 | It is paramount that a client still provides adequate shuffling of 62 | trackers as per BEP-12. The simple idea of letting udp:// schemas 63 | "bubble" to the front of the list in each tier MUST be avoided. 
It 64 | does not distribute the announce URLs evenly as it gives too much 65 | preference to UDP capable trackers. 66 | 67 | Rather, the client can use the following method: 68 | 69 | ### Preferral of UDP trackers 70 | 71 | Given tiers 72 | 73 | T1, T2, ..., Tk 74 | 75 | First shuffle each tier T1, T2, ..., Tk randomly as per the BEP-12 76 | specification. 77 | 78 | Then, treat this tier-list as a flat list of announce URLs. That is, 79 | concatenate T1, T2, ..., Tk to form a single list. udp:// and 80 | http:// announce URLs are *equivalent* as per the above definition of 81 | equivalence, swap them to make the udp:// schema come first, 82 | disregarding tiers. Note that this allows the udp:// schema url to 83 | move to an earlier tier. 84 | 85 | After this swapping has occurred, treat everything as BEP-12. Note 86 | that this will make a client prefer udp:// based trackers over http:// 87 | based trackers, even in the same tier. Yet, it still gives each tracker 88 | the same probability distribution as the unshuffled list. We assume 89 | that a tracker with both UDP and HTTP capability prefers to be 90 | contacted on UDP. 91 | 92 | The rationale for letting udp:// schemas move between tiers is that 93 | many torrents are created with a single tracker announceURL in each 94 | tier. Thus simply shuffling in each tier has no effect on such a 95 | torrent file. 96 | 97 | ### Ordering example 98 | 99 | Assume we have the following two tiers: 100 | 101 | {[http://one.com, udp://one.com, http://two.com], 102 | [http://three.com, udp://four.com, udp://two.com]} 103 | 104 | As per BEP-12, random shuffling of these lists are carried 105 | out. 
Here is one such random shuffling: 106 | 107 | {[http://one.com, http://two.com, udp://one.com], 108 | [http://four.com, udp://two.com, http://three.com]} 109 | 110 | The UDP preference rule now swaps the "one.com" and "two.com" domains 111 | to obtain the list: 112 | 113 | {[udp://one.com, udp://two.com, http://one.com], 114 | [http://four.com, http://two.com, http://three.com]} 115 | 116 | At this point, we follow the rules from BEP-12, including moving the 117 | successful responses to the front of the list. 118 | 119 | ## Acknowledgements 120 | 121 | We acknowledge Arvid Nordberg, and his libtorrent-rasterbar C++ code, 122 | for the udp:// preference method. We also acknowledge Arvid for 123 | helpful discussions of the method and critique of this BEP. 124 | 125 | ## References 126 | 127 | * BEP-12 128 | * BEP-15 129 | 130 | ## Copyright 131 | 132 | This document has been placed in the public domain. 133 | -------------------------------------------------------------------------------- /documentation/configuration_options.txt: -------------------------------------------------------------------------------- 1 | This document describes the various configuration options of 2 | etorrent. 3 | 4 | Etorrent: 5 | 6 | * port: This is the port which etorrent should listen on for incoming 7 | connections. By default it is the taxicab number 1729, but it can 8 | of course be changed. We don't need a range of ports because we 9 | will find out what torrent we are transferring in the handshake. 10 | 11 | * dir: The directory to place .torrent files in and the directory to 12 | which torrent files will be downloaded. 13 | 14 | * logger_dir: The directory of the etorrent log file 15 | * logger_fname: The name of the log file. It is *not* currently 16 | rotated and it is appended upon. The format is a simple term-log. 17 | 18 | * max_peers: The maximum number of peers etorrent should connect 19 | to. If more connections come in than this number they will be 20 | rejected. 
On modern setups I expect one can set this fairly high. 21 | 22 | * max_upload_slots: How many peers should we try to simultaneously 23 | upload data to? May be an integer N or the value 'auto' in which 24 | case the slots will be estimated based on the upload rate. 25 | 26 | * max_upload_rate: A number N in kilobytes. Determines the maximum 27 | bandwidth available for etorrent. Is currently only used for 28 | calculation of the number of upload slots. 29 | 30 | * min_uploads: The number of optimistic unchokers running. 31 | 32 | Mnesia: 33 | 34 | * dir: The directory in which to store the database state. The 35 | database is used for fast-resume data among other things. 36 | 37 | You don't need to touch the kernel parameters. 38 | 39 | The SASL parameters can be uncommented to give you a SASL error 40 | log. For development this is good because you can leave the client on 41 | for a while and then come back to see if it has produced any kind of 42 | errors. 43 | -------------------------------------------------------------------------------- /documentation/documentation.tex: -------------------------------------------------------------------------------- 1 | \documentclass[a4paper]{memoir} 2 | \usepackage[T1]{fontenc} 3 | \usepackage{charter} 4 | \usepackage{microtype} 5 | 6 | \usepackage{amsmath} 7 | \usepackage{amssymb} 8 | 9 | \newtheorem{theorem}{Theorem} 10 | \newtheorem{lemma}{Lemma} 11 | \newtheorem{example}{Example} 12 | \newtheorem{remark}{Remark} 13 | 14 | \title{eTorrent documentation} 15 | \author{Jesper Louis Andersen \\ jesper.louis.andersen@gmail.com} 16 | \date{\today} 17 | \begin{document} 18 | \maketitle{} 19 | \tableofcontents{} 20 | 21 | \section{Introduction} 22 | Why write a complete analysis and documentation on the software? This 23 | is normally not the way Open Source software is written. Rather than 24 | sit down and think it seems most people are happy with writing code 25 | and let the code be documentation of what to do. 
But the problem with 26 | that approach is that I hop on-and-off on this project. It makes it 27 | impossible to do ad-hoc development. 28 | 29 | When you grow up, it becomes clear that writing extensive 30 | documentation for a piece of software gives a much better 31 | result. Documentation is key to correct and elegant 32 | software. Documentation is key to extensive analysis before 33 | code. Thus, we must write documentation. 34 | 35 | \chapter{Requirements} 36 | \label{chap:requirements} 37 | 38 | The plan is to build a Bittorrent client. What will set this client 39 | off from other clients is the fault-tolerance of the client. In 40 | general, it should not be possible to take the client down, even in 41 | the case of errors in the client, on the disk, system crashes etc. 42 | 43 | \paragraph{Fault tolerance} The client should be fault-tolerant. In 44 | general, if any part of the client has an error, it must not result in 45 | the client being brought down. The client should also be able to 46 | recover fast from such an error. 47 | 48 | The client should avoid disk-corruption at all costs. It is accepted 49 | if the client assumes the underlying operating system does not corrupt 50 | the disk. 51 | 52 | \paragraph{Unattended operation} The client must run unattended. The 53 | interface to the client is a directory hierarchy: Torrent files in the 54 | hierarchy are downloaded and when a file is removed, it is 55 | stopped. There is no requirement for an interactive interface. 56 | 57 | \paragraph{Performance} The client should run at an adequate speed. It 58 | should be able to run up to the usual limits of Disk I/O. The client 59 | should not pursue speed aggressively, use rtorrent for that. The 60 | client must be able to serve a large number of simultaneous 61 | torrents. We aim for thousands of torrents served at the same time, if 62 | the server is large enough. 
There are 3 iterations: 1 torrent, 100 63 | torrents and 1000 torrents which must be achieved in order in 64 | releases of the software. 65 | 66 | \chapter{Analysis} 67 | \section{Pieces} 68 | Pieces are the central elements we exchange between peers. A Torrent 69 | file consists of several pieces. Each are identified by a natural 70 | number indexed at $1$. This identification serves as the primary key 71 | in our implementation. Several informations are linked to the primary 72 | key. First, each piece has a size. This is important, since the last 73 | piece rarely have the size as the other pieces. Second, pieces have 74 | binary data associated with them of the given size. Third, this data 75 | has a checksum. Finally, the piece is mapped into a list of triples, 76 | $(path, offset, length)$ which designates to where the piece should 77 | go. 78 | \begin{example} 79 | A simple piece could have a list like the following: 80 | \begin{verbatim} 81 | [{foo, 10, 20}, 82 | {bar, 30, 50}] 83 | \end{verbatim} 84 | It should be interpreted as if we should write 20 bytes to 85 | \texttt{foo} at offset 10 and then write 50 bytes to \texttt{bar} at 86 | offset 30. 87 | \end{example} 88 | \begin{remark} 89 | Invariant: The sum of the sizes in the list of where the piece is 90 | stored should be equal to the size of the piece. 91 | \end{remark} 92 | 93 | \paragraph{On piece size} 94 | It seems correct to keep track of piece size all over the 95 | system. First, if we run several torrents, they may have different 96 | piece sizes. Second, it will greatly reduce the need to take special 97 | care of the last piece. 98 | 99 | \paragraph{On piece checksum} 100 | We should always checksum a piece which has been read. First, it alleviates 101 | disk-corruption. A corrupted piece can then never be transmitted over 102 | the network. Second, it is cheap to check a piece in memory. 
Third, it 103 | serves as a great assertion invariant in the system: All written 104 | pieces should preserve their checksum when read. 105 | 106 | When writing a piece, it should be checked as well. There is no 107 | thought in writing something which became accidentally corrupted. As 108 | we mostly retrieve the binary data associated with a piece from a 109 | peer, we really have no control over its correctness, so checked it 110 | must be. 111 | 112 | \section{Filesystem interaction} 113 | \subsection{Piece serving} 114 | When we wish to serve a piece from disk, we must carry out a number of 115 | operations: We must locate the piece on disk. We must load it into 116 | memory and we must break it up so it can be sent to the peer who 117 | requested it. 118 | 119 | Locating a piece is piece number. If we have the piece 120 | number, we deduce the files which comprises the piece in question and 121 | the (offsets, lengths) we have to read inside them. In other 122 | words, let $pids$ be piece identifications. Further, let $path$ be a 123 | file system path (UNIX notation). Finally, let $offset, length \in 124 | \mathbb{N}$. We have the function: 125 | \begin{equation*} 126 | \mathtt{locate\_piece} \colon (pid) \to (path, offset, length)\; list 127 | \end{equation*} 128 | 129 | Then, when the piece is located, we must load it. Assume the existence 130 | of a function that can read pieces 131 | \begin{equation*} 132 | \mathtt{read\_piece} \colon (path, offset, length) \; list \to 133 | binary 134 | \end{equation*} 135 | where $binary$ is binary data. When data has been read, we check 136 | its cryptographic checksum. If the check doesn't match at this point, 137 | we have an error situation which must be handled appropriately. 138 | 139 | Then the checksummed piece is sent to the process responsible for peer 140 | communication. 
Since peers can choose their block size as they see 141 | fit, the cut operation must not be handled centrally, but at the peer 142 | communication process. 143 | 144 | \subsection{Piece retrieval} 145 | 146 | When we get a piece from a peer, we begin by making a checksum 147 | check. If this check fails, we answer the peer communication process 148 | with an error and note it gave us a bad piece. This can be used by the 149 | piece communication process to mark its peer ``dirty'' and eventually 150 | for disconnecting and blacklisting. 151 | 152 | If the piece is ok, we look up the checksum in the map of 153 | checksums. It must match the identification of the piece. If not, it 154 | is an error as well. If both the checksum and identification matches, 155 | we will store the piece. 156 | 157 | There are several storage methods available to our disposal: 158 | 159 | \paragraph{Method 1} 160 | Create all files. Use the system call \texttt{fseek(3)} to 161 | fast-forward to the point in the file we want to write and write down 162 | the piece at its correct slot. 163 | 164 | The advantage of this approach is simplicity. It is easy to 165 | implement. It may introduce sparse files however. We may also pre-fill 166 | all files with an amount of zeros to avoid the sparse file 167 | production. However, this will be a problem because it takes time and 168 | it introduces files on-disk essentially without 169 | information. Pre-filling ensures that the file can always be written 170 | irregardless of free-space however. 171 | 172 | We note that the Azureus client seems to be using an approach like 173 | this. 174 | 175 | \paragraph{Method 2} 176 | Write the file contigously. Call the on-disk piece locations for 177 | slots. Then we first write to slot 1, then slot 2, then slot 3 and so 178 | forth. Pieces are written as they come in, so they may not be written 179 | in the correct slots in the first place. 
180 | 181 | This can be alleviated by using a sorting algorithm on the 182 | pieces. There are several applicable sorting algorithms. A simple 183 | solution would be exchanging selection: 184 | 185 | Assume the pieces $1$ through $n$ are sorted correctly. We write 186 | pieces contigously to slot $n+1, n+2, \dotsc$. When piece $n+1$ is 187 | retrieved, we exchange the piece in slot $n+1$ with this new piece. To 188 | do this safely, we use a free slot on-disk as a temporary variable and 189 | ensure we copy the piece out of slot $n+1$ first. Thus, a crash will 190 | not result in the loss of data. Note that we then have pieces $1$ 191 | through $n+1$ placed correctly. We then run again for slot $n+2$ which 192 | we may have already retrieved. The question is how many exchanges this 193 | makes as disk I/O is pretty heavy and a major limiting factor in 194 | BitTorrent clients. 195 | 196 | For a slot there are a maximum of 3 writes: One for the contiguous 197 | write, one when the piece that fits gets written and one for making 198 | place for the fitting piece. Thus, the algorithm is $\mathcal{O}(n)$ 199 | with a constant factor of around 3. 200 | 201 | The original bittorrent client by Bram Cohen uses a variant of this 202 | approach. 203 | 204 | \paragraph{Method 3} 205 | Use \texttt{mmap(2)}. A file is mapped into memory at a given 206 | location. Writes are done to memory and the operating system is 207 | responsible for letting the write go to the disk at the correct 208 | location by the virtual memory subsystem. This is extremely easy. It 209 | is fast as well, but there are a couple of limitations. 210 | 211 | In a 32-bit architecture, we don't have enough memory to keep 212 | several gigabytes of data mapped in. Hence, we will either need to use 213 | a pure 64-bit operating system or we will need to devise an algorithm 214 | for mapping parts of files in and out. 
We need to do this anyway, 215 | since we can't expect to map several file descriptors at the same 216 | time. 217 | 218 | Rtorrent is using this approach. 219 | 220 | \paragraph{Method 4} 221 | Use internal storage. We can choose to represent the data internally 222 | in a on-disk persistent format. Then, when we have the whole file, we 223 | can write it out. Each piece will get written exactly 2 times, so it 224 | may seem to be better than method number 2. On the other hand, there 225 | are problems with the method: We can't look at data until everything 226 | is downloaded. 227 | 228 | \paragraph{Discussion} 229 | My intuition tells me, that method 1 with pre-fill is the easiest to 230 | implement. Thus, we choose to implement that solution first. We can 231 | change to another method later, when the client basics are there and 232 | works. 233 | 234 | \subsection{What to do at startup?} 235 | When the system starts, we have no idea of what we have piece-wise of 236 | a torrent. Hence, we must halt all communication with others until we 237 | know what pieces we have and what pieces we miss. We will check one 238 | torrent at a time, which will require some control. 239 | 240 | For each torrent, we will begin loading in pieces. Either pieces fail, 241 | or pieces will be checked. If method 2 is chosen for piece storage, we 242 | need to identify read pieces. There must be some error-handling in the 243 | loading code, so we gracefully handle mis-loads. 244 | 245 | If a file is missing on disk, we will create it and pre-fill it with 246 | zeros. Hence, we have the following invariant: ``File system processes 247 | can assume there is access to the needed files''. 248 | 249 | \subsection{Handling checksum read errors} 250 | What happens when a checksum read reports an error? There are 2 causes 251 | for this: Disk corruption and a system crash/reset. The most probable 252 | is that the system was reset. 
Thus, we mark the piece as bad and 253 | ignore it as if it did not exist. Done correctly, it seems we can then 254 | continue running. 255 | 256 | Disk corruption is much more fatal. We will assume data is not 257 | corrupted on the disk. Modern file systems like ZFS (see \cite{zfs}), 258 | will carry out checks of all read blocks and thus it is near 259 | impossible to have disk corruption in such a scheme. 260 | 261 | \section{Peer processes} 262 | 263 | General rule: we try to carry out bookkeeping as close to the peer as 264 | possible. Ie, we update mnesia tables whenever a message arrives or 265 | when a message gets sent in an early/late manner. Upon arrival, the 266 | first thing we do is to update database tables locally. Upon message 267 | sending, the last thing we do is to update. Sender/Receiver processes 268 | are responsible for updating and tracking the information. 269 | 270 | \chapter{Programming planning} 271 | \section{Filesystem} 272 | A central problem to the eTorrent project is the File system. The 273 | filesystem processes must be split because the death of one of them 274 | must not take all torrents down. It would be rather bad architecture. 275 | 276 | \subsection{Processes} 277 | \subsubsection{File process} 278 | For each file which is managed, there is a process which is termed the 279 | ``file process''. This process is responsible for managing the file 280 | reads and writes. It has a very simple interface by which it accepts 281 | read and write operations on the file given by byte offset and number 282 | of bytes to read/write. It also contains a timeout for when no-one has 283 | requested any data on the file for some time in which case it closes 284 | down gracefully. 285 | 286 | \subsubsection{General idea} 287 | For each torrent, there is a managing process. This process is 288 | responsible for managing the torrent's access to the disk. 
The 289 | management process is created when a given torrent has been processed 290 | for checksumming and is handed its status upon spawn-time. 291 | 292 | When spawned, we get a mapping between piece identifications and the 293 | files we need to read from/write to in order to get the piece loaded 294 | or saved. We use this mapping for lookup in the code. 295 | 296 | There are 2 main functions that the management process accepts: 297 | \texttt{read\_piece} and \texttt{write\_piece}. Upon getting a read or 298 | write request the process will look if there is a file process serving 299 | already. If not, it will spawn one and ask it to read/write the data 300 | in question. The process is linked to the file processes, so if any of 301 | these dies, we know it and can act accordingly by cleaning up our map 302 | of files and $pid$s. Since a file process exits when it has done 303 | nothing for some time, it is expected that we will use this feature 304 | quite a lot. 305 | 306 | \subsubsection{File descriptor replacement} 307 | We want a simple algorithm for replacing file descriptors. A very easy way 308 | which is possible in Erlang is to let each file be managed by a 309 | process. This process has a timeout on its main retrieval which will 310 | make it close down if no operations have been served for some time. A 311 | main process will keep track of all file processes and it will also 312 | have an LRU structure for the files. Thus file-descriptor processes can be 313 | purged if some new files have to be opened, but they auto-purge if 314 | no-one uses them. 315 | 316 | Ergo, whenever a file process is spawned, the LRU process is informed 317 | about it. It can then ask for a close of a given process if it runs 318 | out of file descriptors. 319 | \begin{remark} 320 | This is a long term optimization. It should not be implemented in 321 | the first release. 
322 | \end{remark} 323 | 324 | \end{document} 325 | 326 | %%% Local Variables: 327 | %%% mode: latex 328 | %%% TeX-master: t 329 | %%% End: 330 | -------------------------------------------------------------------------------- /documentation/dynamic_sockets.txt: -------------------------------------------------------------------------------- 1 | Dynamic Active/Passive sockets for etorrent. 2 | 3 | *** 4 | 5 | Introduction: 6 | 7 | When I originally built the socket system for etorrent I opted for the 8 | simple solution. This was to use active sockets and use the socket 9 | option {packet, 4} on the wire-protocol. This is extremely fast an 10 | simple, yet it gave rather bad speeds. The reason for this is that we 11 | need to do rate calculations to choose the fastest peers and when a 12 | peer sends 16k packets he may not even get a rate calculation in the 13 | interval of 10 seconds due to this. 14 | 15 | So I opted for using passive sockets and queue the data myself. This 16 | gives many more rate calculations but it also uses a lot of CPU-time. 17 | 18 | The next thing that was the rate calculation itself. It used a simple 19 | scheme where we counted bytes for 10 seconds and then resat all 20 | counters. This is very unfair to some peers that enters late in the 21 | cycle (ie, gets unchoked late in the 10 second cycle) even if they 22 | produce really good speeds. So we now use a running average over an 23 | interval which periodically gets updated. This is much more fair since 24 | a peer which has only moved data for 3 seconds but has a good rate 25 | will get unchoked. 26 | 27 | But the problem with passive sockets remain in the code, even with 28 | this change. 29 | 30 | Goal: 31 | 32 | Minimize CPU usage. 33 | 34 | Methodology: 35 | 36 | It is clear that if a socket has a rate beyond a certain limit, we 37 | should just use [active, {packet, 4}] encoding on the socket. For 38 | slower sockets, you will have to do some measurements and 39 | thinking. 
Maybe [active, {packet, 4}] is best. Or you can use [active] 40 | only or maybe keep running with the passive socket. It depends on how 41 | much it hurts the rate calculation on slower lines. 42 | 43 | The idea is to dynamically shift between 2 modes, should it prove to 44 | be most efficient. When the speed jumps over a hi-mark then it goes 45 | [active, {packet, 4}] and when the speed falls below a certain lo-mark 46 | it changes to a more precise measurement that may eat more CPU-power. 47 | 48 | It is probably best if you measure what is best. You can also profile 49 | the rating code (think Amdahls law) and see if an improvement of that 50 | will yield significant speedup. 51 | -------------------------------------------------------------------------------- /documentation/fast_resume.txt: -------------------------------------------------------------------------------- 1 | Fast resume support for etorrent: 2 | 3 | * First, we should get etorrent to understand that there are mnesia 4 | tables which are being disk-logged rather than being in RAM. 5 | 6 | * Then we should construct a process which periodically checks the 7 | current state of torrents, builds their bitfields and adds them to 8 | the table that is disk-logged. 9 | 10 | - Every 5 minute we go through the #piece/#torrent/#tracking_map. 11 | 12 | - Build a bitfield for each. 13 | 14 | - Store the bitfield together with what torrent that bitfield is 15 | for. 16 | 17 | - Prune bitfields which has no torrents. 18 | 19 | * Finally, the fs_checker should use this information when starting. 20 | 21 | * And then, everything should be checked for odd errors. 22 | -------------------------------------------------------------------------------- /documentation/git.md: -------------------------------------------------------------------------------- 1 | This is stolen totally from git by Junio C. Hamano. I like it, 2 | so this is how we rock the main repository. 
3 | 4 | The document contains the general policy and further down some hints 5 | on submitting patches. 6 | 7 | # The policy. 8 | 9 | * Feature releases are numbered as vX.Y and are meant to 10 | contain bugfixes and enhancements in any area, including 11 | functionality, performance and usability, without regression. 12 | 13 | * Maintenance releases are numbered as vX.Y.W and are meant 14 | to contain only bugfixes for the corresponding vX.Y feature 15 | release and earlier maintenance releases vX.Y.V (V < W). 16 | 17 | * 'master' branch is used to prepare for the next feature 18 | release. In other words, at some point, the tip of 'master' 19 | branch is tagged with vX.Y. 20 | 21 | * 'maint' branch is used to prepare for the next maintenance 22 | release. After the feature release vX.Y is made, the tip 23 | of 'maint' branch is set to that release, and bugfixes will 24 | accumulate on the branch, and at some point, the tip of the 25 | branch is tagged with vX.Y.1, vX.Y.2, and so on. 26 | 27 | * 'next' branch is used to publish changes (both enhancements 28 | and fixes) that (1) have worthwhile goal, (2) are in a fairly 29 | good shape suitable for everyday use, (3) but have not yet 30 | demonstrated to be regression free. New changes are tested 31 | in 'next' before merged to 'master'. 32 | 33 | * 'pu' branch is used to publish other proposed changes that do 34 | not yet pass the criteria set for 'next'. 35 | 36 | * The tips of 'master', 'maint' and 'next' branches will always 37 | fast forward, to allow people to build their own 38 | customization on top of them. 39 | 40 | * Usually 'master' contains all of 'maint', 'next' contains all 41 | of 'master' and 'pu' contains all of 'next'. 42 | 43 | * The tip of 'master' is meant to be more stable than any 44 | tagged releases, and the users are encouraged to follow it. 
45 | 46 | * The 'next' branch is where new action takes place, and the 47 | users are encouraged to test it so that regressions and bugs 48 | are found before new topics are merged to 'master'. 49 | 50 | # Submitting patches 51 | 52 | ## Configuring gits user information 53 | 54 | First, configure git so the patches have the right names in them: 55 | 56 | git config --global user.name "Your Name Comes Here" 57 | git config --global user.email you@yourdomain.example.com 58 | 59 | ## Branching 60 | 61 | * Fork the project on github. 62 | * Clone the forked repo locally (Instructions are in the fork 63 | information) 64 | * Setup a remote, `upstream` to point to the main repository: 65 | 66 | git remote add upstream git://github.com/jlouis/etorrent 67 | 68 | * Now, create a topic-branch to hold your changes. You should pick 69 | the most stable branch which will support them. This is usually 70 | `master` though at times, it will be `next` or `maint`. Never use 71 | `pu` or any other branch, as they may be rewound and rewritten. If 72 | you *must*, make sure that the owner of the branch knows you are 73 | doing it, to avoid troubles later. 74 | 75 | You should pick a name for your topic branch which is as precise 76 | as you can make it. 77 | 78 | * In general, *avoid* merging `master` or `next` into your topic 79 | branch. Also, avoid blindly firing `git rebase master` to follow 80 | the development on `master`. For testing reintegration with 81 | master, git provides `git rerere`. 82 | 83 | The problem is that when your `topic` gets merged into master, 84 | there will be a large amount of 'merge' commits which are basically 85 | just noise (even though `git log` can filter them). Also, if merges 86 | from master to your branch is used sparingly, it conveys information: 87 | whenever you merge master in, you make it clear your `topic` depends 88 | on something new that arrived. 
89 | 90 | Create a `test` branch and merge your `topic` plus `master` into this 91 | to test your changes against master. Do the same with `topic`+`next` to 92 | test your branch against the cooking pot - so we get the cooking pot 93 | exercised a bit as well. You can use this to test different configurations 94 | of lurking patches and ideas with your code to flesh out problems long 95 | before they occur. 96 | 97 | The `git rerere` tool can track how conflicts are resolved, so you can 98 | destroy the `test` branch afterwards and recreate it since `rerere` 99 | will replay conflict resolutions for you. This also means that if you 100 | merge the tree differently or need to merge changes from master that you need, 101 | then `rerere` will help you. 102 | 103 | * When you do several patches in a row, try to make it such that 104 | each commit provides a working tree. This helps tools like `git 105 | bisect` a lot. Aggressively clean up your branches with `git rebase 106 | -i` before publishing. Other good patch-processing commands are 107 | `git add -i` and `git commit --amend`. 108 | 109 | Try to make each commit a separate change. I'd rather have 5 separate commits than 110 | one squashed commit. 111 | 112 | Try to write a good commit message: Give it a title that 113 | summarizes and a body that details the change, why it was done and 114 | how it was done. The why is more important than the how unless it 115 | is obvious (fixing a bug has an obvious why). 116 | 117 | * Publishing a branch is done with 118 | 119 | git push origin topic-feature-name 120 | 121 | I will usually first pull it to next and let it cook there for a 122 | couple of days before moving the patch further on to master -- 123 | unless the change is trivially correct (Make fixes, documentation 124 | change and so on). 
125 | 126 | * Cleanup after the patch graduated to master is done with 127 | 128 | git push origin :topic-feature-name 129 | 130 | this makes it easier for all of us to track what is still alive 131 | and what is not. 132 | 133 | ## Caveats 134 | 135 | * I tried to push amended commit to remote git repo, but it was rejected. 136 | What happened? 137 | 138 | A: The short answer is "don't do it". If you really want to, though, 139 | you either need to change remote repo's config variable 140 | "receive.denynonfastforwards", which you probably can't, or as 141 | a dirty hack you delete and create the branch like this: 142 | 143 | git push origin :topic-branch-name 144 | git push origin topic-branch-name 145 | 146 | -------------------------------------------------------------------------------- /documentation/new_version.txt: -------------------------------------------------------------------------------- 1 | How to get a new version of etorrent out: 2 | 3 | - Make sure the master branch has everything you want it to have 4 | 5 | - Edit etorrent_version.hrl 6 | - Edit webui version tag. 
7 | - Edit apps/etorrent/src/etorrent.app.src version 8 | - Edit rel/reltool.config version 9 | - Edit project.desc version 10 | 11 | - Write NEWS if not already up-to-date 12 | 13 | - git tag -s -u v0.9 14 | Tag the master branch with the version 15 | 16 | -------------------------------------------------------------------------------- /documentation/sup_tree_20110106.m4: -------------------------------------------------------------------------------- 1 | define(main_setup, ` 2 | /* General coloration and font setup */ 3 | rankdir=LR; 4 | node [fontname="URW Gothic L",fontsize=12,shape=plaintext,labelfontname=Helvetica]; 5 | labeljust = l; 6 | labelloc = t; 7 | 8 | fontsize = 24; 9 | fontname="URW Gothic L"; 10 | 11 | label = "Etorrent Supervisor Tree:"; 12 | ') 13 | 14 | define(sup_s, `$1 [label="$1", shape=box, color=red];') 15 | define(sup_11, `$1 [label="$1", shape=box, color=blue];') 16 | define(sup_1a, `$1 [label="$1", shape=box, color=green];') 17 | 18 | define(suggest_link, `$1 -> $2 [style=dashed,color=lightgrey];') 19 | define(link, `$1 -> $2') 20 | digraph etorrent_sup { 21 | 22 | main_setup 23 | 24 | subgraph cluster_legend { 25 | label="Legend"; 26 | color="deepskyblue4"; 27 | fontsize=14; 28 | { rank=same; 29 | sup_s(`simple_one_for_one') 30 | sup_11(`one_for_one') 31 | sup_1a(`one_for_all') 32 | } 33 | } 34 | 35 | /* Top level */ 36 | sup_1a(`etorrent_sup') 37 | 38 | /* Torrent Global */ 39 | { rank=same; 40 | sup_1a(`listen_sup') 41 | sup_11(`dht') 42 | sup_11(`dirwatcher_sup') 43 | sup_11(`torrent_pool') 44 | sup_1a(`udp_tracker_sup') 45 | } 46 | 47 | link(`etorrent_sup', `listen_sup') 48 | link(`etorrent_sup', `dht') 49 | link(`etorrent_sup', `dirwatcher_sup') 50 | link(`etorrent_sup', `torrent_pool') 51 | link(`etorrent_sup', `udp_tracker_sup') 52 | 53 | /* UDP Tracking */ 54 | sup_s(`udp_pool') 55 | sup_11(`udp_proto_sup') 56 | 57 | link(`udp_tracker_sup', `udp_pool') 58 | link(`udp_tracker_sup', `udp_proto_sup') 59 | 60 | /* Torrent Pool */ 61 
| subgraph cluster_torrent { 62 | label="Torrent"; 63 | color="deepskyblue4"; 64 | fontsize=14; 65 | 66 | sup_1a(`torrent_sup') 67 | 68 | { rank=same; 69 | sup_11(`io_sup') 70 | sup_s(`peer_pool') 71 | } 72 | 73 | link(`torrent_sup', `io_sup') 74 | link(`torrent_sup', `peer_pool') 75 | 76 | sup_1a(`peer_pool') 77 | sup_1a(`file_io_sup') 78 | 79 | suggest_link(`peer_pool', `peer_sup') 80 | suggest_link(`io_sup', `file_io_sup') 81 | } 82 | 83 | suggest_link(`torrent_pool', `torrent_sup') 84 | } 85 | -------------------------------------------------------------------------------- /erlang.mk: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013, Loïc Hoguin 2 | # 3 | # Permission to use, copy, modify, and/or distribute this software for any 4 | # purpose with or without fee is hereby granted, provided that the above 5 | # copyright notice and this permission notice appear in all copies. 6 | # 7 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | # Verbosity and tweaks. 
16 | 17 | V ?= 0 18 | 19 | appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src; 20 | appsrc_verbose = $(appsrc_verbose_$(V)) 21 | 22 | erlc_verbose_0 = @echo " ERLC " $(filter %.erl %.core,$(?F)); 23 | erlc_verbose = $(erlc_verbose_$(V)) 24 | 25 | xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F)); 26 | xyrl_verbose = $(xyrl_verbose_$(V)) 27 | 28 | dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F)); 29 | dtl_verbose = $(dtl_verbose_$(V)) 30 | 31 | gen_verbose_0 = @echo " GEN " $@; 32 | gen_verbose = $(gen_verbose_$(V)) 33 | 34 | .PHONY: all clean-all app clean deps clean-deps docs clean-docs \ 35 | build-tests tests build-plt dialyze 36 | 37 | # Deps directory. 38 | 39 | DEPS_DIR ?= $(CURDIR)/deps 40 | export DEPS_DIR 41 | 42 | REBAR_DEPS_DIR = $(DEPS_DIR) 43 | export REBAR_DEPS_DIR 44 | 45 | ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DEPS)) 46 | ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS)) 47 | 48 | # Application. 49 | 50 | ERLC_OPTS ?= -Werror +debug_info +warn_export_all +warn_export_vars \ 51 | +warn_shadow_vars +warn_obsolete_guard # +bin_opt_info +warn_missing_spec 52 | COMPILE_FIRST ?= 53 | COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST))) 54 | 55 | all: deps app 56 | 57 | clean-all: clean clean-deps clean-docs 58 | $(gen_verbose) rm -rf .$(PROJECT).plt $(DEPS_DIR) logs 59 | 60 | app: ebin/$(PROJECT).app 61 | $(eval MODULES := $(shell find ebin -name \*.beam \ 62 | | sed 's/ebin\///;s/\.beam/,/' | sed '$$s/.$$//')) 63 | $(appsrc_verbose) cat src/$(PROJECT).app.src \ 64 | | sed 's/{modules, \[\]}/{modules, \[$(MODULES)\]}/' \ 65 | > ebin/$(PROJECT).app 66 | 67 | define compile_erl 68 | $(erlc_verbose) ERL_LIBS=$(DEPS_DIR) erlc -v $(ERLC_OPTS) -o ebin/ \ 69 | -pa ebin/ -I include/ $(COMPILE_FIRST_PATHS) $(1) 70 | endef 71 | 72 | define compile_xyrl 73 | $(xyrl_verbose) erlc -v -o ebin/ $(1) 74 | $(xyrl_verbose) erlc $(ERLC_OPTS) -o ebin/ ebin/*.erl 75 | @rm ebin/*.erl 76 | endef 77 | 78 | define compile_dtl 79 
| $(dtl_verbose) erl -noshell -pa ebin/ deps/erlydtl/ebin/ -eval ' \ 80 | Compile = fun(F) -> \ 81 | Module = list_to_atom( \ 82 | string:to_lower(filename:basename(F, ".dtl")) ++ "_dtl"), \ 83 | erlydtl_compiler:compile(F, Module, [{out_dir, "ebin/"}]) \ 84 | end, \ 85 | _ = [Compile(F) || F <- string:tokens("$(1)", " ")], \ 86 | init:stop()' 87 | endef 88 | 89 | ebin/$(PROJECT).app: src/*.erl $(wildcard src/*.core) \ 90 | $(wildcard src/*.xrl) $(wildcard src/*.yrl) \ 91 | $(wildcard templates/*.dtl) 92 | @mkdir -p ebin/ 93 | $(if $(strip $(filter %.erl %.core,$?)), \ 94 | $(call compile_erl,$(filter %.erl %.core,$?))) 95 | $(if $(strip $(filter %.xrl %.yrl,$?)), \ 96 | $(call compile_xyrl,$(filter %.xrl %.yrl,$?))) 97 | $(if $(strip $(filter %.dtl,$?)), \ 98 | $(call compile_dtl,$(filter %.dtl,$?))) 99 | 100 | clean: 101 | $(gen_verbose) rm -rf ebin/ test/*.beam erl_crash.dump 102 | 103 | # Dependencies. 104 | 105 | define get_dep 106 | @mkdir -p $(DEPS_DIR) 107 | git clone -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1) 108 | cd $(DEPS_DIR)/$(1) ; git checkout -q $(word 2,$(dep_$(1))) 109 | endef 110 | 111 | define dep_target 112 | $(DEPS_DIR)/$(1): 113 | $(call get_dep,$(1)) 114 | endef 115 | 116 | $(foreach dep,$(DEPS),$(eval $(call dep_target,$(dep)))) 117 | 118 | deps: $(ALL_DEPS_DIRS) 119 | @for dep in $(ALL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done 120 | 121 | clean-deps: 122 | @for dep in $(ALL_DEPS_DIRS) ; do $(MAKE) -C $$dep clean; done 123 | 124 | # Documentation. 125 | 126 | docs: clean-docs 127 | $(gen_verbose) erl -noshell \ 128 | -eval 'edoc:application($(PROJECT), ".", []), init:stop().' 129 | 130 | clean-docs: 131 | $(gen_verbose) rm -f doc/*.css doc/*.html doc/*.png doc/edoc-info 132 | 133 | # Tests. 
134 | 135 | $(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep)))) 136 | 137 | build-test-deps: $(ALL_TEST_DEPS_DIRS) 138 | @for dep in $(ALL_TEST_DEPS_DIRS) ; do $(MAKE) -C $$dep; done 139 | 140 | build-tests: build-test-deps 141 | $(gen_verbose) ERL_LIBS=deps erlc -v $(ERLC_OPTS) -o test/ \ 142 | $(wildcard test/*.erl test/*/*.erl) -pa ebin/ 143 | 144 | CT_RUN = ct_run \ 145 | -no_auto_compile \ 146 | -noshell \ 147 | -pa ebin $(DEPS_DIR)/*/ebin \ 148 | -dir test \ 149 | -logdir logs 150 | # -cover test/cover.spec 151 | 152 | CT_SUITES ?= 153 | CT_SUITES_FULL = $(addsuffix _SUITE,$(CT_SUITES)) 154 | 155 | tests: ERLC_OPTS += -DTEST=1 +'{parse_transform, eunit_autoexport}' 156 | tests: clean deps app build-tests 157 | @mkdir -p logs/ 158 | @$(CT_RUN) -suite $(CT_SUITES_FULL) 159 | $(gen_verbose) rm -f test/*.beam 160 | 161 | # Dialyzer. 162 | 163 | PLT_APPS ?= 164 | DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions \ 165 | -Wunmatched_returns # -Wunderspecs 166 | 167 | build-plt: deps app 168 | @dialyzer --build_plt --output_plt .$(PROJECT).plt \ 169 | --apps erts kernel stdlib $(PLT_APPS) $(ALL_DEPS_DIRS) 170 | 171 | dialyze: 172 | @dialyzer --src src --plt .$(PROJECT).plt --no_native $(DIALYZER_OPTS) 173 | -------------------------------------------------------------------------------- /etorrent_test.cfg: -------------------------------------------------------------------------------- 1 | {etorrent_common_config, [{dirwatch_interval, 20 }, 2 | {use_upnp, false}, 3 | {dht, false }, 4 | {dht_bootstrap_nodes, []}, 5 | {max_peers, 200}, 6 | {max_download_rate, 2000}, 7 | {max_upload_rate, 2000}, 8 | {max_upload_slots, auto}, 9 | {fs_watermark_high, 128}, 10 | {fs_watermark_low, 100}, 11 | {min_uploads, 2}, 12 | {preallocation_strategy, sparse }, 13 | {profiling, false}]}. 
14 | -------------------------------------------------------------------------------- /etorrent_test.spec: -------------------------------------------------------------------------------- 1 | {logdir, "logs"}. 2 | {config, "etorrent_test.cfg"}. 3 | {alias, test, "test"}. 4 | {cover, "test/etorrent.cover"}. 5 | {suites, test, etorrent_SUITE}. 6 | -------------------------------------------------------------------------------- /rel/dev.config: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jlouis/etorrent/4d169330817c3f6d39769de4503cacef855c6cd5/rel/dev.config -------------------------------------------------------------------------------- /rel/dev.sys.config: -------------------------------------------------------------------------------- 1 | %% -*- mode: Erlang; -*- 2 | [{etorrent_core, 3 | [{azdht, false}]}, 4 | {azdht, 5 | [{state_filename, "spool/azdht.state"}]}, 6 | {lager, 7 | [{handlers, [{lager_console_backend, notice}, 8 | {lager_file_backend, [{file, "error.log"}, {level, error}]}, 9 | {lager_file_backend, [{file, "console.log"}, {level, info}]} 10 | ]} 11 | ]}, 12 | {kernel, 13 | [{start_timer, true}]} 14 | ]. 15 | -------------------------------------------------------------------------------- /rel/release.config: -------------------------------------------------------------------------------- 1 | {port, 3721}. 2 | {dht, true}. 3 | {azdht, true}. 4 | {pex, true}. 5 | {udp_port, 3722}. 6 | {dht_port, 3723}. 7 | {azdht_port, 3724}. 8 | {dht_state, "/home/user/etorrent/spool/dht_state.bin"}. 9 | {azdht_state, "/home/user/etorrent/spool/azdht_state.bin"}. 10 | {dir, "/home/user/etorrent/torrent_data"}. 11 | {download_dir, "/home/user/etorrent/torrent_data"}. 12 | {dirwatch_interval, 20}. 13 | {fast_resume_file, "/home/user/etorrent/spool/fast_resume_state.dets"}. 14 | 15 | {etorrent_logger_dir, "log"}. 16 | {webui, true}. 17 | {webui_logger_dir, "log/webui"}. 
18 | 19 | {max_download_rate, 1200}. 20 | {max_upload_rate , 1200}. 21 | 22 | {use_upnp, false}. 23 | 24 | {sasl_error_log, "log/sasl/sasl-error.log"}. 25 | 26 | {preallocation, sparse}. 27 | -------------------------------------------------------------------------------- /rel/reltool.config: -------------------------------------------------------------------------------- 1 | {sys, [ 2 | {lib_dirs, ["../deps"]}, 3 | {rel, "etorrent", "1.2.1", 4 | [ 5 | kernel, 6 | stdlib, 7 | crypto, 8 | inets, 9 | % sasl, 10 | gproc, 11 | etorrent_core, 12 | cascadae 13 | ]}, 14 | {rel, "start_clean", "", 15 | [ 16 | kernel, 17 | stdlib 18 | ]}, 19 | {boot_rel, "etorrent"}, 20 | {profile, embedded}, 21 | %% This parameter is supported starting from R15B02 22 | {excl_lib, otp_root}, 23 | {excl_sys_filters, ["^bin/.*", 24 | "^erts.*/bin/(dialyzer|typer)"]}, 25 | % {app, sasl, [{incl_cond, include}]}, 26 | {app, common_test, [{incl_cond, include}]} 27 | ]}. 28 | 29 | {target_dir, "etorrent"}. 30 | 31 | {overlay_vars, "release.config"}. 32 | 33 | {overlay, [ 34 | {mkdir, "log"}, 35 | % {mkdir, "log/sasl"}, 36 | {mkdir, "custom"}, 37 | {copy, "files/erl", "\{\{erts_vsn\}\}/bin/erl"}, 38 | {copy, "files/nodetool", "custom/nodetool"}, 39 | {copy, "files/erts_dir", "custom/erts_dir"}, 40 | {copy, "files/etorrent", "bin/etorrent"}, 41 | {template, "files/sys.config", "releases/\{\{rel_vsn\}\}/sys.config"}, 42 | {copy, "files/etorrent.cmd", "bin/etorrent.cmd"}, 43 | {copy, "files/start_erl.cmd", "bin/start_erl.cmd"}, 44 | {copy, "files/vm.args", "releases/\{\{rel_vsn\}\}/vm.args"} 45 | ]}. 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /rel/sys.config: -------------------------------------------------------------------------------- 1 | %% -*- mode: Erlang; -*- 2 | [{etorrent_core, 3 | [ 4 | %% The port entry tells etorrent which port it should listen on. 
It 5 | %% can currently not be limited to listen on certain interfaces 6 | %% only. It will instead bind to every available interface present. 7 | {port, {{port}} }, 8 | 9 | %% The port to listen on when retrieving UDP responses from the tracker 10 | {udp_port, {{udp_port}} }, 11 | 12 | %% Use azDHT (DHT used by Vuze). 13 | {azdht, {{azdht}} }, 14 | 15 | %% Use Peer Exchange (ut_pex). 16 | {pex, {{pex}} }, 17 | 18 | %% The dht entry enables the DHT subsystem, it is used to 19 | %% retrieve information of which peers are available if there 20 | %% are no trackers available. 21 | {dht, {{ dht }} }, 22 | 23 | %% The DHT subsystem will also bind to all interfaces. 24 | {dht_port, {{dht_port}} }, 25 | 26 | %% The DHT subsystem stores its internal state between runs in a state file 27 | %% The following setting defines the location of this file 28 | {dht_state, "{{dht_state}}"}, 29 | 30 | %% Enable UPnP subsystem, which tries to open port mappings in 31 | %% UPnP-aware routers for etorrent. 32 | {use_upnp, {{use_upnp}} }, 33 | 34 | %% The directory to watch for .torrent files and the directory to download data into 35 | {dir, "{{dir}}"}, 36 | 37 | %% The directory to download data into. It is optional, if not defined used 'dir' value. 38 | {download_dir, "{{download_dir}}"}, 39 | 40 | %% Interval in seconds to check directory for new .torrent files 41 | {dirwatch_interval, {{dirwatch_interval}} }, 42 | 43 | %% Location of the log file 44 | {logger_dir, "{{etorrent_logger_dir}}"}, 45 | 46 | %% Name of the log file. Etorrent will stamp out simple messages here whenever progress 47 | %% is made in the system. 48 | {logger_fname, "etorrent.log"}, 49 | 50 | %% Location of the fast resume file. If present this file is used to populate the fast- 51 | %% resume table, so startup is much faster. Every 5 minutes the file is stamped out, 52 | %% so an eventual death of the system won't affect too much. It is also written upon 53 | %% graceful termination. 
54 | %% NOTE: The directory for the fast resume file must exist, or etorrent will crash. 55 | {fast_resume_file, "{{fast_resume_file}}"}, 56 | 57 | %% Limit on the number of peers the system can maximally be connected to 58 | {max_peers, 200}, 59 | 60 | %% The download rate of the system. 61 | {max_download_rate, {{max_download_rate}} }, 62 | 63 | %% The upload rate of the system. 64 | {max_upload_rate, {{max_upload_rate}} }, 65 | 66 | %% Number of upload slots. Either an integer or 'auto'. We recommend 'auto' as this 67 | %% will calculate a sane number of upload slots from the upload_rate. If this is set 68 | %% too low, you will not saturate the outbound bandwidth. If set too high, peers will 69 | %% not like the client as it can only give bad rates to all peers. 70 | {max_upload_slots, auto}, 71 | 72 | %% High and low watermarks for the file system processes. Etorrent will not open more 73 | %% on-disk files than the limit given here. 74 | {fs_watermark_high, 128}, 75 | {fs_watermark_low, 100}, 76 | 77 | %% Number of optimistic upload slots. If your line is really fast, consider increasing 78 | %% this a little bit. 79 | {min_uploads, 2}, 80 | 81 | %% The preallocation strategy to use when creating new files. The default is "sparse" 82 | %% which creates a sparse file on the disk. Some file systems are bad at working with 83 | %% sparse files, most notably FreeBSDs default file system. The other option here is 84 | %% "preallocate" which means the system will fill the file up on disk before using it. 85 | {preallocation_strategy, {{preallocation}} }, 86 | 87 | %% Enable the Web user interface in etorrent, on 127.0.0.1:8080 88 | {webui, {{webui}} }, 89 | 90 | %% Enable logging in the webui 91 | {webui_logger_dir, "{{webui_logger_dir}}"}, 92 | 93 | %% The address to bind the webui on. Notice that is has to be given as a tuple for an IP address 94 | %% and as a string for a domain name. 
95 | {webui_bind_address, {127,0,0,1}}, 96 | 97 | %% The port to use for the webui 98 | {webui_port, 8080}, 99 | 100 | %% Enable profiling; do not enable unless you need it 101 | {profiling, false} 102 | ]}, 103 | {cascadae, 104 | [{webui, true}, 105 | {webui_port, 1080}]}, 106 | {azdht, 107 | [{listen_port, {{azdht_port}}}, 108 | {state_filename, "{{azdht_state}}"}]}, 109 | {lager, 110 | [{handlers, 111 | [{lager_console_backend, notice}, 112 | {lager_file_backend, 113 | [{"log/error.log", error, 10485760, "$D0", 5}, 114 | {"log/console.log", info, 10485760, "$D0", 5}, 115 | {"log/debug.log", debug, 10485760, "$D0", 5} 116 | ]} 117 | ]} 118 | ]}, 119 | {kernel, 120 | [{start_timer, true}]}, 121 | {sasl, 122 | [{sasl_error_logger, {file, "{{sasl_error_log}}"}}, 123 | {errlog_type, error}]} 124 | ]. 125 | -------------------------------------------------------------------------------- /rel/vm.args: -------------------------------------------------------------------------------- 1 | ## Name of the node 2 | -name etorrent@127.0.0.1 3 | 4 | ## Cookie for distributed erlang 5 | -setcookie etorrent 6 | 7 | ## Heartbeat management; auto-restarts VM if it dies or becomes unresponsive 8 | ## (Disabled by default..use with caution!) 9 | ##-heart 10 | 11 | ## Enable kernel poll and a few async threads 12 | +K true 13 | +A 12 14 | 15 | ## Increase number of concurrent ports/sockets 16 | -env ERL_MAX_PORTS 4096 17 | 18 | ## Tweak GC to run more often 19 | -env ERL_FULLSWEEP_AFTER 10 20 | -------------------------------------------------------------------------------- /relx-dev.config: -------------------------------------------------------------------------------- 1 | {release, {etorrent, "1.2.1"}, 2 | [{upnp, load}, 3 | {azdht, load}, 4 | {etorrent_core, permanent} ]}. 5 | 6 | {extended_start_script, true}. 7 | {sys_config, "rel/dev.sys.config"}. 
8 | 9 | {overlay, [{mkdir, "log"}, 10 | {mkdir, "spool"}, 11 | {mkdir, "data"}, 12 | {copy, "rel/vm.args", 13 | "releases/\{\{release_version\}\}/vm.args"} 14 | ]}. 15 | 16 | 17 | -------------------------------------------------------------------------------- /relx.config: -------------------------------------------------------------------------------- 1 | {release, {etorrent, "1.2.1"}, 2 | [{etorrent_core, permanent} ]}. 3 | 4 | {extended_start_script, true}. 5 | {sys_config, "rel/sys.config"}. 6 | 7 | {overlay, [{mkdir, "log"}, 8 | {mkdir, "spool"}, 9 | {mkdir, "data"}, 10 | {copy, "rel/vm.args", 11 | "releases/\{\{release_version\}\}/vm.args"} 12 | ]}. 13 | 14 | 15 | -------------------------------------------------------------------------------- /scratch: -------------------------------------------------------------------------------- 1 | Links: 2 | 3 | http://github.com/jlouis/etorrent 4 | http://github.com/jlouis/etorrent_core 5 | http://localhost:8888 - Normal old-style webui code here. 6 | 7 | TODO: 8 | · [GH/jlouis/etorrent#140] Fix the release! 9 | · DONE Push change to the upnp repository: It needs ranch! 10 | fork! 11 | Push! 12 | · DONE Push change to etorrent_core since it refers cascadae as part of it. We then need this dependency. 13 | · DONE Push changes to etorrent which will fix stuff. 14 | · DONE Make it possible to run tests from etorrent release again. 15 | · DONE We need common_test in the release. 
16 | 17 | · [GH/jlouis/etorrent#143] make test investigation 18 | · Investigate what happens on a `make test' 19 | 20 | · GH/jlouis/etorrent_core#8 Fix the problem when completing a torrent: 21 | {{badarg,[{etorrent_peerstate,choked,2, 22 | [{file,"src/etorrent_peerstate.erl"}, 23 | {line,120}]}, 24 | {etorrent_peer_control,handle_message,2, 25 | [{file,"src/etorrent_peer_control.erl"}, 26 | {line,535}]}, 27 | {etorrent_peer_control,handle_cast,2, 28 | [{file,"src/etorrent_peer_control.erl"}, 29 | {line,319}]}, 30 | {gen_server,handle_msg,5,[{file,"gen_server.erl"},{line,607}]}, 31 | {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]}, 32 | [{gen_server,terminate,6,[{file,"gen_server.erl"},{line,747}]}, 33 | {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} 34 | · Investigate the following crash report: 35 | 36 | The problem here seems to manifest itself just after a switch to endgame mode. This is 37 | peculiar since it looks like this endgame mode switch messes up the current state. 
38 | 39 | 2012-10-20 17:52:16 =CRASH REPORT==== 40 | crasher: 41 | initial call: etorrent_peer_control:init/1 42 | pid: <0.3631.0> 43 | registered_name: [] 44 | exception exit: 45 | {{noproc, 46 | {gen_server,call, 47 | ['<0.1110.0>', 48 | {chunk, 49 | {request,15, 50 | {pieceset,1385,undefined, 51 | «255,255,255,255,255,255,255,255,255,255,255,255,255, 52 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 53 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 54 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 55 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 56 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 57 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 58 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 59 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 60 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 61 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 62 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 63 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 64 | 255,255,255,255,1:1»}, 65 | '<0.3631.0>'}}]}}, 66 | [{gen_server,terminate,6,[{file,"gen_server.erl"},{line,747}]}, 67 | {proc_lib,init_p_do_apply,3,[{file,"proc_lib.erl"},{line,227}]}]} 68 | 69 | ancestors: [<0.1113.0>,<0.1099.0>,etorrent_torrent_pool,etorrent_sup,<0.1064.0>] 70 | messages: [] 71 | links: [<0.3632.0>,<0.3634.0>,<0.1113.0>] 72 | dictionary: [{random_seed,{1351,20413,9934}}] 73 | trap_exit: false 74 | status: running 75 | heap_size: 2584 76 | stack_size: 24 77 | reductions: 177260 78 | neighbours: 79 | neighbour: 80 | [{pid,'<0.3634.0>'}, 81 | {registered_name,[]}, 82 | {initial_call,{etorrent_peer_send,init,['Argument__1']}}, 83 | {current_function,{gen_server,loop,6}}, 84 | {ancestors,['<0.3631.0>','<0.1113.0>','<0.1099.0>',etorrent_torrent_pool, 85 | etorrent_sup,'<0.1064.0>']}, 86 | {messages,[]}, 87 | {links,['<0.3631.0>']}, 88 | {dictionary,[]}, 89 | {trap_exit,false}, 90 | {status,waiting}, 91 | 
{heap_size,610}, 92 | {stack_size,9}, 93 | {reductions,210956}] 94 | 95 | neighbour: 96 | [{pid,'<0.3632.0>'}, 97 | {registered_name,[]}, 98 | {initial_call,{etorrent_peer_recv,init,['Argument__1']}}, 99 | {current_function,{gen_server,loop,6}}, 100 | {ancestors,['<0.3631.0>','<0.1113.0>','<0.1099.0>',etorrent_torrent_pool, 101 | etorrent_sup,'<0.1064.0>']}, 102 | {messages,[]}, 103 | {links,['<0.3631.0>','<0.10258.19>','#Port<0.5951>']}, 104 | {dictionary,[]}, 105 | {trap_exit,false}, 106 | {status,waiting}, 107 | {heap_size,1597}, 108 | {stack_size,9}, 109 | {reductions,486780}] 110 | 111 | 2012-10-20 17:52:16 =SUPERVISOR REPORT==== 112 | Supervisor: {<0.1113.0>,etorrent_peer_pool} 113 | Context: child_terminated 114 | 115 | Reason: {noproc, 116 | {gen_server,call, 117 | ['<0.1110.0>', 118 | {chunk, 119 | {request,15, 120 | {pieceset,1385,undefined, 121 | «255,255,255,255,255,255,255,255,255,255,255,255,255, 122 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 123 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 124 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 125 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 126 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 127 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 128 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 129 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 130 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 131 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 132 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 133 | 255,255,255,255,255,255,255,255,255,255,255,255,255, 134 | 255,255,255,255,1:1»}, 135 | '<0.3631.0>'}}]}} 136 | 137 | Offender: 138 | [{pid,'<0.3631.0>'}, 139 | {name,child}, 140 | {mfargs,{etorrent_peer_control,start_link,undefined}}, 141 | {restart_type,temporary}, 142 | {shutdown,5000}, 143 | {child_type,worker}] 144 | 145 | · Split off the DHT code after looking a bit at it. 
146 | It does not look like the DHT code should be part of etorrent directly. Rather, 147 | I'd like it to be separate and then take it from there. 148 | 149 | So, it turns out that we can probably move it out. You need to grab dht_state and 150 | dht_net and pick them outside as well as their general supervisor. The dht_tracker 151 | code is actually the etorrent-proper parts. 152 | 153 | But it also means we must redefine the code such that configuration becomes injected 154 | rather than asked for. This way, it should be possible to maintain a proper, separate, 155 | application which maintains the state of the DHT tables for us inside the application. 156 | · Test single-torrent download of something 157 | This ought to work if we try it out. Let us try with an ubuntu image: 158 | 159 | http://releases.ubuntu.com/12.04/ubuntu-12.04.1-alternate-i386.iso.torrent 160 | 161 | · Split webui into its own piece. 162 | There is no need to have this in the main etorrent application, since it is a thing 163 | which is UI. 164 | · Consider how to split etorrent_core into multiple pieces. 165 | · DHT 166 | · WebUI stuff 167 | · Think about how to run a system like cascadae together with etorrent. 168 | · Analysis: 169 | Obviously, we should not need to have a dependency on cascadae in etorrent. 170 | It is the other way around: cascadae should use etorrent as a dependency, specifically 171 | it should be etorrent_core. 172 | 173 | Also, etorrent should not have to be bound together with cascadae. It is the wrong 174 | type of coupling we are looking into there. Instead, we would like to have a thin 175 | modular layer of commands which can be used by cascadae to communicate 176 | with etorrent_core and obtain information about it. 177 | · Solution: 178 | Make etorrent non-dependent on cascadae. We should be able to build a release 179 | entirely without any mention of it. Otherwise we failed decoupling. 
180 | · Ranch support, see GH/jlouis/etorrent_core#5 181 | · Use more canonical t() types. 182 | · Kill renames of types where possible. 183 | I don't like to have to dig for a type. I'd much rather know the exact type 184 | at the beginning. Writing it down is not much more space and it is way more 185 | readable if you have the full type right there. It is easier to read and it is easier 186 | to understand what is going on then. 187 | 188 | Aliasing and renames does not help readability. It lowers it. The sacrifice is that 189 | it takes longer to write the code, but I don't care that much about that. Code is read 190 | more than it is written anyway. 191 | 192 | DONE: 193 | · Wrong supervisor kills, see GH/jlouis/etorrent_core#7 194 | There is a problem when closing down which generates crashes. This is wrong: 195 | 2012-10-14 19:57:10.201 [error] <0.26576.0> 196 | Supervisor etorrent_peer_sup had child receiver started with 197 | etorrent_peer_recv:start_link(1, #Port<0.5987>) at <0.26579.0> exit with reason normal 198 | in context child_terminated 199 | The solution to this problem is to avoid having a supervisor called `etorrent_peer_sup` for it. We should 200 | just use the peer_control process to manage the subtree. 201 | 202 | · Make new dialyzer code part of etorrent 203 | · Make it possible to dialyze etorrent again. 204 | The problem here is mimetypes which can not be dialyzed, 205 | so we probably need to exclude it from the list of things we want 206 | to consider when we are running dialyzer checks. 207 | · Make etorrent_core non-dependent on cascadae. 208 | This should not be the case. 209 | · GH/jlouis/etorrent#136 210 | · This makes the release construction fail 211 | · GH/jlouis/etorrent_core#4 212 | · Make the code compile again 213 | This requires a step where the cascadae code gets updated. 214 | We have prodded Mr. Uvarov about it. 
215 | · Fix the following error: GH/jlouis/etorrent_core#6 216 | (etorrent@127.0.0.1)1> 2012-10-14 18:04:22.347 [error] <0.1072.0> 217 | CRASH REPORT Process <0.1072.0> with 0 neighbours exited with reason: 218 | no case clause matching 219 | {error,{file_error,"/Users/jlouis/etorrent/spool/fast_resume_state.dets", 220 | enoent}} 221 | in etorrent_fast_resume:init/1 line 107 in gen_server:init_it/6 line 328 222 | · Test etorrent 223 | · First step: Compile Erlang for Myrddraal 224 | · Second step: Compile etorrent on Myrddraal so we have a working beast. 225 | · Third step: Test etorrent on Myrddraal, make sure the correct hole is punched in the FW 226 | There is a hole in the firewall. 227 | 228 | 229 | 230 | DONE 231 | ---------------------------------------------------- 232 | 233 | -------------------------------------------------------------------------------- /start-dev.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | cd `dirname $0` 3 | 4 | # NOTE: mustache templates need \ because they are not awesome. 5 | exec erl -pa $PWD/ebin edit $PWD/deps/*/ebin \ 6 | -boot start_sasl \ 7 | -sname etorrent \ 8 | -s etorrent_app \ 9 | -config ~/.config/etorrent 10 | 11 | -------------------------------------------------------------------------------- /test/etorrent.cover: -------------------------------------------------------------------------------- 1 | {incl_app, etorrent_core, details}. 2 | -------------------------------------------------------------------------------- /test/etorrent_SUITE.erl: -------------------------------------------------------------------------------- 1 | -module(etorrent_SUITE). 2 | 3 | -include_lib("common_test/include/ct.hrl"). 4 | 5 | -export([suite/0, all/0, groups/0, 6 | init_per_group/2, end_per_group/2, 7 | init_per_suite/1, end_per_suite/1, 8 | init_per_testcase/2, end_per_testcase/2]). 
9 | 10 | -export([seed_leech/0, seed_leech/1, 11 | udp_seed_leech/0, udp_seed_leech/1, 12 | down_udp_tracker/0, down_udp_tracker/1, 13 | choked_reject/0, choked_reject/1, 14 | seed_transmission/0, seed_transmission/1, 15 | leech_transmission/0, leech_transmission/1, 16 | bep9/0, bep9/1, 17 | partial_downloading/0, partial_downloading/1, 18 | checking/0, checking/1, 19 | find_local_peers/0, find_local_peers/1 20 | ]). 21 | 22 | 23 | suite() -> 24 | application:start(crypto), 25 | [{timetrap, {minutes, 5}}]. 26 | 27 | %% Setup/Teardown 28 | %% ---------------------------------------------------------------------- 29 | init_per_group(_Group, Config) -> 30 | Config. 31 | 32 | end_per_group(_Group, _Config) -> 33 | ok. 34 | 35 | init_per_suite(Config) -> 36 | %% Check or create read-only files, put them into data_dir. 37 | %% Many of this files will be copied into node directories. 38 | Directory = ?config(data_dir, Config), 39 | %% file:set_cwd(Directory), 40 | ct:pal("Data directory: ~s~n", [Directory]), 41 | %% Autogenerated random files. 42 | AutoDir = filename:join(Directory, autogen), 43 | %% This is a directory, with partically downloaded versions of files 44 | %% from AutoDir. 
45 | BrokenDir = filename:join(AutoDir, broken), 46 | file:make_dir(AutoDir), 47 | file:make_dir(BrokenDir), 48 | Fn = filename:join([AutoDir, "file30m.random"]), 49 | BrokenFn = filename:join([BrokenDir, "file30m.random"]), 50 | Dir = filename:join([AutoDir, "dir2x30m"]), 51 | DumpTorrentFn = filename:join([AutoDir, "file30m-trackerless.torrent"]), 52 | HTTPTorrentFn = filename:join([AutoDir, "file30m-http.torrent"]), 53 | UDPTorrentFn = filename:join([AutoDir, "file30m-udp.torrent"]), 54 | DirTorrentFn = filename:join([AutoDir, "dir2x30m.torrent"]), 55 | BadUDPTorrentFn = filename:join([AutoDir, "file30m-bad-udp.torrent"]), 56 | BadUDPDirTorrentFn = filename:join([AutoDir, "dir2x30m-bad-udp.torrent"]), 57 | 58 | ensure_random_file(Fn), 59 | ensure_broken_file(Fn, BrokenFn), 60 | ct:pal("Checking, that a broken version is different... "), 61 | [error(files_are_equal) || compare_file_contents(Fn, BrokenFn)], 62 | ensure_random_dir(Dir), 63 | 64 | ensure_torrent_file(Fn, HTTPTorrentFn, "http://localhost:6969/announce"), 65 | ensure_torrent_file(Fn, UDPTorrentFn, "udp://localhost:6969/announce"), 66 | ensure_torrent_file(Dir, DirTorrentFn, "http://localhost:6969/announce"), 67 | ensure_torrent_file(Fn, BadUDPTorrentFn, "udp://localhost:6666/announce"), 68 | ensure_torrent_file(Dir, BadUDPDirTorrentFn, "udp://localhost:6666/announce"), 69 | ensure_torrent_file(Fn, DumpTorrentFn), 70 | %% Literal infohash. 71 | %% Both HTTP and UDP versions have the same infohash. 72 | {ok, TorrentIH} = etorrent_dotdir:info_hash(HTTPTorrentFn), 73 | {ok, DirTorrentIH} = etorrent_dotdir:info_hash(DirTorrentFn), 74 | %% Start slave nodes. 
75 | {ok, SeedNode} = test_server:start_node(seeder, slave, []), 76 | {ok, LeechNode} = test_server:start_node(leecher, slave, []), 77 | {ok, MiddlemanNode} = test_server:start_node(middleman, slave, []), 78 | {ok, ChokedSeedNode} = test_server:start_node(choked_seeder, slave, []), 79 | %% Run logger on the slave nodes 80 | [prepare_node(Node) 81 | || Node <- [SeedNode, LeechNode, MiddlemanNode, ChokedSeedNode]], 82 | [{trackerless_torrent_file, DumpTorrentFn}, 83 | {http_torrent_file, HTTPTorrentFn}, 84 | {udp_torrent_file, UDPTorrentFn}, 85 | {dir_torrent_file, DirTorrentFn}, 86 | {bad_udp_torrent_file, BadUDPTorrentFn}, 87 | {bad_udp_dir_torrent_file, BadUDPDirTorrentFn}, 88 | 89 | %% Names of data on which torrents are based. 90 | {data_filename, Fn}, 91 | {broken_data_filename, BrokenFn}, 92 | {data_dirname, Dir}, 93 | 94 | {info_hash_hex, TorrentIH}, 95 | {dir_info_hash_hex, DirTorrentIH}, 96 | 97 | {info_hash_int, hex_to_int_hash(TorrentIH)}, 98 | {dir_info_hash_int, hex_to_int_hash(DirTorrentIH)}, 99 | 100 | {info_hash_bin, hex_to_bin_hash(TorrentIH)}, 101 | {dir_info_hash_bin, hex_to_bin_hash(DirTorrentIH)}, 102 | 103 | {leech_node, LeechNode}, 104 | {middleman_node, MiddlemanNode}, 105 | {choked_seed_node, ChokedSeedNode}, 106 | {seed_node, SeedNode} | Config]. 107 | 108 | 109 | 110 | end_per_suite(Config) -> 111 | LN = ?config(leech_node, Config), 112 | SN = ?config(seed_node, Config), 113 | MN = ?config(middleman_node, Config), 114 | CN = ?config(choked_seed_node, Config), 115 | test_server:stop_node(SN), 116 | test_server:stop_node(LN), 117 | test_server:stop_node(MN), 118 | test_server:stop_node(CN), 119 | ok. 
120 | 121 | 122 | init_per_testcase(leech_transmission, Config) -> 123 | %% transmission => etorrent 124 | PrivDir = ?config(priv_dir, Config), 125 | DataDir = ?config(data_dir, Config), 126 | TorrentFn = ?config(http_torrent_file, Config), 127 | Node = ?config(leech_node, Config), 128 | Fn = ?config(data_filename, Config), 129 | TrackerPid = start_opentracker(DataDir), 130 | %% Transmission's working directory 131 | TranDir = filename:join([PrivDir, transmission]), 132 | NodeDir = filename:join([PrivDir, leech]), 133 | BaseFn = filename:basename(Fn), 134 | SrcFn = filename:join([TranDir, BaseFn]), 135 | DestFn = filename:join([NodeDir, "downloads", BaseFn]), 136 | file:make_dir(TranDir), 137 | {ok, _} = copy_to(filename:join([DataDir, "transmission", "settings.json"]), 138 | TranDir), 139 | %% Feed transmission the file to work with 140 | {ok, _} = file:copy(Fn, SrcFn), 141 | {Ref, Pid} = start_transmission(DataDir, TranDir, TorrentFn), 142 | ok = ct:sleep({seconds, 10}), %% Wait for transmission to start up 143 | create_standard_directory_layout(NodeDir), 144 | NodeConf = leech_configuration(NodeDir), 145 | start_app(Node, NodeConf), %% Start etorrent on the leecher node 146 | [{tracker_port, TrackerPid}, 147 | {transmission_port, {Ref, Pid}}, 148 | {src_filename, SrcFn}, 149 | {dest_filename, DestFn}, 150 | {transmission_dir, TranDir}, 151 | {node_dir, NodeDir} | Config]; 152 | init_per_testcase(seed_transmission, Config) -> 153 | %% etorrent => transmission 154 | PrivDir = ?config(priv_dir, Config), 155 | DataDir = ?config(data_dir, Config), 156 | TorrentFn = ?config(http_torrent_file, Config), 157 | Node = ?config(seed_node, Config), 158 | Fn = ?config(data_filename, Config), 159 | TrackerPid = start_opentracker(DataDir), 160 | %% Transmission's working directory 161 | TranDir = filename:join([PrivDir, transmission]), 162 | NodeDir = filename:join([PrivDir, seed]), 163 | BaseFn = filename:basename(Fn), 164 | SrcFn = filename:join([NodeDir, "downloads", 
BaseFn]), 165 | DestFn = filename:join([TranDir, BaseFn]), 166 | file:make_dir(TranDir), 167 | {ok, _} = copy_to(filename:join([DataDir, "transmission", "settings.json"]), 168 | TranDir), 169 | {Ref, Pid} = start_transmission(DataDir, TranDir, TorrentFn), 170 | ok = ct:sleep({seconds, 8}), %% Wait for transmission to start up 171 | NodeDir = filename:join([PrivDir, seed]), 172 | create_standard_directory_layout(NodeDir), 173 | NodeConf = seed_configuration(NodeDir), 174 | %% Feed etorrent the file to work with 175 | {ok, _} = file:copy(Fn, SrcFn), 176 | %% Copy torrent-file to torrents-directory 177 | {ok, _} = copy_to(TorrentFn, ?config(dir, NodeConf)), 178 | start_app(Node, NodeConf), 179 | [{tracker_port, TrackerPid}, 180 | {transmission_port, {Ref, Pid}}, 181 | {src_filename, SrcFn}, 182 | {dest_filename, DestFn}, 183 | {transmission_dir, TranDir}, 184 | {node_dir, NodeDir} | Config]; 185 | init_per_testcase(seed_leech, Config) -> 186 | %% etorrent => etorrent 187 | PrivDir = ?config(priv_dir, Config), 188 | DataDir = ?config(data_dir, Config), 189 | TorrentFn = ?config(http_torrent_file, Config), 190 | SNode = ?config(seed_node, Config), 191 | LNode = ?config(leech_node, Config), 192 | Fn = ?config(data_filename, Config), 193 | TrackerPid = start_opentracker(DataDir), 194 | SNodeDir = filename:join([PrivDir, seed]), 195 | LNodeDir = filename:join([PrivDir, leech]), 196 | BaseFn = filename:basename(Fn), 197 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 198 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 199 | create_standard_directory_layout(SNodeDir), 200 | create_standard_directory_layout(LNodeDir), 201 | SNodeConf = seed_configuration(SNodeDir), 202 | LNodeConf = leech_configuration(LNodeDir), 203 | %% Feed etorrent the file to work with 204 | {ok, _} = file:copy(Fn, SrcFn), 205 | %% Copy torrent-file to torrents-directory 206 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 207 | start_app(SNode, SNodeConf), 208 | 
start_app(LNode, LNodeConf), 209 | [{tracker_port, TrackerPid}, 210 | {src_filename, SrcFn}, 211 | {dest_filename, DestFn}, 212 | {seed_node_dir, SNodeDir}, 213 | {leech_node_dir, LNodeDir} | Config]; 214 | init_per_testcase(checking, Config) -> 215 | %% etorrent => etorrent 216 | PrivDir = ?config(priv_dir, Config), 217 | DataDir = ?config(data_dir, Config), 218 | TorrentFn = ?config(http_torrent_file, Config), 219 | SNode = ?config(seed_node, Config), 220 | LNode = ?config(leech_node, Config), 221 | Fn = ?config(data_filename, Config), 222 | BrokenFn = ?config(broken_data_filename, Config), 223 | TrackerPid = start_opentracker(DataDir), 224 | SNodeDir = filename:join([PrivDir, seed]), 225 | LNodeDir = filename:join([PrivDir, leech]), 226 | BaseFn = filename:basename(Fn), 227 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 228 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 229 | create_standard_directory_layout(SNodeDir), 230 | create_standard_directory_layout(LNodeDir), 231 | SNodeConf = seed_configuration(SNodeDir), 232 | LNodeConf = leech_configuration(LNodeDir), 233 | %% Feed etorrent the file to work with 234 | {ok, _} = file:copy(Fn, SrcFn), 235 | {ok, _} = file:copy(BrokenFn, DestFn), 236 | [error(files_are_equal) || compare_file_contents(SrcFn, DestFn)], 237 | %% Copy torrent-file to torrents-directory 238 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 239 | start_app(SNode, SNodeConf), 240 | start_app(LNode, LNodeConf), 241 | [{tracker_port, TrackerPid}, 242 | {src_filename, SrcFn}, 243 | {dest_filename, DestFn}, 244 | {seed_node_dir, SNodeDir}, 245 | {leech_node_dir, LNodeDir} | Config]; 246 | init_per_testcase(udp_seed_leech, Config) -> 247 | %% etorrent => etorrent 248 | PrivDir = ?config(priv_dir, Config), 249 | DataDir = ?config(data_dir, Config), 250 | TorrentFn = ?config(udp_torrent_file, Config), 251 | SNode = ?config(seed_node, Config), 252 | LNode = ?config(leech_node, Config), 253 | Fn = ?config(data_filename, 
Config), 254 | TrackerPid = start_opentracker(DataDir), 255 | SNodeDir = filename:join([PrivDir, seed]), 256 | LNodeDir = filename:join([PrivDir, leech]), 257 | BaseFn = filename:basename(Fn), 258 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 259 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 260 | create_standard_directory_layout(SNodeDir), 261 | create_standard_directory_layout(LNodeDir), 262 | SNodeConf = seed_configuration(SNodeDir), 263 | LNodeConf = leech_configuration(LNodeDir), 264 | %% Feed etorrent the file to work with 265 | {ok, _} = file:copy(Fn, SrcFn), 266 | %% Copy torrent-file to torrents-directory 267 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 268 | start_app(SNode, SNodeConf), 269 | start_app(LNode, LNodeConf), 270 | ok = ct:sleep({seconds, 5}), 271 | [{tracker_port, TrackerPid}, 272 | {src_filename, SrcFn}, 273 | {dest_filename, DestFn}, 274 | {seed_node_dir, SNodeDir}, 275 | {leech_node_dir, LNodeDir} | Config]; 276 | init_per_testcase(down_udp_tracker, Config) -> 277 | PrivDir = ?config(priv_dir, Config), 278 | LNode = ?config(leech_node, Config), 279 | LNodeDir = filename:join([PrivDir, leech]), 280 | create_standard_directory_layout(LNodeDir), 281 | LNodeConf = leech_configuration(LNodeDir), 282 | start_app(LNode, LNodeConf), 283 | ok = ct:sleep({seconds, 5}), 284 | [{leech_node_dir, LNodeDir} | Config]; 285 | init_per_testcase(choked_seed_leech, Config) -> 286 | %% etorrent => etorrent, one seed is choked (refuse to work). 
287 | PrivDir = ?config(priv_dir, Config), 288 | DataDir = ?config(data_dir, Config), 289 | TorrentFn = ?config(http_torrent_file, Config), 290 | SNode = ?config(seed_node, Config), 291 | CNode = ?config(choked_seed_node, Config), 292 | LNode = ?config(leech_node, Config), 293 | Fn = ?config(data_filename, Config), 294 | TrackerPid = start_opentracker(DataDir), 295 | SNodeDir = filename:join([PrivDir, seed]), 296 | LNodeDir = filename:join([PrivDir, leech]), 297 | CNodeDir = filename:join([PrivDir, choked_seed]), 298 | BaseFn = filename:basename(Fn), 299 | SSrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 300 | CSrcFn = filename:join([CNodeDir, "downloads", BaseFn]), 301 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 302 | create_standard_directory_layout(SNodeDir), 303 | create_standard_directory_layout(LNodeDir), 304 | create_standard_directory_layout(CNodeDir), 305 | SNodeConf = seed_configuration(SNodeDir), 306 | LNodeConf = leech_configuration(LNodeDir), 307 | CNodeConf = choked_seed_configuration(CNodeDir), 308 | %% Feed etorrent the file to work with 309 | {ok, _} = file:copy(Fn, SSrcFn), 310 | {ok, _} = file:copy(Fn, CSrcFn), 311 | %% Copy torrent-file to torrents-directory 312 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 313 | {ok, _} = copy_to(TorrentFn, ?config(dir, CNodeConf)), 314 | start_app(CNode, CNodeConf), 315 | start_app(SNode, SNodeConf), 316 | start_app(LNode, LNodeConf), 317 | ok = ct:sleep({seconds, 5}), 318 | [{tracker_port, TrackerPid}, 319 | {src_filename, SSrcFn}, 320 | {dest_filename, DestFn}, 321 | {choked_seed_node_dir, CNodeDir}, 322 | {seed_node_dir, SNodeDir}, 323 | {leech_node_dir, LNodeDir} | Config]; 324 | init_per_testcase(choked_reject, Config) -> 325 | %% etorrent => etorrent, seed is choked (refuse to work). 
326 | PrivDir = ?config(priv_dir, Config), 327 | DataDir = ?config(data_dir, Config), 328 | TorrentFn = ?config(http_torrent_file, Config), 329 | SNode = ?config(seed_node, Config), 330 | LNode = ?config(leech_node, Config), 331 | Fn = ?config(data_filename, Config), 332 | TrackerPid = start_opentracker(DataDir), 333 | SNodeDir = filename:join([PrivDir, seed]), 334 | LNodeDir = filename:join([PrivDir, leech]), 335 | BaseFn = filename:basename(Fn), 336 | SSrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 337 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 338 | create_standard_directory_layout(SNodeDir), 339 | create_standard_directory_layout(LNodeDir), 340 | SNodeConf = seed_configuration(SNodeDir), 341 | LNodeConf = leech_configuration(LNodeDir), 342 | %% Feed etorrent the file to work with 343 | {ok, _} = file:copy(Fn, SSrcFn), 344 | %% Copy torrent-file to torrents-directory 345 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 346 | start_app(SNode, SNodeConf), 347 | start_app(LNode, LNodeConf), 348 | ok = ct:sleep({seconds, 5}), 349 | [{tracker_port, TrackerPid}, 350 | {src_filename, SSrcFn}, 351 | {dest_filename, DestFn}, 352 | {seed_node_dir, SNodeDir}, 353 | {leech_node_dir, LNodeDir} | Config]; 354 | init_per_testcase(partial_downloading, Config) -> 355 | %% etorrent => etorrent 356 | %% passing a part of a directory 357 | PrivDir = ?config(priv_dir, Config), 358 | DataDir = ?config(data_dir, Config), 359 | TorrentFn = ?config(dir_torrent_file, Config), 360 | SNode = ?config(seed_node, Config), 361 | LNode = ?config(leech_node, Config), 362 | Fn = ?config(data_dirname, Config), 363 | TrackerPid = start_opentracker(DataDir), 364 | SNodeDir = filename:join([PrivDir, seed]), 365 | LNodeDir = filename:join([PrivDir, leech]), 366 | BaseFn = filename:basename(Fn), 367 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 368 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 369 | create_standard_directory_layout(SNodeDir), 
370 | create_standard_directory_layout(LNodeDir), 371 | SNodeConf = seed_configuration(SNodeDir), 372 | LNodeConf = leech_configuration(LNodeDir), 373 | %% Feed etorrent the directory to work with 374 | copy_r(Fn, SrcFn), 375 | %% Copy torrent-file to torrents-directory 376 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 377 | start_app(SNode, SNodeConf), 378 | start_app(LNode, LNodeConf), 379 | ok = ct:sleep({seconds, 5}), 380 | [{tracker_port, TrackerPid}, 381 | {src_filename, SrcFn}, 382 | {dest_filename, DestFn}, 383 | {seed_node_dir, SNodeDir}, 384 | {leech_node_dir, LNodeDir} | Config]; 385 | init_per_testcase(find_local_peers, Config) -> 386 | %% etorrent => etorrent, using mDNS and bep26. 387 | PrivDir = ?config(priv_dir, Config), 388 | TorrentFn = ?config(trackerless_torrent_file, Config), 389 | SNode = ?config(seed_node, Config), 390 | LNode = ?config(leech_node, Config), 391 | Fn = ?config(data_filename, Config), 392 | SNodeDir = filename:join([PrivDir, seed]), 393 | LNodeDir = filename:join([PrivDir, leech]), 394 | BaseFn = filename:basename(Fn), 395 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 396 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 397 | create_standard_directory_layout(SNodeDir), 398 | create_standard_directory_layout(LNodeDir), 399 | SNodeConf = enable_mdns(seed_configuration(SNodeDir)), 400 | LNodeConf = enable_mdns(leech_configuration(LNodeDir)), 401 | %% Feed etorrent the file to work with 402 | {ok, _} = file:copy(Fn, SrcFn), 403 | %% Copy torrent-file to torrents-directory 404 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 405 | start_app(SNode, SNodeConf), 406 | start_app(LNode, LNodeConf), 407 | [{src_filename, SrcFn}, 408 | {dest_filename, DestFn}, 409 | {seed_node_dir, SNodeDir}, 410 | {leech_node_dir, LNodeDir} | Config]; 411 | init_per_testcase(bep9, Config) -> 412 | %% etorrent => etorrent, using DHT and bep9. 413 | %% Middleman is an empty node. 
414 | PrivDir = ?config(priv_dir, Config), 415 | TorrentFn = ?config(trackerless_torrent_file, Config), 416 | SNode = ?config(seed_node, Config), 417 | MNode = ?config(middleman_node, Config), 418 | LNode = ?config(leech_node, Config), 419 | Fn = ?config(data_filename, Config), 420 | SNodeDir = filename:join([PrivDir, seed]), 421 | LNodeDir = filename:join([PrivDir, leech]), 422 | MNodeDir = filename:join([PrivDir, middleman]), 423 | BaseFn = filename:basename(Fn), 424 | SrcFn = filename:join([SNodeDir, "downloads", BaseFn]), 425 | DestFn = filename:join([LNodeDir, "downloads", BaseFn]), 426 | create_standard_directory_layout(SNodeDir), 427 | create_standard_directory_layout(LNodeDir), 428 | create_standard_directory_layout(MNodeDir), 429 | SNodeConf = enable_dht(seed_configuration(SNodeDir)), 430 | LNodeConf = enable_dht(leech_configuration(LNodeDir)), 431 | MNodeConf = enable_dht(middleman_configuration(MNodeDir)), 432 | %% Feed etorrent the file to work with 433 | {ok, _} = file:copy(Fn, SrcFn), 434 | %% Copy torrent-file to torrents-directory 435 | {ok, _} = copy_to(TorrentFn, ?config(dir, SNodeConf)), 436 | start_app(MNode, MNodeConf), 437 | start_app(SNode, SNodeConf), 438 | start_app(LNode, LNodeConf), 439 | [{src_filename, SrcFn}, 440 | {dest_filename, DestFn}, 441 | {middleman_node_dir, MNodeDir}, 442 | {seed_node_dir, SNodeDir}, 443 | {leech_node_dir, LNodeDir} | Config]. 
444 | 445 | 446 | end_per_testcase(leech_transmission, Config) -> 447 | LNode = ?config(leech_node, Config), 448 | {_Ref, Pid} = ?config(transmission_port, Config), 449 | TranDir = ?config(transmission_dir, Config), 450 | NodeDir = ?config(node_dir, Config), 451 | stop_transmission(Pid), 452 | stop_app(LNode), 453 | clean_transmission_directory(TranDir), 454 | clean_standard_directory_layout(NodeDir), 455 | stop_opentracker(?config(tracker_port, Config)), 456 | ok; 457 | end_per_testcase(seed_transmission, Config) -> 458 | SNode = ?config(seed_node, Config), 459 | {_Ref, Pid} = ?config(transmission_port, Config), 460 | TranDir = ?config(transmission_dir, Config), 461 | NodeDir = ?config(node_dir, Config), 462 | stop_transmission(Pid), 463 | stop_app(SNode), 464 | clean_transmission_directory(TranDir), 465 | clean_standard_directory_layout(NodeDir), 466 | stop_opentracker(?config(tracker_port, Config)), 467 | ok; 468 | end_per_testcase(seed_leech, Config) -> 469 | SNode = ?config(seed_node, Config), 470 | LNode = ?config(leech_node, Config), 471 | SNodeDir = ?config(seed_node_dir, Config), 472 | LNodeDir = ?config(leech_node_dir, Config), 473 | stop_app(SNode), 474 | stop_app(LNode), 475 | clean_standard_directory_layout(SNodeDir), 476 | clean_standard_directory_layout(LNodeDir), 477 | stop_opentracker(?config(tracker_port, Config)), 478 | ok; 479 | end_per_testcase(checking, Config) -> 480 | SNode = ?config(seed_node, Config), 481 | LNode = ?config(leech_node, Config), 482 | SNodeDir = ?config(seed_node_dir, Config), 483 | LNodeDir = ?config(leech_node_dir, Config), 484 | stop_app(SNode), 485 | stop_app(LNode), 486 | clean_standard_directory_layout(SNodeDir), 487 | clean_standard_directory_layout(LNodeDir), 488 | stop_opentracker(?config(tracker_port, Config)), 489 | ok; 490 | end_per_testcase(udp_seed_leech, Config) -> 491 | SNode = ?config(seed_node, Config), 492 | LNode = ?config(leech_node, Config), 493 | SNodeDir = ?config(seed_node_dir, Config), 494 | 
LNodeDir = ?config(leech_node_dir, Config), 495 | stop_app(SNode), 496 | stop_app(LNode), 497 | clean_standard_directory_layout(SNodeDir), 498 | clean_standard_directory_layout(LNodeDir), 499 | stop_opentracker(?config(tracker_port, Config)), 500 | ok; 501 | end_per_testcase(down_udp_tracker, Config) -> 502 | LNode = ?config(leech_node, Config), 503 | LNodeDir = ?config(leech_node_dir, Config), 504 | stop_app(LNode), 505 | clean_standard_directory_layout(LNodeDir), 506 | ok; 507 | end_per_testcase(choked_seed_leech, Config) -> 508 | SNode = ?config(seed_node, Config), 509 | LNode = ?config(leech_node, Config), 510 | CNode = ?config(choked_seed_node, Config), 511 | SNodeDir = ?config(seed_node_dir, Config), 512 | LNodeDir = ?config(leech_node_dir, Config), 513 | CNodeDir = ?config(choked_seed_node_dir, Config), 514 | stop_app(SNode), 515 | stop_app(LNode), 516 | stop_app(CNode), 517 | clean_standard_directory_layout(SNodeDir), 518 | clean_standard_directory_layout(LNodeDir), 519 | clean_standard_directory_layout(CNodeDir), 520 | stop_opentracker(?config(tracker_port, Config)), 521 | ok; 522 | end_per_testcase(choked_reject, Config) -> 523 | SNode = ?config(seed_node, Config), 524 | LNode = ?config(leech_node, Config), 525 | SNodeDir = ?config(seed_node_dir, Config), 526 | LNodeDir = ?config(leech_node_dir, Config), 527 | stop_app(SNode), 528 | stop_app(LNode), 529 | clean_standard_directory_layout(SNodeDir), 530 | clean_standard_directory_layout(LNodeDir), 531 | stop_opentracker(?config(tracker_port, Config)), 532 | ok; 533 | end_per_testcase(partial_downloading, Config) -> 534 | SNode = ?config(seed_node, Config), 535 | LNode = ?config(leech_node, Config), 536 | SNodeDir = ?config(seed_node_dir, Config), 537 | LNodeDir = ?config(leech_node_dir, Config), 538 | stop_app(SNode), 539 | stop_app(LNode), 540 | clean_standard_directory_layout(SNodeDir), 541 | clean_standard_directory_layout(LNodeDir), 542 | stop_opentracker(?config(tracker_port, Config)), 543 | ok; 544 | 
end_per_testcase(find_local_peers, Config) -> 545 | SNode = ?config(seed_node, Config), 546 | LNode = ?config(leech_node, Config), 547 | SNodeDir = ?config(seed_node_dir, Config), 548 | LNodeDir = ?config(leech_node_dir, Config), 549 | stop_app(SNode), 550 | stop_app(LNode), 551 | clean_standard_directory_layout(SNodeDir), 552 | clean_standard_directory_layout(LNodeDir), 553 | ok; 554 | end_per_testcase(bep9, Config) -> 555 | SNode = ?config(seed_node, Config), 556 | LNode = ?config(leech_node, Config), 557 | MNode = ?config(middleman_node, Config), 558 | SNodeDir = ?config(seed_node_dir, Config), 559 | LNodeDir = ?config(leech_node_dir, Config), 560 | MNodeDir = ?config(middleman_node_dir, Config), 561 | stop_app(SNode), 562 | stop_app(LNode), 563 | stop_app(MNode), 564 | clean_standard_directory_layout(SNodeDir), 565 | clean_standard_directory_layout(LNodeDir), 566 | clean_standard_directory_layout(MNodeDir), 567 | ok. 568 | 569 | %% Configuration 570 | %% ---------------------------------------------------------------------- 571 | standard_directory_layout(Dir, AppConf) -> 572 | [{dir, filename:join([Dir, "torrents"])}, 573 | {download_dir, filename:join([Dir, "downloads"])}, 574 | {dht_state, filename:join([Dir, "spool", "dht_state.dets"])}, 575 | {fast_resume_file, filename:join([Dir, "spool", "fast_resume.dets"])}, 576 | {logger_dir, filename:join([Dir, "logs"])}, 577 | {logger_fname, "leech_etorrent.log"} | AppConf]. 578 | 579 | create_standard_directory_layout(Dir) -> 580 | case filelib:is_dir(Dir) of 581 | true -> ct:pal("Directory exists ~ts.", [Dir]), error(dir_exists); 582 | false -> 583 | [filelib:ensure_dir(filename:join([Dir, SubDir, x])) 584 | || SubDir <- ["torrents", "downloads", "spool", "logs"]], 585 | ok 586 | end. 587 | 588 | clean_standard_directory_layout(Dir) -> 589 | del_r(Dir), 590 | ok. 591 | 592 | clean_transmission_directory(Dir) -> 593 | del_r(Dir), 594 | ok. 
595 | 596 | 597 | 598 | 599 | seed_configuration(Dir) -> 600 | [{listen_ip, {127,0,0,2}}, 601 | {port, 1741 }, 602 | {udp_port, 1742 }, 603 | {dht_port, 1743 }, 604 | {max_upload_rate, 1000} 605 | | standard_directory_layout(Dir, ct:get_config(common_conf))]. 606 | 607 | leech_configuration(Dir) -> 608 | [{listen_ip, {127,0,0,3}}, 609 | {port, 1751 }, 610 | {udp_port, 1752 }, 611 | {dht_port, 1753 }, 612 | {max_download_rate, 1000} 613 | | standard_directory_layout(Dir, ct:get_config(common_conf))]. 614 | 615 | middleman_configuration(Dir) -> 616 | [{listen_ip, {127,0,0,4}}, 617 | {port, 1761 }, 618 | {udp_port, 1762 }, 619 | {dht_port, 1763 } 620 | | standard_directory_layout(Dir, ct:get_config(common_conf))]. 621 | 622 | choked_seed_configuration(Dir) -> 623 | [{listen_ip, {127,0,0,5}}, 624 | {port, 1771 }, 625 | {udp_port, 1772 }, 626 | {dht_port, 1773 }, 627 | {max_upload_rate, 1000} 628 | | standard_directory_layout(Dir, ct:get_config(common_conf))]. 629 | 630 | 631 | %% Tests 632 | %% ---------------------------------------------------------------------- 633 | groups() -> 634 | Tests = [find_local_peers, 635 | seed_transmission, leech_transmission, seed_leech, 636 | partial_downloading, udp_seed_leech, bep9, 637 | down_udp_tracker, checking, choked_reject], 638 | % [{main_group, [shuffle], Tests}]. 639 | [{main_group, [], Tests}]. 640 | 641 | all() -> 642 | [{group, main_group}]. 643 | 644 | seed_transmission() -> 645 | [{require, common_conf, etorrent_common_config}]. 646 | 647 | %% Etorrent => Transmission 648 | seed_transmission(Config) -> 649 | io:format("~n======START SEED TRANSMISSION TEST CASE======~n", []), 650 | {Ref, _Pid} = ?config(transmission_port, Config), 651 | receive 652 | {Ref, done} -> ok 653 | after 654 | 120*1000 -> exit(timeout_error) 655 | end, 656 | sha1_file(?config(src_filename, Config)) 657 | =:= sha1_file(?config(dest_filename, Config)). 
%% Transmission => Etorrent
leech_transmission() ->
    [{require, common_conf, etorrent_common_config}].

%% Start the torrent on the leech node with a {Ref, Pid} completion
%% callback and wait for {Ref, done}.
leech_transmission(Config) ->
    io:format("~n======START LEECH TRANSMISSION TEST CASE======~n", []),
    %% Set callback and wait for torrent completion.
    {Ref, Pid} = {make_ref(), self()},
    {ok, _} = rpc:call(?config(leech_node, Config),
                       etorrent, start,
                       [?config(http_torrent_file, Config), {Ref, Pid}]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality: this comparison used to be discarded (common_test
    %% passes any normally-returning testcase), so a bad download could
    %% not fail the test.
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).

seed_leech() ->
    [{require, common_conf, etorrent_common_config}].

%% Etorrent seed => etorrent leech over the HTTP tracker.
seed_leech(Config) ->
    io:format("~n======START SEED AND LEECHING TEST CASE======~n", []),
    {Ref, Pid} = {make_ref(), self()},
    {ok, _} = rpc:call(?config(leech_node, Config),
                       etorrent, start,
                       [?config(http_torrent_file, Config), {Ref, Pid}]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality (previously the result was silently discarded).
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).

udp_seed_leech() ->
    [{require, common_conf, etorrent_common_config}].

%% Same as seed_leech, but the torrent announces over a UDP tracker.
udp_seed_leech(Config) ->
    io:format("~n======START SEED AND LEECHING USING UDP-ANNOUNCE TEST CASE======~n", []),
    {Ref, Pid} = {make_ref(), self()},
    {ok, _} = rpc:call(?config(leech_node, Config),
                       etorrent, start,
                       [?config(udp_torrent_file, Config), {Ref, Pid}]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality (previously the result was silently discarded).
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).

down_udp_tracker() ->
    [{require, common_conf, etorrent_common_config}].
%% Start two torrents whose UDP tracker is unreachable and verify that
%% etorrent_udp_tracker_mgr survives the connection timeouts.
down_udp_tracker(Config) ->
    io:format("~n======START DOWN-UDP-TRACKER TEST CASE======~n", []),
    LeechNode = ?config(leech_node, Config),
    %% This test case was added, because of the error in etorrent_core.
    %% GIT revision is f25a965205f8dfd2f71897a09cdd0b4ac9f677ae.
    %%
    %% etorrent_udp_tracker_mgr terminated with reason: no case clause matching
    %% [{{conn_id_req,{"localhost",6666}},<0.178.0>},{{conn_id_req,{"localhost",6666}},<0.198.0>}]
    %% in etorrent_udp_tracker_mgr:cancel_conn_id_req/1 line 250
    Ref = erlang:monitor(process, {etorrent_udp_tracker_mgr, LeechNode}),

    {ok, _} = rpc:call(LeechNode, etorrent_ctl, start,
                       [?config(bad_udp_torrent_file, Config),
                        [{udp_tracker_connection_timeout, 5000}]]),
    {ok, _} = rpc:call(LeechNode, etorrent_ctl, start,
                       [?config(bad_udp_dir_torrent_file, Config),
                        [{udp_tracker_connection_timeout, 5000}]]),
    %% A 'DOWN' message within 30 s means the manager crashed again.
    receive
        {'DOWN', Ref, _, _, Reason} -> error({manager_crashed, Reason})
    after 30000 -> ok
    end,
    true.

choked_reject() ->
    [{require, common_conf, etorrent_common_config}].

%% A fully choked seed (zero upload slots, very long choking round) must
%% not let the leech finish: receiving {Ref, done} within 20 s fails.
choked_reject(Config) ->
    io:format("~n======START CHOCKER REJECT TEST CASE======~n", []),
    {Ref, Pid} = {make_ref(), self()},

    LNode = ?config(leech_node, Config),
    SNode = ?config(seed_node, Config),
    BinIH = ?config(info_hash_bin, Config),
    STorrentID = wait_torrent_registration(SNode, BinIH),
    io:format("TorrentID on the choked_seed node is ~p.~n", [STorrentID]),

    {ok, LTorrentID} = rpc:call(LNode, etorrent, start,
                                [?config(http_torrent_file, Config), {Ref, Pid}]),

    io:format("TorrentID on the leech node is ~p.~n", [LTorrentID]),

    %% Leech peer control process on the slow node.
    LeechId = rpc:call(LNode, etorrent_ctl, local_peer_id, []),
    SPeerPid = wait_peer_registration(SNode, STorrentID, LeechId),
    io:format("Peer registered.~n", []),
    %% Make the seed refuse to upload: one enormous choking round, zero
    %% upload slots, and explicitly choke the leech's peer process.
    ok = rpc:call(SNode, etorrent_choker, set_round_time, [1000000]),
    ok = rpc:call(SNode, etorrent_choker, set_upload_slots, [0, 0]),
    ok = rpc:call(SNode, etorrent_peer_control, choke, [SPeerPid]),

    receive
        {Ref, done} -> error(marked_chocked_but_unchocked)
    after
        20*1000 -> ct:pal("PASSED"), ok
    end.

partial_downloading() ->
    [{require, common_conf, etorrent_common_config}].

%% Download a multi-file torrent with file #2 skipped; completing the
%% remaining files sends {Ref, done}.
partial_downloading(Config) ->
    io:format("~n======START PARTICAL DOWNLOADING TEST CASE======~n", []),
    {Ref, Pid} = {make_ref(), self()},
    LeechNode = ?config(leech_node, Config),
    {ok, TorrentID} = rpc:call(LeechNode,
                               etorrent, start,
                               [?config(dir_torrent_file, Config), {Ref, Pid}]),
    io:format("TorrentID on the leech node is ~p.~n", [TorrentID]),
    rpc:call(LeechNode, etorrent_torrent_ctl, skip_file, [TorrentID, 2]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end.


checking() ->
    [{require, common_conf, etorrent_common_config}].

%% Download over the HTTP tracker and verify the result byte-for-byte
%% against the source file.
checking(Config) ->
    io:format("~n======START CHECKING TEST CASE======~n", []),
    {Ref, Pid} = {make_ref(), self()},
    {ok, _} = rpc:call(?config(leech_node, Config),
                       etorrent, start,
                       [?config(http_torrent_file, Config), {Ref, Pid}]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality: the comparison result used to be discarded
    %% (common_test passes any normally-returning testcase).
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).


bep9() ->
    [{require, common_conf, etorrent_common_config}].
%% BEP-9/BEP-5: trackerless download by bare infohash.  A three-node DHT
%% network (seed, leech, middleman) is formed by hand, the seed
%% announces, and the leech fetches metadata + payload via magnet.
bep9(Config) ->
    %% Trackerless, download using infohash
    io:format("~n======START SEED AND LEECHING BEP-9 TEST CASE======~n", []),
    HexIH = ?config(info_hash_hex, Config),
    IntIH = ?config(info_hash_int, Config),
    error_logger:info_msg("Infohash is ~p.", [HexIH]),

    LeechNode = ?config(leech_node, Config),
    SeedNode = ?config(seed_node, Config),
    MiddlemanNode = ?config(middleman_node, Config),

    %% All three nodes must have DHT enabled.
    true = rpc:call(SeedNode, etorrent_config, dht, []),
    true = rpc:call(LeechNode, etorrent_config, dht, []),
    true = rpc:call(MiddlemanNode, etorrent_config, dht, []),

    timer:sleep(2000),
    %% Form a DHT network.
    %% etorrent_dht_state:safe_insert_node({127,0,0,1}, 6881).
    MiddlemanDhtPort = rpc:call(MiddlemanNode, etorrent_config, dht_port, []),
    MiddlemanIP = rpc:call(MiddlemanNode, etorrent_config, listen_ip, []),
    ct:pal("DHT-middleman address is ~p:~p.", [MiddlemanIP, MiddlemanDhtPort]),
    true = rpc:call(SeedNode,
                    etorrent_dht_state, safe_insert_node,
                    [MiddlemanIP, MiddlemanDhtPort]),
    true = rpc:call(LeechNode,
                    etorrent_dht_state, safe_insert_node,
                    [MiddlemanIP, MiddlemanDhtPort]),
    timer:sleep(3000),
    io:format("ANNOUNCE FROM SEED~n", []),
    ok = rpc:call(SeedNode, etorrent_dht_tracker, trigger_announce, []),

    %% Wait for announce.
    timer:sleep(3000),
    io:format("SEARCH FROM LEECH~n", []),

    Self = self(),
    Ref = make_ref(),
    CB = fun() -> Self ! {Ref, done} end,
    {ok, _TorrentID} = rpc:call(LeechNode,
        etorrent_magnet, download, [{infohash, IntIH}, [{callback, CB}]]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality: the comparison result used to be discarded
    %% (common_test passes any normally-returning testcase).
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).


find_local_peers() ->
    [{require, common_conf, etorrent_common_config}].
%% BEP-26 (local peer discovery): the leech must find the seed without
%% any tracker and fetch the torrent by bare infohash via magnet.
find_local_peers(Config) ->
    %% Trackerless, download using infohash
    io:format("~n======START SEED AND LEECHING BEP-26 TEST CASE======~n", []),
    HexIH = ?config(info_hash_hex, Config),
    IntIH = ?config(info_hash_int, Config),
    error_logger:info_msg("Infohash is ~p.", [HexIH]),

    LeechNode = ?config(leech_node, Config),
    SeedNode = ?config(seed_node, Config),

    Self = self(),
    Ref = make_ref(),
    CB = fun() -> Self ! {Ref, done} end,
    {ok, _TorrentID} = rpc:call(LeechNode,
        etorrent_magnet, download, [{infohash, IntIH}, [{callback, CB}]]),
    receive
        {Ref, done} -> ok
    after
        120*1000 -> exit(timeout_error)
    end,
    %% Assert equality: the comparison result used to be discarded
    %% (common_test passes any normally-returning testcase).
    true = (sha1_file(?config(src_filename, Config))
            =:= sha1_file(?config(dest_filename, Config))).



%% Helpers
%% ----------------------------------------------------------------------
%% Spawn opentracker (via its wrapper script under Dir) as a port
%% program and return the pid of the process pumping its output.
%% NOTE(review): bare spawn — if this process dies the port is orphaned;
%% callers are expected to stop it with stop_opentracker/1.
start_opentracker(Dir) ->
    ToSpawn = "run_opentracker.sh -i 127.0.0.1 -p 6969 -P 6969",
    Spawn = filename:join([Dir, ToSpawn]),
    Pid = spawn(fun() ->
                    Port = open_port({spawn, Spawn}, [binary, stream, eof]),
                    opentracker_loop(Port, <<>>)
                end),
    Pid.

%% Single-quote a string for use on a shell command line.
quote(Str) ->
    lists:concat(["'", Str, "'"]).

%% Spawn transmission-cli (via its wrapper script in DataDir) to
%% download Torrent into DownDir.  Returns {Ref, Pid}; the spawned
%% process sends {Ref, done} to the caller when transmission reports
%% completion (see transmission_loop/5).
start_transmission(DataDir, DownDir, Torrent) ->
    io:format("Start transmission~n", []),
    ToSpawn = ["run_transmission-cli.sh ", quote(Torrent),
               " -w ", quote(DownDir),
               " -g ", quote(DownDir),
               " -p 1780"],
    Spawn = filename:join([DataDir, lists:concat(ToSpawn)]),
    error_logger:info_report([{spawn, Spawn}]),
    Ref = make_ref(),
    Self = self(),
    Pid = spawn_link(fun() ->
                         Port = open_port(
                                  {spawn, Spawn},
                                  [stream, binary, eof, stderr_to_stdout]),
                         transmission_loop(Port, Ref, Self, <<>>, <<>>)
                     end),
    {Ref, Pid}.
%% Ask the transmission port-owner process to close its port.
stop_transmission(Pid) when is_pid(Pid) ->
    io:format("Stop transmission~n", []),
    Pid ! close,
    ok.

%% Substring of transmission-cli's output that marks completion.
transmission_complete_criterion() ->
    % "Seeding, uploading to".
    "Verifying local files (0.00%, 100.00% valid)".

%% Read transmission-cli's output line by line, echoing fresh lines to
%% the log, and send {Ref, done} to ReturnPid when the completion line
%% appears.  OldBin buffers a partial line; OldLine is the last line
%% printed (used to suppress consecutive duplicates).
transmission_loop(Port, Ref, ReturnPid, OldBin, OldLine) ->
    case binary:split(OldBin, [<<"\r">>, <<"\n">>]) of
        [OnePart] ->
            receive
                {Port, {data, Data}} ->
                    %% Append the new chunk to the unfinished line.
                    %% (This binary-append expression had been mangled
                    %% to "<>" by markup stripping; restored.)
                    transmission_loop(Port, Ref, ReturnPid,
                                      <<OnePart/binary, Data/binary>>, OldLine);
                close ->
                    port_close(Port);
                M ->
                    error_logger:error_report([received_unknown_msg, M]),
                    transmission_loop(Port, Ref, ReturnPid, OnePart, OldLine)
            end;
        [L, Rest] ->
            %% Is it a different line? than show it.
            [io:format("TRANS: ~s~n", [L]) || L =/= OldLine],
            case string:str(binary_to_list(L), transmission_complete_criterion()) of
                0 -> ok;
                N when is_integer(N) ->
                    ReturnPid ! {Ref, done}
            end,
            transmission_loop(Port, Ref, ReturnPid, Rest, L)
    end.

%% Echo opentracker's output to the log until told to close.
opentracker_loop(Port, OldBin) ->
    case binary:split(OldBin, [<<"\r">>, <<"\n">>]) of
        [OnePart] ->
            receive
                {Port, {data, Data}} ->
                    %% (Restored from mangled "<>", as above.)
                    opentracker_loop(Port, <<OnePart/binary, Data/binary>>);
                close ->
                    port_close(Port);
                M ->
                    error_logger:error_report([received_unknown_msg, M]),
                    opentracker_loop(Port, OnePart)
            end;
        [L, Rest] ->
            io:format("TRACKER: ~s~n", [L]),
            opentracker_loop(Port, Rest)
    end.

%% Ask the opentracker port-owner process to close its port.
stop_opentracker(Pid) ->
    Pid ! close.

%% Create TorrentFn from Fn (no announce URL) unless it already exists.
ensure_torrent_file(Fn, TorrentFn) ->
    case filelib:is_regular(TorrentFn) of
        true -> ok;
        false -> etorrent_mktorrent:create(Fn, undefined, TorrentFn)
    end,
    ok.

%% Create TorrentFn from Fn announcing to AnnounceUrl unless it exists.
ensure_torrent_file(Fn, TorrentFn, AnnounceUrl) ->
    case filelib:is_regular(TorrentFn) of
        true -> ok;
        false -> etorrent_mktorrent:create(Fn, AnnounceUrl, TorrentFn)
    end,
    ok.
%% Create Fn with random content unless it already exists, so repeated
%% runs can reuse the same payload file.
ensure_random_file(Fn) ->
    case filelib:is_regular(Fn) of
        true -> ok;
        false -> create_random_file(Fn)
    end.

%% Write 30 MB of random bytes to FName.
%% NOTE(review): crypto:rand_bytes/1 is deprecated and gone in modern
%% OTP (crypto:strong_rand_bytes/1 replaces it) — confirm the targeted
%% OTP release before modernizing.
create_random_file(FName) ->
    Bin = crypto:rand_bytes(30*1024*1024),
    file:write_file(FName, Bin).


%% Ensure DName exists and holds the two random payload files used by
%% the multi-file (directory) torrent testcases.
ensure_random_dir(DName) ->
    case filelib:is_dir(DName) of
        true -> ok;
        false -> file:make_dir(DName)
    end,
    File1 = filename:join(DName, "xyz.bin"),
    File2 = filename:join(DName, "abc.bin"),
    ensure_random_file(File1),
    ensure_random_file(File2),
    ok.

%% Create a broken version of Fn, if it do not exist.
%% (Copies Fn to BrokenFn and corrupts the copy in place.)
ensure_broken_file(Fn, BrokenFn) ->
    case filelib:is_regular(BrokenFn) of
        true -> ok;
        false -> file:copy(Fn, BrokenFn), break_file(BrokenFn)
    end.


%% Corrupt BrokenFn in place by overwriting 10 random chunks.
%% NOTE(review): random and now() are deprecated in newer OTP (rand and
%% os:timestamp); kept as-is for the old releases this suite targets.
break_file(BrokenFn) ->
    random:seed(now()),
    {ok, Fd} = file:open(BrokenFn, [write, read, binary]),
    {ok, TotalSize} = file:position(Fd, eof),
    %% Modify 10 chunks in the file.
    write_bad_chunks(Fd, TotalSize, 10),
    file:close(Fd).


%% Overwrite N random chunks of the file behind Fd.
write_bad_chunks(_Fd, _TotalSize, 0) ->
    ok;
write_bad_chunks(Fd, TotalSize, N) ->
    write_bad_chunk(Fd, TotalSize),
    write_bad_chunks(Fd, TotalSize, N-1).

%% Overwrite one randomly placed chunk of random bytes.
%% Assumes TotalSize > 16#FFFF, otherwise random:uniform/1 would get a
%% non-positive argument and fail; the 30 MB test files satisfy this.
write_bad_chunk(Fd, TotalSize) ->
    %% Len :: 1 .. 16#FFFF.
    Len = random:uniform(16#FFFF),
    From = random:uniform(TotalSize - Len),
    ct:pal("write_bad_chunk of length ~p from ~p.", [Len, From]),
    file:pwrite(Fd, From, crypto:rand_bytes(Len)).




%% SHA-1 digest of file F, streamed in 1 MB chunks.
%% NOTE(review): the crypto:sha_init/sha_update/sha_final API is removed
%% in modern OTP (crypto:hash_init(sha) etc. replace it); kept for the
%% old releases this suite targets.
sha1_file(F) ->
    Ctx = crypto:sha_init(),
    {ok, FD} = file:open(F, [read,binary,raw]),
    FinCtx = sha1_round(FD, file:read(FD, 1024*1024), Ctx),
    crypto:sha_final(FinCtx).
%% Fold the remaining 1 MB chunks of FD into the SHA context; the
%% second argument is the result of the previous file:read/2.
sha1_round(_FD, eof, Ctx) ->
    Ctx;
sha1_round(FD, {ok, Data}, Ctx) ->
    sha1_round(FD, file:read(FD, 1024*1024), crypto:sha_update(Ctx, Data)).


%% Delete File, recursing into it first when it is a directory.
del_r(File) ->
    case filelib:is_dir(File) of
        true -> del_dir_r(File);
        false -> file:delete(File)
    end.

%% Delete everything below Dir, then Dir itself.
del_dir_r(Dir) ->
    {ok, Files} = file:list_dir(Dir),
    [ok = del_r(filename:join(Dir, X)) || X <- Files],
    %% Delete the empty directory
    ok = file:del_dir(Dir),
    ok.


%% Prepare a freshly started slave node: share our code path, replace
%% its 'user' IO server with a proxy forwarding to this node's group
%% leader (so remote io output lands in the common_test log), and boot
%% lager with a console handler.
prepare_node(Node) ->
    io:format("Prepare node ~p.~n", [Node]),
    rpc:call(Node, code, set_path, [code:get_path()]),
    true = rpc:call(Node, erlang, unregister, [user]),
    IOProxy = spawn(Node, spawn_io_proxy()),
    true = rpc:call(Node, erlang, register, [user, IOProxy]),
    Handlers = lager_handlers(Node),
    ok = rpc:call(Node, application, load, [lager]),
    ok = rpc:call(Node, application, set_env, [lager, handlers, Handlers]),
    ok = rpc:call(Node, application, start, [lager]),
    ok.

%% Capture the caller's group leader and return a closure that forwards
%% IO requests to it; spawned on the slave node as its 'user' process.
spawn_io_proxy() ->
    User = group_leader(),
    fun() -> io_proxy(User) end.

%% Forward every received message to Pid, forever.
io_proxy(Pid) ->
    receive
        Mess -> Pid ! Mess, io_proxy(Pid)
    end.

%% Lager console-backend configuration.  Output is prefixed with the
%% first two characters of the node name so interleaved logs from
%% several nodes can be told apart.
lager_handlers(NodeName) ->
    % [Node|_] = string:tokens(atom_to_list(NodeName), "@"),
    [A,B|_] = atom_to_list(NodeName),
    Node = [A,B],
    Format = [Node, "> ", "[", time, "] [",severity,"] ",
              {pid, [pid, " "], ""}, {module, [module, ":", line, " "], ""},
              message, "\n"],
    [{lager_console_backend, [debug, {lager_default_formatter, Format}]}].

%% Returns torrent_id.
%% Poll Node every 500 ms until the torrent with raw infohash BinIH is
%% registered in etorrent_table, then return its torrent id.
%% NOTE: loops forever if the torrent never registers; the common_test
%% timetrap is what eventually aborts the testcase.
wait_torrent_registration(Node, BinIH) ->
    case rpc:call(Node, etorrent_table, get_torrent, [{infohash, BinIH}]) of
        {value, Props} ->
            proplists:get_value(id, Props);
        not_found -> timer:sleep(500), wait_torrent_registration(Node, BinIH)
    end.

%% Returns pid of the control process.
%% Poll Node every 500 ms until PeerId shows up for TorrentID.
wait_peer_registration(Node, TorrentID, PeerId) ->
    case rpc:call(Node, etorrent_table, get_peer, [{peer_id, TorrentID, PeerId}]) of
        {value, Props} ->
            proplists:get_value(pid, Props);
        not_found ->
            timer:sleep(500), wait_peer_registration(Node, TorrentID, PeerId)
    end.

%% Convert a 40-character hex infohash to its 20-byte binary form.  An
%% infohash is a SHA-1 digest, i.e. exactly 160 bits, so the integer is
%% packed into a 160-bit segment.  (The binary construction here had
%% been mangled to "<>" by markup stripping; restored.)
hex_to_bin_hash(HexIH) ->
    IntIH = list_to_integer(HexIH, 16),
    <<IntIH:160>>.

%% Convert a literal infohash to integer.
hex_to_int_hash(X) ->
    list_to_integer(X, 16).


%% Copy SrcFileName into DestDirName, keeping its base name.
copy_to(SrcFileName, DestDirName) ->
    SrcBaseName = filename:basename(SrcFileName),
    DestFileName = filename:join([DestDirName, SrcBaseName]),
    file:copy(SrcFileName, DestFileName).


%% Recursively copy directories
-spec copy_r(file:filename(), file:filename()) -> ok.
copy_r(From, To) ->
    {ok, Files} = file:list_dir(From),
    [ok = copy_r(From, To, X) || X <- Files],
    ok.

%% Copy one directory entry; descends into subdirectories and silently
%% skips entries that are neither regular files nor directories.
-spec copy_r(list(), list(), list()) -> ok.
copy_r(From, To, File) ->
    NewFrom = filename:join(From, File),
    NewTo = filename:join(To, File),
    case filelib:is_dir(NewFrom) of
        true ->
            ok = filelib:ensure_dir(NewTo),
            copy_r(NewFrom, NewTo);
        false ->
            case filelib:is_file(NewFrom) of
                true ->
                    ok = filelib:ensure_dir(NewTo),
                    {ok, _} = file:copy(NewFrom, NewTo),
                    ok;
                false -> ok
            end
    end.


%% Stop the etorrent application on Node; crash if the rpc fails.
stop_app(Node) ->
    ok = rpc:call(Node, etorrent, stop_app, []).
%% Start the etorrent application on Node with AppConfig; crash if the
%% rpc fails.
start_app(Node, AppConfig) ->
    ok = rpc:call(Node, etorrent, start_app, [AppConfig]).


%% Turn the DHT subsystem on in an application-config proplist.
enable_dht(AppConfig) ->
    [{dht, true}|AppConfig].

%% Turn multicast-DNS local peer discovery on (find_local_peers test).
enable_mdns(AppConfig) ->
    [{mdns, true}|AppConfig].

%% Returns true, if files are equal.
compare_file_contents(Fn1, Fn2) ->
    {ok, Fd1} = file:open(Fn1, [binary, read]),
    {ok, Fd2} = file:open(Fn2, [binary, read]),
    IsEqual = compare_file_contents_1(Fd1, Fd2, 0, 100000),
    file:close(Fd1),
    file:close(Fd2),
    IsEqual.

%% Compare the two files chunk by chunk starting at Offset.
compare_file_contents_1(Fd1, Fd2, Offset, ChunkSize) ->
    case {file:pread(Fd1, Offset, ChunkSize),
          file:pread(Fd2, Offset, ChunkSize)} of
        {eof, eof} -> true;
        {{ok, X}, {ok, X}} -> compare_file_contents_1(Fd1, Fd2, Offset+ChunkSize, ChunkSize);
        {{ok, _}, {ok, _}} -> false;
        %% Files of different length: one side hits eof on a chunk
        %% boundary before the other.  There was previously no clause
        %% for this, so the comparison crashed with a case_clause error
        %% instead of reporting inequality.
        {eof, {ok, _}} -> false;
        {{ok, _}, eof} -> false
    end.
--------------------------------------------------------------------------------
/test/etorrent_SUITE_data/run_opentracker.sh:
--------------------------------------------------------------------------------
#!/bin/sh

(cat && kill 0) | opentracker $*
--------------------------------------------------------------------------------
/test/etorrent_SUITE_data/run_transmission-cli.sh:
--------------------------------------------------------------------------------
#!/bin/sh

(cat && kill 0) | transmission-cli $*
--------------------------------------------------------------------------------
/test/etorrent_SUITE_data/transmission/settings.json:
--------------------------------------------------------------------------------
{
    "alt-speed-down": 50,
    "alt-speed-enabled": false,
    "alt-speed-time-begin": 540,
    "alt-speed-time-day": 127,
    "alt-speed-time-enabled": false,
    "alt-speed-time-end": 1020,
    "alt-speed-up": 50,
    "bind-address-ipv4": "0.0.0.0",
    "bind-address-ipv6": "::",
    "blocklist-enabled": false,
12 | "blocklist-url": "http://www.example.com/blocklist", 13 | "cache-size-mb": 4, 14 | "dht-enabled": false, 15 | "download-dir": ".", 16 | "encryption": 1, 17 | "idle-seeding-limit": 30, 18 | "idle-seeding-limit-enabled": false, 19 | "incomplete-dir": "/home/jlouis/Downloads", 20 | "incomplete-dir-enabled": false, 21 | "lazy-bitfield-enabled": true, 22 | "lpd-enabled": false, 23 | "message-level": 5, 24 | "open-file-limit": 32, 25 | "peer-congestion-algorithm": "", 26 | "peer-limit-global": 240, 27 | "peer-limit-per-torrent": 60, 28 | "peer-port": 51413, 29 | "peer-port-random-high": 65535, 30 | "peer-port-random-low": 49152, 31 | "peer-port-random-on-start": false, 32 | "peer-socket-tos": "default", 33 | "pex-enabled": false, 34 | "port-forwarding-enabled": false, 35 | "preallocation": 1, 36 | "prefetch-enabled": 1, 37 | "ratio-limit": 2, 38 | "ratio-limit-enabled": false, 39 | "rename-partial-files": true, 40 | "rpc-authentication-required": false, 41 | "rpc-bind-address": "0.0.0.0", 42 | "rpc-enabled": false, 43 | "rpc-password": "{8e9b4d6222faf86bf3a8046e73d00d37a1ae6959Y0kPqg3w", 44 | "rpc-port": 9091, 45 | "rpc-url": "/transmission/", 46 | "rpc-username": "", 47 | "rpc-whitelist": "127.0.0.1", 48 | "rpc-whitelist-enabled": true, 49 | "script-torrent-done-enabled": false, 50 | "script-torrent-done-filename": "", 51 | "speed-limit-down": 100, 52 | "speed-limit-down-enabled": false, 53 | "speed-limit-up": 100, 54 | "speed-limit-up-enabled": false, 55 | "start-added-torrents": true, 56 | "trash-original-torrent-files": false, 57 | "umask": 18, 58 | "upload-slots-per-torrent": 14 59 | } 60 | -------------------------------------------------------------------------------- /tools/graph: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env escript 2 | % -*- Mode: Erlang; -*- 3 | -import(lists, [prefix/2]). 4 | 5 | a2l(Atom) -> 6 | atom_to_list(Atom). 
%% Build a Graphviz dependency graph of all modules found under Dir
%% whose names begin with Prefix and write the .dot source to Filename.
write_to(Dir, Filename, Prefix) ->
    xref:start(s),
    {ok, _Modules} = xref:add_directory(s, Dir),
    %% "XC" asks xref for all external calls as {FromMFA, ToMFA} pairs.
    {ok, Calls} = xref:q(s, "XC"),
    AllMods = lists:usort([{atom_to_list(From), atom_to_list(To)}
                           || {{From,_,_}, {To,_,_}} <- Calls,
                              From =/= To]),
    io:format("~p~n", [AllMods]),
    PrefMods = prefixed(Prefix, AllMods),
    Mods = ordsets:from_list(PrefMods),

    %% Group related module families into named Graphviz clusters.
    {Remaining, Clusters} =
        build_cluster(Mods,
                      ["etorrent_torrent",
                       "etorrent_dht",
                       "etorrent_peer",
                       "etorrent_upnp",
                       "etorrent_http",
                       "etorrent_udp",
                       "etorrent_io"], []),
    Graph = graph(Remaining),

    %% Assert the write succeeded (the result was previously ignored).
    ok = file:write_file(
           Filename,
           ["digraph G { ", $\n,
            header(),
            Clusters,
            Graph,
            [" }", $\n]]),
    %% Stop the xref server so repeated runs do not accumulate state
    %% (it was previously left running).
    xref:stop(s),
    ok.

%% Keep only edges where both endpoints start with prefix P.
prefixed(P, All) ->
    [{F, T} || {F, T} <- All,
               lists:prefix(P, F), lists:prefix(P, T)].

%% Render a list of edges as Graphviz "From -> To ;" lines (an iolist).
graph(Mods) ->
    [[$\t, From, " -> ", To, " ; ", $\n] || {From, To} <- Mods].

%% Pull every edge whose endpoints share a cluster prefix out of Mods
%% and render it inside a Graphviz subgraph; returns the leftover edges
%% and the accumulated cluster texts.
build_cluster(Mods, [], Acc) -> {Mods, Acc};
build_cluster(Mods, [Cls | R], Acc) ->
    NM = prefixed(Cls, Mods),
    Txt = ["subgraph cluster_", Cls, " {",
           "style=rounded; penwidth=3; "
           "label = ", Cls, $;,
           "color = deepskyblue4;",
           "fontsize = 14",
           graph(NM), $}, $\n],
    build_cluster(Mods -- NM, R, [Txt | Acc]).

%% Global Graphviz attributes for the generated digraph.
header() ->
    ["node [fontname=\"URW Gothic L\",fontsize=12,shape=plaintext,labelfontname=Helvetica];",
     "rankdir=LR; labeljust = l; nodesep=1.2; ranksep=1.2; ",
     "labelloc = t;",
     "fontsize = 24;",
     "fontname=\"URW Gothic L\";",
     "concentrate=true;",
     "label = \"Etorrent dependency graph\""].

%% escript entry point: graph <beam-dir> <output.dot> <module-prefix>
main([Dir, Filename, Prefix]) ->
    write_to(Dir, Filename, Prefix).