├── .gitignore
├── README.rst
├── margay.conf
├── margay.sql
├── margay
├── __init__.py
├── config.py
├── database.py
├── main.py
├── schedule.py
├── site_comm.py
├── stats.py
├── structs.py
├── util.py
└── worker.py
├── runner.py
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | venv/
2 | .DS_Store
3 | .idea/
4 | margay/__pycache__/
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Margay
2 | ======
3 |
4 | Margay is a BitTorrent tracker written in Python (though intended to be compiled with Cython) for the Gazelle project.
The first release is aimed to be a 1-to-1 translation of Ocelot into Python, to be then stress tested to see a comparison
6 | of efficiency between the two projects (with Margay then having the edge in developer productivity).
7 |
8 | Dependencies
9 | ------------
10 | * Python 3.6
* `aiohttp <https://github.com/aio-libs/aiohttp>`_
* `bencode.py <https://github.com/fuzeman/bencode.py>`_
* `mysqlclient <https://github.com/PyMySQL/mysqlclient>`_
* `requests <https://github.com/psf/requests>`_
15 |
16 | Installation
17 | ------------
18 | After cloning or downloading this repository, navigate to it and run::
19 |
20 | python setup.py install
21 |
22 |
23 | Usage
24 | -----
25 | Running margay is easy from this repo::
26 |
27 | usage: runner.py [-h] [-v] [-d] [-c [CONFIG]] [-V]
28 |
29 | Python BitTorrent tracker
30 |
31 | optional arguments:
32 | -h, --help show this help message and exit
33 | -v, --verbose Be more verbose in the output
34 | -d, --daemonize Run tracker as daemon
35 | -c [CONFIG], --config [CONFIG]
36 | -V, --version show program's version number and exit
37 |
38 | Gazelle
39 | ^^^^^^^
40 | After installing Gazelle, you should be able to point Margay towards that database and things should just work.
41 | Management of torrents, users, tokens, and the whitelist can all be done via the Gazelle site and it will be
42 | communicated to Margay. However, you must make sure that the Gazelle configuration (`classes/config.php`) is configured
43 | to point to where Margay is running and that both Margay and Gazelle have the same passwords configured in their
44 | respective configurations.
45 |
46 | Roadmap:
47 | --------
48 | 1. Develop a "Leopardus Tracker Tester" which would test Ocelot/Margay for compliance with each other as well as benchmark
49 | 2. Use the benchmarks to determine if it's worth developing this further
3. Investigate dropping aiohttp for `japronto <https://github.com/squeaky-pl/japronto>`_ for potential speed-up
51 |
52 | See Also:
53 | ---------
* `pybtracker <https://github.com/elektito/pybtracker>`_
* `Ocelot <https://github.com/ApolloRIP/Ocelot>`_
--------------------------------------------------------------------------------
/margay.conf:
--------------------------------------------------------------------------------
# Margay config file
2 | # Lines starting with a # are ignored
3 | # A # anywhere else is treated like any other character
4 |
5 | [internal]
6 | listen_port = 34000
7 | max_connections = 128
8 | max_middlemen = 20000
9 | max_read_buffer = 4096
10 | connection_timeout = 10
11 | # Keepalive is mostly useful if the tracker runs behind reverse proxies
12 | keepalive_timeout = 0
13 |
14 | [tracker]
15 | announce_interval = 1800
16 | max_request_size = 4096
17 | numwant_limit = 50
18 | request_log_size = 500
19 |
20 | [mysql]
21 | mysql_host = localhost
22 | mysql_username = gazelle
23 | mysql_password = password
24 | mysql_db = gazelle
25 |
26 | [gazelle]
27 | # The passwords must be 32 characters and match the Gazelle config
28 | report_password = 00000000000000000000000000000000
29 | site_password = 00000000000000000000000000000000
30 |
31 | [timers]
32 | peers_timeout = 7200
33 | del_reason_lifetime = 86400
34 | reap_peers_interval = 1800
35 | schedule_interval = 3
36 |
37 | [logging]
38 | log = true
39 | log_level = info
40 | log_path = /tmp/ocelot
41 |
42 | [debug]
43 | readonly = false
44 |
--------------------------------------------------------------------------------
/margay.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE IF NOT EXISTS `torrents` (
2 | `ID` int(10) NOT NULL AUTO_INCREMENT,
3 | `UserID` int(10) DEFAULT NULL,
4 | `info_hash` blob NOT NULL,
5 | `Leechers` int(6) NOT NULL DEFAULT '0',
6 | `Seeders` int(6) NOT NULL DEFAULT '0',
7 | `last_action` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
8 | `FreeTorrent` enum('0','1','2') NOT NULL DEFAULT '0',
9 | `FreeLeechType` enum('0','1','2','3','4','5','6','7') NOT NULL DEFAULT '0',
10 | `Time` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
11 | `Snatched` int(10) unsigned NOT NULL DEFAULT '0',
12 | `balance` bigint(20) NOT NULL DEFAULT '0',
13 | PRIMARY KEY (`ID`),
14 | UNIQUE KEY `InfoHash` (`info_hash`(40)),
15 | KEY `UserID` (`UserID`),
16 | KEY `Seeders` (`Seeders`),
17 | KEY `Leechers` (`Leechers`),
18 | KEY `Snatched` (`Snatched`),
19 | KEY `last_action` (`last_action`),
20 | KEY `Time` (`Time`),
21 | KEY `FreeTorrent` (`FreeTorrent`)
22 | ) ENGINE=InnoDB CHARSET utf8;
23 |
24 | CREATE TABLE IF NOT EXISTS `users_freeleeches` (
25 | `UserID` int(10) NOT NULL,
26 | `TorrentID` int(10) NOT NULL,
27 | `Time` datetime NOT NULL,
28 | `Expired` tinyint(1) NOT NULL DEFAULT '0',
29 | `Downloaded` bigint(20) NOT NULL DEFAULT '0',
30 | `Uses` int(10) NOT NULL DEFAULT '1',
31 | PRIMARY KEY (`UserID`,`TorrentID`),
32 | KEY `Time` (`Time`),
33 | KEY `Expired_Time` (`Expired`,`Time`)
34 | ) ENGINE=InnoDB CHARSET utf8;
35 |
36 | CREATE TABLE IF NOT EXISTS `users_main` (
37 | `ID` int(10) unsigned NOT NULL AUTO_INCREMENT,
38 | `LastLogin` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
39 | `LastAccess` datetime NOT NULL DEFAULT '0000-00-00 00:00:00',
40 | `IP` varchar(15) NOT NULL DEFAULT '0.0.0.0',
41 | `Uploaded` bigint(20) unsigned NOT NULL DEFAULT '0',
42 | `Downloaded` bigint(20) unsigned NOT NULL DEFAULT '0',
43 | `BonusPoints` float(20, 5) NOT NULL DEFAULT '0',
44 | `Enabled` enum('0','1','2') NOT NULL DEFAULT '0',
45 | `Visible` enum('1','0') NOT NULL DEFAULT '1',
46 | `can_leech` tinyint(4) NOT NULL DEFAULT '1',
47 | `torrent_pass` char(32) NOT NULL,
48 | `FLTokens` int(10) NOT NULL DEFAULT '0',
49 | PRIMARY KEY (`ID`),
50 | KEY `LastAccess` (`LastAccess`),
51 | KEY `IP` (`IP`),
52 | KEY `Uploaded` (`Uploaded`),
53 | KEY `Downloaded` (`Downloaded`),
54 | KEY `Enabled` (`Enabled`),
55 | KEY `torrent_pass` (`torrent_pass`)
56 | ) ENGINE=InnoDB CHARSET utf8;
57 |
58 | CREATE TABLE IF NOT EXISTS `xbt_client_whitelist` (
59 | `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
60 | `peer_id` varchar(20) DEFAULT NULL,
61 | `vstring` varchar(200) DEFAULT '',
62 | PRIMARY KEY (`id`),
63 | UNIQUE KEY `peer_id` (`peer_id`)
64 | ) ENGINE=InnoDB CHARSET utf8;
65 |
66 | CREATE TABLE IF NOT EXISTS `xbt_files_history` (
67 | `uid` int(11) NOT NULL,
68 | `fid` int(11) NOT NULL,
69 | `seedtime` int(11) NOT NULL DEFAULT '0',
70 | `downloaded` bigint(20) NOT NULL DEFAULT '0',
71 | `uploaded` bigint(20) NOT NULL DEFAULT '0'
72 | ) ENGINE=InnoDB CHARSET utf8;
73 |
74 | CREATE TABLE IF NOT EXISTS `xbt_files_users` (
75 | `uid` int(11) NOT NULL,
76 | `active` tinyint(1) NOT NULL DEFAULT '1',
77 | `announced` int(11) NOT NULL DEFAULT '0',
78 | `completed` tinyint(1) NOT NULL DEFAULT '0',
79 | `downloaded` bigint(20) NOT NULL DEFAULT '0',
80 | `remaining` bigint(20) NOT NULL DEFAULT '0',
81 | `uploaded` bigint(20) NOT NULL DEFAULT '0',
82 | `upspeed` int(10) unsigned NOT NULL DEFAULT '0',
83 | `downspeed` int(10) unsigned NOT NULL DEFAULT '0',
84 | `corrupt` bigint(20) NOT NULL DEFAULT '0',
85 | `timespent` int(10) unsigned NOT NULL DEFAULT '0',
86 | `useragent` varchar(51) NOT NULL DEFAULT '',
87 | `connectable` tinyint(4) NOT NULL DEFAULT '1',
88 | `peer_id` binary(20) NOT NULL DEFAULT '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0',
89 | `fid` int(11) NOT NULL,
90 | `mtime` int(11) NOT NULL DEFAULT '0',
91 | `ip` varchar(15) NOT NULL DEFAULT '',
92 | PRIMARY KEY (`peer_id`,`fid`,`uid`),
93 | KEY `remaining_idx` (`remaining`),
94 | KEY `fid_idx` (`fid`),
95 | KEY `mtime_idx` (`mtime`),
96 | KEY `uid_active` (`uid`,`active`)
97 | ) ENGINE=InnoDB CHARSET utf8;
98 |
99 | CREATE TABLE IF NOT EXISTS `xbt_snatched` (
100 | `uid` int(11) NOT NULL DEFAULT '0',
101 | `tstamp` int(11) NOT NULL,
102 | `fid` int(11) NOT NULL,
103 | `IP` varchar(15) NOT NULL,
104 | `seedtime` int(11) NOT NULL DEFAULT '0',
105 | KEY `fid` (`fid`),
106 | KEY `tstamp` (`tstamp`),
107 | KEY `uid_tstamp` (`uid`,`tstamp`)
108 | ) ENGINE=InnoDB CHARSET utf8;
--------------------------------------------------------------------------------
/margay/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'itismadness'
2 | __version__ = '0.1.0'
3 |
--------------------------------------------------------------------------------
/margay/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Configuration Class
3 | """
4 |
5 | from configparser import ConfigParser
6 | import logging
7 |
8 |
class Config(object):
    """Tracker configuration: built-in defaults optionally overridden by an
    INI-style file parsed with ConfigParser.

    Values read from the file are coerced to the type of the matching default
    (int or bool); everything else is kept as a string.  Unknown sections and
    options in the file are ignored rather than raising KeyError (the shipped
    margay.conf uses option names that do not all match the defaults).
    """

    def __init__(self, config_file=None, daemonize=False):
        """
        :param config_file: optional path to an INI configuration file
        :param daemonize: whether the tracker should run as a daemon
        """
        self.config = {
            'internal': {
                'listen_port': 35000,
                'max_connections': 1024,
                'max_middlemen': 20000,
                'max_read_buffer': 4096,
                'connection_timeout': 10,
                'keepalive_timeout': 0,
                'daemonize': daemonize
            },
            'tracker': {
                'announce_interval': 1800,
                'max_request_size': 4096,
                'numwant_limit': 50,
                'request_log_size': 500
            },
            'timers': {
                'del_reason_lifetime': 86400,
                'peers_timeout': 7200,
                'reap_peers_interval': 1800,
                'schedule_interval': 3
            },
            # note: host=localhost will cause mysqlclient to use a socket regardless of port,
            # use 127.0.0.1 for the host if you're trying to connect to something with a port
            'mysql': {
                'host': '127.0.0.1',
                'db': 'gazelle',
                'user': 'gazelle',
                'passwd': 'password',
                'port': 36000
            },
            'gazelle': {
                'site_host': '127.0.0.1',
                'site_path': '',
                'site_password': '00000000000000000000000000000000',
                'report_password': '00000000000000000000000000000000'
            },
            'logging': {
                'log': True,
                'log_level': logging.getLevelName(logging.INFO),
                'log_console': True,
                'log_file': False,
                'log_path': '/tmp/margay'
            },
            'debug': {
                'readonly': False
            }
        }

        if config_file is not None:
            parsed = ConfigParser()
            parsed.read(config_file)
            # sections() skips ConfigParser's implicit DEFAULT section
            for section in parsed.sections():
                if section not in self.config:
                    continue  # unknown section: ignore
                for option in parsed[section]:
                    if option not in self.config[section]:
                        continue  # unknown option: ignore
                    default = self.config[section][option]
                    if type(default) == int:
                        # BUG FIX: previously coerced the *default* value
                        # (int(self.config[key][value])), silently discarding
                        # the value read from the file.
                        self.config[section][option] = int(parsed[section][option])
                    elif type(default) == bool:
                        self.config[section][option] = parsed[section][option] in (
                            'True', 'true', 'On', 'on')
                    else:
                        self.config[section][option] = parsed[section][option]
        if self.config['logging']['log_level'] == 'debug':
            self.print()

    def reload(self):
        """Re-read configuration at runtime (not implemented yet)."""
        pass

    def print(self):
        """Dump every section and option to stdout (debugging aid)."""
        for key in self.config:
            print(self.config[key])
            for kkey in self.config[key]:
                print(kkey, self.config[key][kkey])

    def __getitem__(self, item):
        """Allow config['section'] access to the underlying dict."""
        return self.config[item]
87 |
--------------------------------------------------------------------------------
/margay/database.py:
--------------------------------------------------------------------------------
1 | import binascii
2 | from copy import copy
3 | import logging
4 | from time import time
5 | from typing import Dict
6 | import threading
7 | # noinspection PyPackageRequirements
8 | import MySQLdb
9 |
10 | from .structs import Torrent, User, LeechType
11 | import margay.stats as stats
12 |
13 |
class Database(object):
    """MySQL persistence layer for the tracker.

    Announce/scrape handlers append rows to in-memory buffers via the
    record_* methods; flush() moves each buffer into a per-table queue of
    batches and spawns a background thread that writes the batches with
    executemany().  When `readonly` is set, buffered writes are discarded
    instead of being flushed.
    """

    def __init__(self, settings, readonly=False):
        """
        :param settings: dict with 'host', 'user', 'passwd', 'db', 'port'
        :param readonly: when True, discard writes instead of persisting them
        """
        self.logger = logging.getLogger()
        self.settings = settings
        self.db = self.get_connection()

        self.readonly = readonly

        # Buffers filled by the record_* methods between flushes.
        self.user_buffer = []
        self.torrent_buffer = []
        self.heavy_peer_buffer = []
        self.light_peer_buffer = []
        self.snatch_buffer = []
        self.token_buffer = []

        # Queues of pending batches; each entry is a *list* of row tuples.
        self.user_queue = []
        self.torrent_queue = []
        self.peer_queue = []
        self.snatch_queue = []
        self.token_queue = []

        # Per-table "flush thread currently running" flags.
        self.u_active = self.t_active = self.p_active = self.s_active = self.tok_active = False
        self.user_lock = threading.RLock()
        self.torrent_lock = threading.RLock()
        self.peer_lock = threading.RLock()
        self.snatch_lock = threading.RLock()
        self.token_lock = threading.RLock()

        self.torrent_list_lock = threading.RLock()
        self.user_list_lock = threading.RLock()
        self.whitelist_lock = threading.RLock()

        if not self.readonly:
            self.logger.info('Clearing xbt_files_users and resetting peer counts...')
            self.flush()
            self._clear_peer_data()
            self.logger.info('done')

    def get_connection(self):
        """Open a fresh MySQL connection using the configured settings."""
        return MySQLdb.connect(host=self.settings['host'], user=self.settings['user'],
                               passwd=self.settings['passwd'], db=self.settings['db'],
                               port=self.settings['port'])

    def connected(self):
        """Best-effort check that the main connection object exists."""
        return self.db is not None

    def load_torrents(self, torrents=None):
        """Load (or refresh) the torrent map keyed by info_hash string.

        Torrents that disappeared from the database are removed and their
        peers are subtracted from the global stats counters.
        """
        if torrents is None:
            torrents = dict()
        cur_keys = set(torrents.keys())

        cursor = self.db.cursor()
        # info_hash is a binary blob; decode with 'replace' so malformed
        # bytes cannot raise while building the dict key
        cursor.execute('SELECT ID, info_hash, FreeTorrent, Snatched FROM torrents '
                       'ORDER BY ID')
        with self.torrent_list_lock:
            for row in cursor.fetchall():
                info_hash = row[1].decode('utf-8', 'replace')
                if info_hash == '':
                    continue
                if info_hash not in torrents:
                    torrents[info_hash] = Torrent(row[0], row[3])
                else:
                    torrents[info_hash].tokened_users.clear()
                # BUG FIX: remove() raised KeyError for torrents new to the map
                cur_keys.discard(info_hash)
                torrents[info_hash].free_torrent = LeechType.to_enum(row[2])
        cursor.close()

        # Anything left in cur_keys no longer exists in the database.
        for key in cur_keys:
            # BUG FIX: subtract the peer *counts*, not the dicts themselves,
            # and iterate the Peer objects (values), not their string keys.
            stats.leechers -= len(torrents[key].leechers)
            stats.seeders -= len(torrents[key].seeders)
            for leecher in torrents[key].leechers.values():
                leecher.user.leeching -= 1
            for seeder in torrents[key].seeders.values():
                seeder.user.seeding -= 1
            del torrents[key]

        self.logger.info(f'Loaded {len(torrents)} torrents')
        self.load_tokens(torrents)
        return torrents

    def load_users(self, users=None):
        """Load (or refresh) the user map keyed by torrent_pass."""
        if users is None:
            users = dict()
        cur_keys = set(users.keys())

        cursor = self.db.cursor()
        cursor.execute("SELECT ID, can_leech, torrent_pass, (Visible='0' OR IP='127.0.0.1') AS "
                       "Protected FROM users_main WHERE Enabled='1'")
        with self.user_list_lock:
            for row in cursor.fetchall():
                if row[2] not in users:
                    users[row[2]] = User(row[0], row[1], row[3])
                else:
                    users[row[2]].leech = row[1]
                    users[row[2]].protect = row[3]
                # BUG FIX: remove() raised KeyError for users new to the map
                cur_keys.discard(row[2])
        cursor.close()

        # Users no longer enabled in the database are dropped.
        for key in cur_keys:
            del users[key]

        self.logger.info(f'Loaded {len(users)} users')
        return users

    def load_tokens(self, torrents: Dict[str, 'Torrent']):
        """Attach unexpired freeleech token holders to their torrents.

        :param torrents: map of info_hash -> Torrent as built by load_torrents
        """
        cursor = self.db.cursor()
        cursor.execute("SELECT uf.UserID, t.info_hash FROM users_freeleeches AS uf "
                       "JOIN torrents AS t ON t.ID = uf.TorrentID "
                       "WHERE uf.Expired = '0'")
        loaded = 0
        for row in cursor.fetchall():
            # BUG FIX: str() on a bytes blob yields "b'...'" which can never
            # match the keys built in load_torrents; decode the same way.
            info_hash = row[1].decode('utf-8', 'replace')
            if info_hash in torrents:
                torrents[info_hash].tokened_users.append(row[0])
                loaded += 1
        self.logger.info(f'Loaded {loaded} tokens')
        cursor.close()

    def load_whitelist(self):
        """Return the list of whitelisted peer_id prefixes (empty = disabled)."""
        cursor = self.db.cursor()
        whitelist = list()
        cursor.execute("SELECT peer_id FROM xbt_client_whitelist")
        with self.whitelist_lock:
            for result in cursor.fetchall():
                whitelist.append(result[0])
        cursor.close()

        if len(whitelist) == 0:
            self.logger.info('Assuming no whitelist desired, disabled')
        else:
            self.logger.info(f'Loaded {len(whitelist)} clients into the whitelist')
        return whitelist

    def record_token(self, user_id, torrent_id, downloaded):
        """Buffer a freeleech token usage row."""
        self.token_buffer.append((user_id, torrent_id, downloaded))

    def record_user(self, user_id, uploaded, downloaded):
        """Buffer an upload/download delta for a user."""
        self.user_buffer.append((user_id, uploaded, downloaded))

    def record_torrent(self, torrent_id, seeders, leechers, snatched, balance):
        """Buffer updated per-torrent counters."""
        self.torrent_buffer.append((torrent_id, seeders, leechers, snatched, balance))

    def record_snatch(self, user_id, torrent_id, ipv4, ipv6):
        """Buffer a completed download (snatch).

        BUG FIX: the row now matches the (uid, fid, tstamp, IP) column order
        used by _do_flush_snatches; previously ipv4 landed in the tstamp
        column and no timestamp was recorded at all.  The ipv6 parameter is
        kept for interface compatibility, but xbt_snatched only has a single
        15-character IP column, so the IPv4 address is stored.
        """
        self.snatch_buffer.append((user_id, torrent_id, int(time()), ipv4))

    def record_peer_light(self, user_id, torrent_id, timespent, announced, peer_id):
        """Buffer a lightweight peer update (transfer stats unchanged)."""
        self.light_peer_buffer.append((user_id, torrent_id, timespent, announced, peer_id,
                                       int(time())))

    def record_peer_heavy(self, user_id, torrent_id, active, uploaded, downloaded, upspeed,
                          downspeed, remaining, corrupt, timespent, announced, ip, peer_id,
                          user_agent):
        """Buffer a full peer row update."""
        self.heavy_peer_buffer.append((user_id, torrent_id, active, uploaded, downloaded, upspeed,
                                       downspeed, remaining, corrupt, timespent, announced,
                                       ip, peer_id, user_agent, int(time())))

    def flush(self):
        """Move every buffer into its queue and kick off the flush threads."""
        self._flush_users()
        self._flush_torrents()
        self._flush_snatches()
        self._flush_peers()
        self._flush_tokens()

    def _flush_users(self):
        if self.readonly:
            self.user_buffer.clear()
            return

        with self.user_lock:
            if len(self.user_queue) > 0:
                # BUG FIX: logged the token queue instead of the user queue
                self.logger.info(f'User flush queue size: {len(self.user_queue)}, '
                                 f'next query length: {len(str(self.user_queue[0]))}')
            if len(self.user_buffer) == 0:
                return
            # BUG FIX: queue one *batch* (list of rows) per flush; extending
            # with individual tuples broke executemany() in the worker thread.
            self.user_queue.append(copy(self.user_buffer))
            self.user_buffer.clear()
            if not self.u_active:
                threading.Thread(target=self._do_flush_users).start()

    def _do_flush_users(self):
        self.u_active = True
        conn = self.get_connection()
        while len(self.user_queue) > 0:
            cursor = conn.cursor()
            cursor.executemany('INSERT INTO users_main (ID, Uploaded, Downloaded) '
                               'VALUES(%s, %s, %s) '
                               'ON DUPLICATE KEY UPDATE Uploaded = Uploaded + Values(Uploaded), '
                               'Downloaded = Downloaded + Values(Downloaded)', self.user_queue[0])
            cursor.close()
            conn.commit()  # BUG FIX: writes were never committed
            with self.user_lock:
                self.user_queue.pop(0)
        conn.close()
        self.u_active = False

    def _flush_torrents(self):
        if self.readonly:
            self.torrent_buffer.clear()
            return

        with self.torrent_lock:
            if len(self.torrent_queue) > 0:
                self.logger.info(f'Torrent flush queue size: {len(self.torrent_queue)}, '
                                 f'next query length: {len(str(self.torrent_queue[0]))}')
            if len(self.torrent_buffer) == 0:
                return
            # queue one batch (list of rows) per flush
            self.torrent_queue.append(copy(self.torrent_buffer))
            self.torrent_buffer.clear()
            if not self.t_active:
                threading.Thread(target=self._do_flush_torrents).start()

    def _do_flush_torrents(self):
        self.t_active = True
        conn = self.get_connection()
        while len(self.torrent_queue) > 0:
            cursor = conn.cursor()
            cursor.executemany('INSERT INTO torrents (ID, Seeders, Leechers, Snatched, Balance) '
                               'VALUES (%s, %s, %s, %s, %s) '
                               'ON DUPLICATE KEY UPDATE Seeders=VALUES(Seeders), '
                               'Leechers=VALUES(Leechers), '
                               'Snatched = Snatched + VALUES(Snatched), '
                               'Balance=VALUES(Balance), '
                               'last_action=IF(VALUES(Seeders) > 0, NOW(), last_action)',
                               self.torrent_queue[0])
            # purge placeholder rows created by the upsert for deleted torrents
            cursor.execute("DELETE FROM torrents WHERE info_hash = ''")
            cursor.close()
            conn.commit()  # BUG FIX: writes were never committed
            with self.torrent_lock:
                self.torrent_queue.pop(0)
        conn.close()
        self.t_active = False

    def _flush_snatches(self):
        if self.readonly:
            self.snatch_buffer.clear()
            return

        with self.snatch_lock:
            if len(self.snatch_queue) > 0:
                self.logger.info(f'Snatch flush queue size: {len(self.snatch_queue)}, '
                                 f'next query length: {len(str(self.snatch_queue[0]))}')
            if len(self.snatch_buffer) == 0:
                return
            # queue one batch (list of rows) per flush
            self.snatch_queue.append(copy(self.snatch_buffer))
            self.snatch_buffer.clear()
            if not self.s_active:
                threading.Thread(target=self._do_flush_snatches).start()

    def _do_flush_snatches(self):
        self.s_active = True
        conn = self.get_connection()
        while len(self.snatch_queue) > 0:
            cursor = conn.cursor()
            cursor.executemany('INSERT INTO xbt_snatched (uid, fid, tstamp, IP) '
                               'VALUES (%s, %s, %s, %s)', self.snatch_queue[0])
            cursor.close()
            conn.commit()  # BUG FIX: writes were never committed
            with self.snatch_lock:
                self.snatch_queue.pop(0)
        conn.close()
        self.s_active = False

    def _flush_peers(self):
        if self.readonly:
            self.heavy_peer_buffer.clear()
            self.light_peer_buffer.clear()
            return

        with self.peer_lock:
            if len(self.peer_queue) > 0:
                self.logger.info(f'Heavy peer queue size: {len(self.peer_queue)}, '
                                 f'next query length: {len(str(self.peer_queue[0]))}')
            if len(self.heavy_peer_buffer) > 0:
                # cap the queue so an unreachable DB can't eat unbounded memory
                if len(self.peer_queue) > 1000:
                    self.peer_queue.pop(0)
                # heavy and light rows are queued as separate batches so each
                # batch maps to exactly one INSERT statement shape
                self.peer_queue.append(copy(self.heavy_peer_buffer))
                self.heavy_peer_buffer.clear()
            if len(self.light_peer_buffer) > 0:
                if len(self.peer_queue) > 1000:
                    self.peer_queue.pop(0)
                self.peer_queue.append(copy(self.light_peer_buffer))
                self.light_peer_buffer.clear()
            if not self.p_active:
                threading.Thread(target=self._do_flush_peers).start()

    def _do_flush_peers(self):
        self.p_active = True
        conn = self.get_connection()

        while len(self.peer_queue) > 0:
            cursor = conn.cursor()
            # BUG FIX: light rows carry 6 columns (the old check compared the
            # batch against 4, so light batches always hit the 15-column SQL)
            if len(self.peer_queue[0][0]) == 6:
                cursor.executemany('INSERT INTO xbt_files_users (uid, fid, timespent, '
                                   'announced, peer_id, mtime) '
                                   'VALUES (%s, %s, %s, %s, %s, %s) '
                                   'ON DUPLICATE KEY UPDATE upspeed=0, downspeed=0, '
                                   'timespent=VALUES(timespent), announced=VALUES(announced), '
                                   'mtime=VALUES(mtime)', self.peer_queue[0])
            else:
                cursor.executemany('INSERT INTO xbt_files_users (uid, fid, active, uploaded, '
                                   'downloaded, upspeed, downspeed, remaining, corrupt, '
                                   'timespent, announced, ip, peer_id, useragent, mtime) '
                                   'VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,'
                                   ' %s, %s) ON DUPLICATE KEY UPDATE active=VALUES(active), '
                                   'uploaded=VALUES(uploaded), downloaded=VALUES(downloaded), '
                                   'upspeed=VALUES(upspeed), downspeed=VALUES(downspeed), '
                                   'remaining=VALUES(remaining), corrupt=VALUES(corrupt), '
                                   'timespent=VALUES(timespent), announced=VALUES(announced), '
                                   'mtime=VALUES(mtime)', self.peer_queue[0])
            cursor.close()
            conn.commit()  # BUG FIX: writes were never committed
            with self.peer_lock:
                self.peer_queue.pop(0)
        conn.close()
        self.p_active = False

    def _flush_tokens(self):
        if self.readonly:
            self.token_buffer.clear()
            return

        with self.token_lock:
            if len(self.token_queue) > 0:
                self.logger.info(f'Token flush queue size: {len(self.token_queue)}, '
                                 f'next query length: {len(str(self.token_queue[0]))}')

            if len(self.token_buffer) == 0:
                return
            # queue one batch (list of rows) per flush
            self.token_queue.append(copy(self.token_buffer))
            self.token_buffer.clear()
            if not self.tok_active:
                threading.Thread(target=self._do_flush_tokens).start()

    def _do_flush_tokens(self):
        self.tok_active = True
        conn = self.get_connection()
        while len(self.token_queue) > 0:
            cursor = conn.cursor()
            cursor.executemany('INSERT INTO users_freeleeches (UserID, TorrentID, Downloaded) '
                               'VALUES(%s, %s, %s)', self.token_queue[0])
            cursor.close()
            conn.commit()  # BUG FIX: writes were never committed
            with self.token_lock:
                self.token_queue.pop(0)
        conn.close()
        self.tok_active = False

    def _clear_peer_data(self):
        """Reset all live peer state at startup."""
        self.db.query('TRUNCATE xbt_files_users')
        self.db.query('UPDATE torrents SET Seeders = 0, Leechers = 0')
        self.db.commit()  # BUG FIX: the UPDATE was never committed
361 |
--------------------------------------------------------------------------------
/margay/main.py:
--------------------------------------------------------------------------------
1 | from argparse import ArgumentParser
2 | import logging
3 | from logging.handlers import TimedRotatingFileHandler
4 | import signal
5 | import sys
6 | import threading
7 |
8 | from . import __version__
9 | from .config import Config
10 | from .database import Database
11 | from .site_comm import SiteComm
12 | from .schedule import Schedule
13 | from .worker import Worker
14 |
15 |
def run():
    """Tracker entry point: parse CLI arguments, configure logging, wire up
    the components (database, scheduler, site communication, worker) and
    serve until interrupted."""
    parser = ArgumentParser(description='Python BitTorrent tracker')
    # BUG FIX: -v/--verbose is documented in the README but was never defined
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Be more verbose in the output')
    parser.add_argument('-d', '--daemonize', action='store_true', help='Run tracker as daemon')
    parser.add_argument('-c', '--config', nargs='?')
    parser.add_argument('-V', '--version', action='version', version='%(prog)s ' + __version__)
    args = parser.parse_args()
    config = Config(args.config, args.daemonize)

    logger = logging.getLogger()
    # drop handlers installed by any earlier logging configuration
    while logger.handlers:
        logger.handlers.pop()
    log_format = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
    if config['logging']['log']:
        if config['logging']['log_file']:
            file_logger = TimedRotatingFileHandler(config['logging']['log_path'], when='d',
                                                   backupCount=5)
            file_logger.setFormatter(log_format)
            file_logger.setLevel(config['logging']['log_level'])
            # BUG FIX: the file handler was created but never attached
            logger.addHandler(file_logger)
        if config['logging']['log_console']:
            console_logger = logging.StreamHandler(sys.stdout)
            console_logger.setFormatter(log_format)
            console_logger.setLevel(config['logging']['log_level'])
            logger.addHandler(console_logger)
    else:
        logger.addHandler(logging.NullHandler())
    # --verbose forces DEBUG regardless of the configured level
    logger.setLevel(logging.DEBUG if args.verbose else config['logging']['log_level'])

    database = Database(config['mysql'], config['debug']['readonly'])
    schedule = Schedule(config['timers']['schedule_interval'],
                        config['timers']['reap_peers_interval'],
                        database)
    site_comm = SiteComm(config)
    worker = Worker(database, site_comm, config)

    def sig_handler(sig, _):
        """Handle shutdown/reload signals.  Also registered for SIGUSR2,
        which is currently a no-op."""
        handler_log = logging.getLogger()
        if sig == signal.SIGINT or sig == signal.SIGTERM:
            handler_log.info('Caught SIGINT/SIGTERM')
            if worker.shutdown():
                raise SystemExit
        elif sig == signal.SIGHUP:
            handler_log.info('Reloading config')
            config.reload()
            # reload various classes
        elif sig == signal.SIGUSR1:
            handler_log.info('Reloading from database')
            # BUG FIX: the reload thread was created but never started
            threading.Thread(target=worker.reload_lists).start()

    signal.signal(signal.SIGINT, sig_handler)
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGHUP, sig_handler)
    signal.signal(signal.SIGUSR1, sig_handler)
    signal.signal(signal.SIGUSR2, sig_handler)

    try:
        worker.create_server(config['internal']['listen_port'])
    finally:
        schedule.stop()
76 |
--------------------------------------------------------------------------------
/margay/schedule.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from threading import Thread, Timer
3 |
4 | import margay.stats as stats
5 |
6 |
class Schedule(Thread):
    """Periodic maintenance task driven by a self-re-arming Timer.

    Every `interval` seconds it updates throughput rates, logs a status
    line every 20th tick, and flushes the database write buffers; the
    `reap_interval` countdown fires periodically (the reaping action
    itself is not implemented yet).
    """

    def __init__(self, interval, reap_interval, database):
        """
        :param interval: seconds between ticks
        :param reap_interval: seconds between peer-reap countdowns
        :param database: Database instance whose flush() is driven here
        """
        super().__init__()
        self.logger = logging.getLogger()
        self._timer = None
        self.is_running = False
        self.counter = 0
        # snapshots from the previous tick, used to derive rates
        self.last_opened_connections = 0
        self.last_request_count = 0
        self.interval = interval
        self.reap_interval = reap_interval
        self.database = database
        self._reap = self.reap_interval
        self.start()

    def start(self):
        """(Re)arm the timer for the next tick if not already armed."""
        if not self.is_running:
            self._timer = Timer(self.interval, self._run)
            self._timer.start()
            self.is_running = True

    def stop(self):
        """Cancel the pending timer so no further ticks fire."""
        self._timer.cancel()
        self.is_running = False

    def _run(self):
        self.is_running = False
        self.start()  # re-arm immediately so timing drift stays minimal

        self._reap -= self.interval

        # BUG FIX: the rates were logged from stats but never computed
        # anywhere; derive them from the deltas since the previous tick.
        stats.connection_rate = ((stats.opened_connections - self.last_opened_connections)
                                 // self.interval)
        stats.request_rate = (stats.requests - self.last_request_count) // self.interval

        if self.counter % 20 == 0:
            self.logger.info(f'{stats.open_connections} open, '
                             f'{stats.opened_connections} connections ({stats.connection_rate}/s) '
                             f'{stats.requests} requests ({stats.request_rate}/s)')

        self.last_opened_connections = stats.opened_connections
        self.last_request_count = stats.requests

        self.database.flush()

        if self._reap <= 0:
            self._reap = self.reap_interval
            # TODO: actually reap inactive peers when the countdown fires

        self.counter += 1
52 |
--------------------------------------------------------------------------------
/margay/site_comm.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import threading
3 |
4 | import requests
5 |
6 |
class SiteComm(object):
    """Pushes expired freeleech tokens back to the Gazelle site over HTTPS.

    Token pairs are accumulated in a string buffer; once queued (either by
    overflow in expire_token or by flush_tokens) a background thread sends
    them to the site's tools.php endpoint.
    """

    def __init__(self, config):
        """
        :param config: Config (or dict) providing the 'gazelle' and 'debug'
            sections read by load_config
        """
        self.config = config

        self.logger = logging.getLogger()

        self.site_host = ''
        self.site_path = ''
        self.site_password = ''
        self.readonly = False

        self.expire_queue_lock = threading.RLock()
        self.token_queue = list()
        self.expire_token_buffer = ''
        self.t_active = False  # True while the flush thread is running
        self.verbose_flush = False

        self.load_config(self.config)

    def load_config(self, config):
        """Pull the site connection settings out of the configuration."""
        self.site_host = config['gazelle']['site_host']
        self.site_path = config['gazelle']['site_path']
        self.site_password = config['gazelle']['site_password']
        self.readonly = config['debug']['readonly']

    def reload_config(self, config):
        """Re-apply configuration at runtime (e.g. after SIGHUP)."""
        self.load_config(config)

    def all_clear(self) -> bool:
        """True when no queued token batches are awaiting delivery."""
        return len(self.token_queue) == 0

    def expire_token(self, torrent: int, user: int):
        """Buffer a 'user:torrent' pair for expiry on the site.

        When the buffer grows past 350 characters it is moved to the send
        queue immediately (or discarded in readonly mode).
        """
        token_pair = f'{user}:{torrent}'
        if self.expire_token_buffer != '':
            self.expire_token_buffer += ','
        self.expire_token_buffer += token_pair
        if len(self.expire_token_buffer) > 350:
            self.logger.info('Flushing overloaded token buffer')
            if self.readonly:
                # BUG FIX: in readonly mode the buffer was never cleared,
                # so it grew without bound
                self.expire_token_buffer = ''
            else:
                with self.expire_queue_lock:
                    self.token_queue.append(self.expire_token_buffer)
                self.expire_token_buffer = ''

    def flush_tokens(self) -> None:
        """Queue the current buffer and ensure the sender thread is running."""
        if self.readonly:
            self.expire_token_buffer = ''
            return
        with self.expire_queue_lock:
            if self.verbose_flush or len(self.token_queue) > 0:
                self.logger.info(f'Token expire queue size: {len(self.token_queue)}')
            if self.expire_token_buffer == '':
                return
            # BUG FIX: extend() iterated the buffer string, queueing it one
            # character at a time; the whole batch must be queued as one entry
            self.token_queue.append(self.expire_token_buffer)
            self.expire_token_buffer = ''
            if not self.t_active:
                # BUG FIX: the thread object was created but never started
                threading.Thread(target=self._do_flush_tokens).start()

    def _do_flush_tokens(self):
        self.t_active = True
        while len(self.token_queue) > 0:
            response = requests.get(f'https://{self.site_host}/tools.php', params={
                'key': self.site_password,
                'type': 'expiretoken',
                'action': 'ocelot',
                'tokens': self.token_queue[0]
            })

            if response.status_code == 200:
                with self.expire_queue_lock:
                    self.token_queue.pop(0)
            else:
                self.logger.error(f'Response returned with status code {response.status_code} '
                                  f'when trying to expire a token!')
        self.t_active = False
81 |
--------------------------------------------------------------------------------
/margay/stats.py:
--------------------------------------------------------------------------------
import time

# Global tracker counters.  Other modules mutate these in place
# (e.g. database.load_torrents adjusts leechers/seeders and
# schedule logs the connection/request figures).
open_connections = 0     # connections currently open
opened_connections = 0   # total connections accepted since start
connection_rate = 0      # connections per second (intended; see schedule.py)
requests = 0             # total requests served
request_rate = 0         # requests per second (intended; see schedule.py)
leechers = 0             # current leecher count across all torrents
seeders = 0              # current seeder count across all torrents
announcements = 0        # announce requests received
succ_announcements = 0   # announce requests handled successfully
scrapes = 0              # scrape requests received
bytes_read = 0           # bytes read from clients
bytes_written = 0        # bytes written to clients
start_time = int(time.time())  # tracker start timestamp (epoch seconds)
--------------------------------------------------------------------------------
/margay/structs.py:
--------------------------------------------------------------------------------
1 | from collections import OrderedDict
2 | from enum import IntEnum
3 |
4 |
class User(object):
    """Tracker-side record of a site user.

    uid: the user's site id; leech: whether leeching is permitted;
    protect: when True the user's IP is never written to the database.
    leeching/seeding are live peer counters maintained by the worker, and
    deleted marks a user removed from the site.
    """

    def __init__(self, uid: int, leech: bool, protect: bool):
        self.id, self.leech, self.protect = uid, leech, protect
        self.leeching = self.seeding = 0
        self.deleted = False
13 |
14 |
class Peer(object):
    """State for one (user, torrent) swarm participant, updated on announce."""

    def __init__(self):
        # transfer totals as last reported by the client
        self.uploaded = 0
        self.downloaded = 0
        self.corrupt = 0
        self.left = 0
        # announce bookkeeping; timestamps stay None until the first announce
        self.last_announced = None
        self.first_announced = None
        self.announces = 0
        # visible: eligible to appear in peer lists handed to other clients
        self.visible = False
        self.invalid_ip = False
        self.user = None  # type: User
        # network endpoint; ip_port holds the 6-byte compact form once known
        self.ip = None
        self.ip_port = ''
        self.port = None  # assigned once (was duplicated in the original)
31 |
32 |
class Torrent(object):
    """In-memory swarm state for a single tracked torrent."""

    def __init__(self, tid, completed):
        self.id = tid
        self.completed = completed  # snatch count
        self.balance = 0            # net uploaded-minus-downloaded across the swarm
        self.free_torrent = None    # LeechType once set by the site
        self.last_flushed = 0
        # peers keyed by peer_key; insertion order drives seeder rotation
        self.seeders = OrderedDict()   # type: OrderedDict[str, Peer]
        self.leechers = OrderedDict()  # type: OrderedDict[str, Peer]
        self.last_selected_seeder = ''
        self.tokened_users = []        # user ids holding a freeleech token
44 |
45 |
class ErrorCodes(IntEnum):
    """Site deletion-reason codes (must stay in sync with Gazelle)."""
    DUPE = 0
    TRUMP = 1
    BAD_FILE_NAMES = 2
    BAD_FOLDER_NAMES = 3
    BAD_TAGS = 4
    BAD_FORMAT = 5
    DISCS_MISSING = 6
    DISCOGRAPHY = 7
    EDITED_LOG = 8
    INACCURATE_BITRATE = 9
    LOW_BITRATE = 10
    MUTT_RIP = 11
    BAD_SOURCE = 12
    ENCODE_ERRORS = 13
    BANNED = 14
    TRACKS_MISSING = 15
    TRANSCODE = 16
    CASSETTE = 17
    UNSPLIT_ALBUM = 18
    USER_COMPILATION = 19
    WRONG_FORMAT = 20
    WRONG_MEDIA = 21
    AUDIENCE = 22

    @classmethod
    def get_del_reason(cls, value):
        """Return the human-readable reason for *value* (int or member).

        Unknown or unhashable values yield '' — same as the original
        elif chain, where every comparison simply failed.
        """
        reasons = {
            cls.DUPE: 'Dupe',
            cls.TRUMP: 'Trump',
            cls.BAD_FILE_NAMES: 'Bad File Names',
            cls.BAD_FOLDER_NAMES: 'Bad Folder Names',
            cls.BAD_TAGS: 'Bad Tags',
            cls.BAD_FORMAT: 'Disallowed Format',
            cls.DISCS_MISSING: 'Discs Missing',
            cls.DISCOGRAPHY: 'Discography',
            cls.EDITED_LOG: 'Edited Log',
            cls.INACCURATE_BITRATE: 'Inaccurate Bitrate',
            cls.LOW_BITRATE: 'Low Bitrate',
            cls.MUTT_RIP: 'Mutt Rip',
            cls.BAD_SOURCE: 'Disallowed Source',
            cls.ENCODE_ERRORS: 'Encode Errors',
            cls.BANNED: 'Specifically Banned',
            cls.TRACKS_MISSING: 'Tracks Missing',
            cls.TRANSCODE: 'Transcode',
            cls.CASSETTE: 'Unapproved Cassette',
            cls.UNSPLIT_ALBUM: 'Unsplit Album',
            cls.USER_COMPILATION: 'User Compilation',
            cls.WRONG_FORMAT: 'Wrong Format',
            cls.WRONG_MEDIA: 'Wrong Media',
            cls.AUDIENCE: 'Audience Recording',
        }
        try:
            return reasons.get(value, '')
        except TypeError:  # unhashable value: original chain also returned ''
            return ''
121 |
122 |
class LeechType(IntEnum):
    """Freeleech status of a torrent (values mirror Gazelle's FreeTorrent)."""
    NORMAL = 0
    FREE = 1
    NEUTRAL = 2

    @classmethod
    def to_enum(cls, value):
        """Convert a raw site/database value to a member.

        Accepts either the int value or its string form (the original only
        accepted strings such as '1'); raises ValueError otherwise.
        """
        for member in cls:
            if value == member.value or value == str(member.value):
                return member
        raise ValueError('Invalid leech type')
134 |
135 |
# Module-level registries. NOTE(review): worker.py keeps its own torrents/users
# dicts on the Worker instance; these appear unused in the visible code —
# confirm whether other modules rely on them before removing.
torrent_list = dict()
user_list = dict()
peer_list = dict()
139 |
--------------------------------------------------------------------------------
/margay/util.py:
--------------------------------------------------------------------------------
1 | import binascii
2 |
3 |
def hex_decode(inp: str) -> str:
    """Decode percent-encoded *inp*, mapping each %XY escape to chr(0xXY).

    Quirk preserved from the original (Ocelot-style) decoder: an invalid
    high nibble falls back to 0x30 ('0') before shifting, and an invalid
    low nibble contributes nothing. A trailing '%' without two following
    characters is copied through literally.
    """
    def _nibble(ch: str):
        # hex digit -> 0..15, or None when ch is not a hex digit
        if 'a' <= ch <= 'f':
            return ord(ch) - 87
        if 'A' <= ch <= 'F':
            return ord(ch) - 55
        if '0' <= ch <= '9':
            return ord(ch) - 48
        return None

    pieces = []
    pos = 0
    length = len(inp)
    while pos < length:
        ch = inp[pos]
        if ch == '%' and pos + 2 < length:
            hi = _nibble(inp[pos + 1])
            lo = _nibble(inp[pos + 2])
            # ord('0') == 48 is the historical fallback for a bad high nibble
            value = (hi << 4) if hi is not None else 48
            if lo is not None:
                value += lo
            pieces.append(chr(value))
            pos += 3
        else:
            pieces.append(ch)
            pos += 1
    return ''.join(pieces)
35 |
36 |
if __name__ == '__main__':
    # Ad-hoc manual check: decode a percent-encoded info_hash and print its
    # hex form; the transcript string below shows the expected digest
    # a959693fcac904b7247537b3327740bfceaf7851.
    #a = 'A959693FCAC904B7247537B3327740BFCEAF7851'
    #b = b'0xA959693FCAC904B7247537B3327740BFCEAF7851'
    #print(a)
    #print(b)
    test = hex_decode('%a9Yi%3f%ca%c9%04%b7%24u7%b32w%40%bf%ce%afxQ')
    print(''.join(hex(ord(c))[2:] for c in test))
    #print(binascii.hexlify(bytearray(, 'utf8')))
    #print(binascii.hexlify(hex_decode('%A9Yi?%CA%C9%04%B7$u7%B32w@%BF%CE%AFxQ')))
    #print(hashlib.sha1(hex_decode('%a9Yi%3f%ca%c9%04%b7%24u7%b32w%40%bf%ce%afxQ').encode('utf-8')).digest())

    """
    >>> import urllib
    >>> '%a9Yi%3f%ca%c9%04%b7%24u7%b32w%40%bf%ce%afxQ'
    '%a9Yi%3f%ca%c9%04%b7%24u7%b32w%40%bf%ce%afxQ'
    >>> urllib.unquote_plus(_)
    '\xa9Yi?\xca\xc9\x04\xb7$u7\xb32w@\xbf\xce\xafxQ'
    >>> _.encode('hex')
    'a959693fcac904b7247537b3327740bfceaf7851'
    >>> 'a959693fcac904b7247537b3327740bfceaf7851'
    """
--------------------------------------------------------------------------------
/margay/worker.py:
--------------------------------------------------------------------------------
1 | import ipaddress
2 | from enum import Enum, auto
3 | import logging
4 | import re
5 | from time import time
6 | import threading
7 | from typing import Dict, List
8 |
9 | # noinspection PyPackageRequirements
10 | import bencode
11 | from aiohttp import web
12 |
13 | from .structs import ErrorCodes, LeechType, Peer, Torrent, User
14 | import margay.stats as stats
15 |
16 | REGEX = re.compile(r'info_hash=([%a-zA-Z0-9]+)')
17 |
18 |
class Status(Enum):
    """Tracker lifecycle: OPEN serves requests, PAUSED rejects them while the
    lists reload, CLOSING refuses new work during shutdown."""
    OPEN = auto()
    PAUSED = auto()
    CLOSING = auto()
23 |
24 |
25 | class Worker(object):
    def __init__(self, database, site_comm, config):
        """Wire the worker to its collaborators and load initial state.

        database: margay.database wrapper (provides locks and record_* calls).
        site_comm: channel back to the Gazelle site (token expiry etc.).
        config: parsed configuration mapping (see load_config for keys used).
        """
        self.logger = logging.getLogger()
        self.database = database
        self.site_comm = site_comm
        self.config = config
        # live tracker state, keyed by info_hash / passkey / peer_id respectively
        self.torrents = dict()  # type: Dict[str, Torrent]
        self.users = dict()  # type: Dict[str, User]
        self.whitelist = list()  # type: List[str]

        # recently deleted torrents, kept so announces can be answered
        # meaningfully until reap_del_reasons expires the entries
        self.del_reasons = dict()
        self.del_reasons_lock = threading.RLock()

        # tunables, populated from config by load_config below
        self.announce_interval = 0
        self.del_reason_lifetime = 0
        self.peers_timeout = 0
        self.numwant_limit = 0
        self.site_password = ''
        self.report_password = ''

        self.status = Status.OPEN

        # guards against overlapping background reaper runs (see start_reaper)
        self.reaper_active = False

        self.load_config(self.config)
        self.reload_lists()
51 |
52 | def load_config(self, config):
53 | self.announce_interval = config['tracker']['announce_interval']
54 | self.del_reason_lifetime = config['timers']['del_reason_lifetime']
55 | self.peers_timeout = config['timers']['peers_timeout']
56 | self.numwant_limit = config['tracker']['numwant_limit']
57 | self.site_password = config['gazelle']['site_password']
58 | self.report_password = config['gazelle']['report_password']
59 |
    def reload_config(self, config):
        """Re-apply tunables after the configuration has been reloaded."""
        self.load_config(config)
62 |
63 | def shutdown(self):
64 | if self.status == Status.OPEN:
65 | self.status = Status.CLOSING
66 | self.logger.info('closing tracker... press Ctrl+C again to terminate')
67 | return False
68 | elif self.status == Status.CLOSING:
69 | self.logger.info('shutting down uncleanly')
70 | return True
71 | else:
72 | return False
73 |
    def reload_lists(self):
        """Refresh torrents, users and whitelist from the database.

        The tracker is paused for the duration so announces observe a
        consistent view of the three lists.
        """
        self.status = Status.PAUSED
        self.torrents = self.database.load_torrents(self.torrents)
        self.users = self.database.load_users(self.users)
        self.whitelist = self.database.load_whitelist()
        self.status = Status.OPEN
80 |
    def create_server(self, port):
        """Build the aiohttp application and serve it on 127.0.0.1:port.

        Blocks in web.run_app; signal handling is left to the caller
        (handle_signals=False).
        """
        app = web.Application()
        app.router.add_get('/', self.handler_null)
        app.router.add_get('/{passkey}/{action}', self.handler_work)
        self.logger.info(f'======== Running on http://127.0.0.1:{port} ========')
        web.run_app(app, host='127.0.0.1', print=False, port=port, handle_signals=False)
87 |
88 | async def handler_null(self):
89 | return self.handle_null()
90 |
    # noinspection PyMethodMayBeStatic
    def handle_null(self):
        """Plain-text placeholder response for requests that hit no real endpoint."""
        return web.Response(text='Nothing to see here.')
94 |
95 | def error(self, message):
96 | response = {'failure reason': message, 'min interval': 5400, 'interval': 5400}
97 | return self.response(response)
98 |
    def warning(self, message):
        """Bencoded response carrying a non-fatal warning for the client."""
        return self.response({'warning message': message})
101 |
    async def handler_work(self, request):
        """Dispatch /{passkey}/{action} requests.

        announce/scrape authenticate via the per-user passkey; update/report
        (site-originated management traffic) authenticate via the shared
        site password in the passkey slot instead.
        """
        action = request.match_info.get('action').lower()
        actions = ['announce', 'scrape', 'update', 'report']
        if action not in actions:
            return web.Response(text='Invalid action.')
        if len(request.query) == 0:
            return self.handle_null()

        # PAUSED (list reload) or CLOSING: turn everything away politely
        if self.status != Status.OPEN:
            return self.error('The tracker is temporarily unavailable.')

        passkey = request.match_info.get('passkey')
        if action == 'update' or action == 'report':
            if passkey != self.site_password:
                return self.error('Authentication failure.')

            if action == 'update':
                return self.handle_update(request)
            elif action == 'report':
                return self.handle_report(request)

        # user lookup only held under the lock; the handlers run outside it
        with self.database.user_list_lock:
            if passkey not in self.users:
                return self.error('Passkey not found')
            user = self.users[passkey]

        if action == 'announce':
            return self.handle_announce(request, user)
        elif action == 'scrape':
            return self.handle_scrape(request)
132 |
133 | def handle_announce(self, request, user):
134 | params = request.query
135 | with self.database.torrent_list_lock:
136 | tor = self.torrents[params['info_hash']] # type: Torrent
137 | cur_time = int(time())
138 | if params['compact'] != '1':
139 | return self.error('Your client does not support compact announces')
140 |
141 | left = max(0, int(params['left']))
142 | uploaded = max(0, int(params['uploaded']))
143 | downloaded = max(0, int(params['downloaded']))
144 | corrupt = max(0, int(params['corrupt']))
145 |
146 | snatched = 0
147 | active = 1
148 | inserted = False
149 | update_torrent = False
150 | completed_torrent = False
151 | stopped_torrent = False
152 | expire_token = False
153 | peer_changed = False
154 | invalid_ip = False
155 | inc_l = inc_s = dec_l = dec_s = False
156 |
157 | if 'peer_id' not in params:
158 | return self.error('No peer ID')
159 | elif len(params['peer_id']) != 20:
160 | return self.error('Invalid peer ID')
161 |
162 | with self.database.whitelist_lock:
163 | if len(self.whitelist) > 0:
164 | found = False
165 | for client in self.whitelist:
166 | if params['peer_id'].startswith(client):
167 | found = True
168 |
169 | if not found:
170 | return self.error('Your client is not on the whitelist')
171 |
172 | peer_key = params['peer_id'][12 + (tor.id & 7)] + str(user.id) + params['peer_id']
173 |
174 | if params['event'] == 'completed':
175 | completed_torrent = left == 0
176 | elif params['event'] == 'stopped':
177 | stopped_torrent = True
178 | peer_changed = True
179 | update_torrent = True
180 | active = 0
181 |
182 | peer = None # type: Peer
183 | if left > 0:
184 | if peer_key not in tor.leechers:
185 | peer = Peer()
186 | tor.leechers[peer_key] = peer
187 | inserted = True
188 | inc_l = True
189 | else:
190 | peer = tor.leechers[peer_key]
191 | elif completed_torrent:
192 | if peer_key not in tor.leechers:
193 | if peer_key not in tor.seeders:
194 | peer = Peer()
195 | tor.seeders[peer_key] = peer
196 | inserted = True
197 | inc_s = True
198 | else:
199 | peer = tor.seeders[peer_key]
200 | completed_torrent = False
201 | elif peer_key in tor.seeders:
202 | peer = tor.leechers[peer_key]
203 | dec_s = True
204 | else:
205 | if peer_key not in tor.seeders:
206 | if peer_key not in tor.leechers:
207 | peer = Peer()
208 | tor.seeders[peer_key] = peer
209 | inserted = True
210 | else:
211 | peer = tor.leechers[peer_key]
212 | tor.seeders[peer_key] = peer
213 | del tor.leechers[peer_key]
214 | peer_changed = True
215 | dec_l = True
216 |
217 | upspeed = 0
218 | downspeed = 0
219 |
220 | if inserted or params['event'] == 'started':
221 | update_torrent = True
222 | if inserted:
223 | peer.user = user
224 | peer.first_announced = cur_time
225 | peer.last_announced = 0
226 | peer.uploaded = uploaded
227 | peer.downloaded = downloaded
228 | peer.corrupt = corrupt
229 | peer.announces = 1
230 | peer_changed = True
231 | elif uploaded < peer.uploaded or downloaded < peer.downloaded:
232 | peer.announces += 1
233 | peer.uploaded = uploaded
234 | peer.downloaded = downloaded
235 | peer_changed = True
236 | else:
237 | uploaded_change = 0
238 | downloaded_change = 0
239 | corrupt_change = 0
240 | peer.announces += 1
241 |
242 | if uploaded != peer.uploaded:
243 | uploaded_change = uploaded - peer.uploaded
244 | peer.uploaded = uploaded
245 | if downloaded != peer.downloaded:
246 | downloaded_change = downloaded - peer.downloaded
247 | peer.downloaded = downloaded
248 | if corrupt != peer.corrupt:
249 | corrupt_change = corrupt - peer.corrupt
250 | peer.corrupt = corrupt
251 | tor.balance -= corrupt_change
252 | update_torrent = True
253 | peer_changed = peer_changed or uploaded_change or downloaded_change or corrupt_change
254 |
255 | if uploaded_change or downloaded_change:
256 | tor.balance += uploaded_change
257 | tor.balance -= downloaded_change
258 | update_torrent = True
259 |
260 | if cur_time > peer.last_announced:
261 | upspeed = uploaded_change / (cur_time - peer.last_announced)
262 | downspeed = downloaded_change / (cur_time - peer.last_announced)
263 |
264 | tokened = user.id in tor.tokened_users
265 | if tor.free_torrent == LeechType.NEUTRAL:
266 | downloaded_change = 0
267 | uploaded_change = 0
268 | elif tor.free_torrent == LeechType.FREE or tokened:
269 | if tokened:
270 | expire_token = True
271 | self.database.record_token(user.id, tor.id, downloaded_change)
272 | downloaded_change = 0
273 |
274 | if uploaded_change or downloaded_change:
275 | self.database.record_user(user.id, uploaded_change, downloaded_change)
276 |
277 | peer.left = left
278 |
279 | if 'ip' in params:
280 | ip = params['ip']
281 | elif 'ipv4' in params:
282 | ip = params['ipv4']
283 | else:
284 | ip = request.headers['x-forwarded-for'].split(',')[0]
285 |
286 | port = int(params['port'])
287 |
288 | if inserted or port != peer.port or ip != peer.ip:
289 | peer.ip = ip
290 | peer.port = port
291 | parsed = ipaddress.ip_address(ip)
292 | if parsed.is_private or parsed.is_unspecified or parsed.is_reserved or parsed.is_loopback:
293 | invalid_ip = True
294 | if not invalid_ip:
295 | peer.ip_port = parsed.packed + port.to_bytes(length=2, byteorder='big')
296 | if len(peer.ip_port) != 6:
297 | peer.ip_port = ''
298 | invalid_ip = True
299 | peer.invalid_ip = invalid_ip
300 | else:
301 | invalid_ip = peer.invalid_ip
302 |
303 | peer.last_announced = cur_time
304 |
305 | # Peer is visible in the lists if they have their leech priviledges and they're not
306 | # using an invalid IP address
307 | peer.visible = (peer.left == 0 or user.leech) and not peer.invalid_ip
308 |
309 | if peer_changed:
310 | record_ip = '' if user.protect else ip
311 | self.database.record_peer_heavy(user.id, tor.id, active, uploaded, downloaded,
312 | upspeed, downspeed, left, corrupt,
313 | (cur_time - peer.first_announced), peer.announces,
314 | record_ip, params['peer_id'],
315 | request.headers['user-agent'])
316 | else:
317 | self.database.record_peer_light(user.id, tor.id, (cur_time - peer.first_announced),
318 | peer.announces, params['peer_id'])
319 |
320 | numwant = self.numwant_limit
321 | if 'numwant' in params:
322 | numwant = min(params['numwant'], numwant)
323 |
324 | if stopped_torrent:
325 | numwant = 0
326 | if left > 0:
327 | dec_l = True
328 | else:
329 | dec_s = True
330 | elif completed_torrent:
331 | snatched = 1
332 | update_torrent = True
333 | tor.completed += 1
334 |
335 | record_ip = '' if user.protect else ip
336 | self.database.record_snatch(user.id, tor.id, cur_time, record_ip)
337 |
338 | if not inserted:
339 | tor.seeders[peer_key] = peer
340 | del tor.leechers[peer_key]
341 | dec_l = inc_s = True
342 |
343 | if expire_token:
344 | self.site_comm.expire_token(tor.id, user.id)
345 | tor.tokened_users.remove(user.id)
346 | elif not user.leech and left > 0:
347 | numwant = 0
348 |
349 | peers = b''
350 | if numwant > 0:
351 | found_peers = 0
352 | if left > 0:
353 | if len(tor.seeders) > 0:
354 | seeders_list = list(tor.seeders.keys())
355 | i = 0
356 |
357 | # Find out where to begin in the seeder list
358 | if tor.last_selected_seeder != '':
359 | try:
360 | i = seeders_list.index(tor.last_selected_seeder)
361 | i += 1
362 | if i == len(seeders_list):
363 | i = 0
364 | except ValueError:
365 | pass
366 |
367 | # Find out where to end in the seeder list
368 | end = len(seeders_list)
369 | if i != 0:
370 | end = i - 1
371 | if end == 0:
372 | end += 1
373 | i += 1
374 |
375 | while i != end and found_peers < numwant:
376 | if i == len(seeders_list):
377 | i = 0
378 | seeder = tor.seeders[seeders_list[i]]
379 | # Don't show users to themselves or leech disabled users
380 | if seeder.user.deleted or seeder.user.id == user.id or not seeder.visible:
381 | i += 1
382 | continue
383 | found_peers += 1
384 | peers += seeder.ip_port
385 |
386 | if found_peers < numwant and len(tor.leechers) > 1:
387 | for key in tor.leechers:
388 | leecher = tor.leechers[key]
389 | if leecher.user.deleted or leecher.ip_port == peer.ip_port or leecher.user.id == user.id or not leecher.visible:
390 | continue
391 | found_peers += 1
392 | peers += leecher.ip_port
393 | if found_peers >= numwant:
394 | break
395 | elif len(tor.leechers) > 0:
396 | for key in tor.leechers:
397 | leecher = tor.leechers[key]
398 | if leecher.user.id == user.id or not leecher.visible:
399 | continue
400 | found_peers += 1
401 | peers += leecher.ip_port
402 | if found_peers >= numwant:
403 | break
404 |
405 |
406 | stats.succ_announcements += 1
407 | if dec_l or dec_s or inc_l or inc_s:
408 | if inc_l:
409 | peer.user.leeching += 1
410 | stats.leechers += 1
411 | if inc_s:
412 | peer.user.seeding += 1
413 | stats.seeders += 1
414 | if dec_l:
415 | peer.user.leeching -= 1
416 | stats.leechers -= 1
417 | if dec_s:
418 | peer.user.seeding -= 1
419 | stats.seeders -= 1
420 |
421 | if peer.user != user:
422 | if not stopped_torrent:
423 | if left > 0:
424 | user.leeching += 1
425 | peer.user.leeching -= 1
426 | else:
427 | user.seeding += 1
428 | peer.user.seeding -= 1
429 | peer.user = user
430 |
431 | if stopped_torrent:
432 | if left > 0:
433 | del tor.leechers[peer_key]
434 | else:
435 | del tor.seeders[peer_key]
436 |
437 | if update_torrent or tor.last_flushed + 3600 < cur_time:
438 | tor.last_flushed = cur_time
439 |
440 | self.database.record_torrent(tor.id, len(tor.seeders), len(tor.leechers), snatched,
441 | tor.balance)
442 |
443 | if not user.leech and left > 0:
444 | return self.error('Access denied, leeching forbidden')
445 |
446 | response = {
447 | 'complete': len(tor.seeders),
448 | 'downloaded': tor.completed,
449 | 'incomplete': len(tor.leechers),
450 | 'interval': self.announce_interval + min(600, len(tor.seeders)), # ensure a more even distribution of announces/second
451 | 'min interval': self.announce_interval,
452 | 'peers': peers
453 | }
454 |
455 | if invalid_ip:
456 | response['warning message'] = 'Illegal character found in IP address. IPv6 is not ' \
457 | 'supported'
458 |
459 | return web.Response(text=bencode.encode(response))
460 |
461 | def handle_scrape(self, request):
462 | response = {'files': {}}
463 | for infohash in request.query['info_hash']:
464 | if infohash not in self.torrents:
465 | continue
466 | t = self.torrents[infohash]
467 | response['files'][infohash] = {
468 | 'complete': len(t.seeders),
469 | 'incomplete': len(t.leechers),
470 | 'downloaded': t.completed
471 | }
472 | return web.Response(text=bencode.encode(response))
473 |
474 | def handle_update(self, request):
475 | params = request.query
476 | if params['action'] == 'change_passkey':
477 | oldpasskey = params['oldpasskey']
478 | newpasskey = params['newpasskey']
479 | with self.database.user_list_lock:
480 | if oldpasskey not in self.users:
481 | self.logger.warning(f'No user with passkey {oldpasskey} exists when '
482 | f'attempting to change passkey to {newpasskey}')
483 | else:
484 | self.users[newpasskey] = self.users[oldpasskey]
485 | del self.users[oldpasskey]
486 | self.logger.info(f'Changed passkey from {oldpasskey} to {newpasskey} for '
487 | f'user {self.users[newpasskey].id}')
488 | elif params['action'] == 'add_torrent':
489 | info_hash = params['info_hash']
490 | with self.database.torrent_list_lock:
491 | if info_hash not in self.torrents:
492 | torrent = Torrent(params['id'], 0)
493 | else:
494 | torrent = self.torrents[info_hash]
495 | if params['freetorrent'] == '0':
496 | torrent.free_torrent = LeechType.NORMAL
497 | elif params['freetorrent'] == '1':
498 | torrent.free_torrent = LeechType.FREE
499 | else:
500 | torrent.free_torrent = LeechType.NEUTRAL
501 | self.torrents[info_hash] = torrent
502 | self.logger.info(f"Added torrent {torrent.id}. FL: {torrent.free_torrent} "
503 | f"{params['freetorrent']}")
504 | elif params['action'] == 'update_torrent':
505 | info_hash = params['info_hash']
506 | if params['freetorrent'] == '0':
507 | fl = LeechType.NORMAL
508 | elif params['freetorrent'] == '1':
509 | fl = LeechType.FREE
510 | else:
511 | fl = LeechType.NEUTRAL
512 | with self.database.torrent_list_lock:
513 | if info_hash in self.torrents:
514 | self.torrents[info_hash].free_torrent = fl
515 | self.logger.info(f'Updated torrent {self.torrents[info_hash].id} to FL {fl}')
516 | else:
517 | self.logger.warning(f'Failed to find torrent {info_hash} to FL {fl}')
518 | elif params['action'] == 'update_torrents':
519 | # Each decoded infohash is exactly 20 characters long
520 | # TODO: this probably doesn't work and needs more work
521 | info_hashes = params['info_hashes']
522 | if params['freetorrent'] == '0':
523 | fl = LeechType.NORMAL
524 | elif params['freetorrent'] == '1':
525 | fl = LeechType.FREE
526 | else:
527 | fl = LeechType.NEUTRAL
528 | with self.database.torrent_list_lock:
529 | pos = 0
530 | while pos < len(info_hashes):
531 | info_hash = info_hashes[pos:pos+20]
532 | if info_hash in self.torrents:
533 | self.torrents[info_hash].free_torrent = fl
534 | self.logger.info(f'Updated torrent {self.torrents[info_hash].id} '
535 | f'to FL {fl}')
536 | else:
537 | self.logger.warning(f'Failed to find torrent {info_hash} to FL {fl}')
538 | elif params['action'] == 'add_token':
539 | info_hash = params['info_hash']
540 | userid = int(params['userid'])
541 | with self.database.torrent_list_lock:
542 | if info_hash in self.torrents:
543 | self.torrents[info_hash].tokened_users.remove(userid)
544 | else:
545 | self.logger.warning(f'Failed to find torrent to add a token for user {userid}')
546 | elif params['action'] == 'remove_token':
547 | info_hash = params['info_hash']
548 | userid = int(params['userid'])
549 | with self.database.torrent_list_lock:
550 | if info_hash in self.torrents:
551 | self.torrents[info_hash].tokened_users.remove(userid)
552 | else:
553 | self.logger.warning(f'Failed to find torrent {info_hash} to remove token '
554 | f'for user {userid}')
555 | elif params['action'] == 'delete_torrent':
556 | info_hash = params['info_hash']
557 | reason = int(params['reason']) if 'reason' in params else -1
558 | with self.database.torrent_list_lock:
559 | if info_hash in self.torrents:
560 | torrent = self.torrents[info_hash]
561 | self.logger.info(f'Deleting torrent {torrent.id} for the '
562 | f'reason {ErrorCodes.get_del_reason(reason)}')
563 | stats.leechers -= len(torrent.leechers)
564 | stats.seeders -= len(torrent.seeders)
565 | for peer_key in torrent.leechers:
566 | torrent.leechers[peer_key].user.leeching -= 1
567 | for peer_key in torrent.seeders:
568 | torrent.seeders[peer_key].user.seeding -= 1
569 | with self.del_reasons_lock:
570 | self.del_reasons[info_hash] = {'reason': reason, time: int(time())}
571 | del self.torrents[info_hash]
572 | else:
573 | self.logger.warning(f'Failed to find torrent {info_hash} to delete')
574 | elif params['action'] == 'add_user':
575 | passkey = params['passkey']
576 | userid = int(params['id'])
577 | with self.database.user_list_lock:
578 | if passkey not in self.users:
579 | self.users[passkey] = User(userid, True, params['visible'] == '0')
580 | self.logger.info(f'Added user {passkey} with id {userid}')
581 | else:
582 | self.logger.warning(f'Tried to add already known user {passkey} '
583 | f'with id {self.users[passkey].id}')
584 | self.users[passkey].deleted = True
585 | elif params['action'] == 'remove_user':
586 | passkey = params['passkey']
587 | with self.database.user_list_lock:
588 | if passkey in self.users:
589 | self.logger.info(f'Removed user {passkey} with id {self.users[passkey].id}')
590 | self.users[passkey].deleted = True
591 | del self.users[passkey]
592 | elif params['action'] == 'remove_users':
593 | # Each passkey is 32 characters long
594 | passkeys = params['passkeys']
595 | with self.database.user_list_lock:
596 | i = 0
597 | while i < len(passkeys):
598 | passkey = passkeys[i:i+32]
599 | if passkey in self.users:
600 | self.logger.info(f'Removed user {passkey}')
601 | self.users[passkey].deleted = True
602 | del self.users[passkey]
603 | i += 32
604 | elif params['action'] == 'update_user':
605 | passkey = params['passkey']
606 | can_leech = False if params['can_leech'] == '0' else True
607 | protect_ip = True if params['visible'] == '0' else False
608 | with self.database.user_list_lock:
609 | if passkey not in self.users:
610 | self.logger.warning(f'No user with passkey {passkey} found when attempting to '
611 | f'change leeching status!')
612 | else:
613 | self.users[passkey].protect = protect_ip
614 | self.users[passkey].leech = can_leech
615 | self.logger.info(f'Updated user {passkey}')
616 | elif params['action'] == 'add_whitelist':
617 | peer_id = params['peer_id']
618 | with self.database.whitelist_lock:
619 | self.whitelist.append(peer_id)
620 | self.logger.info(f'Whitelisted {peer_id}')
621 | elif params['action'] == 'remove_whitelist':
622 | peer_id = params['peer_id']
623 | with self.database.whitelist_lock:
624 | try:
625 | self.whitelist.remove(peer_id)
626 | except ValueError:
627 | pass
628 | self.logger.info(f'De-whitelisted {peer_id}')
629 | elif params['action'] == 'edit_whitelist':
630 | new_peer_id = params['new_peer_id']
631 | old_peer_id = params['old_peer_id']
632 | with self.database.whitelist_lock:
633 | try:
634 | self.whitelist.remove(old_peer_id)
635 | except ValueError:
636 | pass
637 | self.whitelist.append(new_peer_id)
638 | self.logger.info(f'Edited whitelist item from {old_peer_id} to {new_peer_id}')
639 | elif params['action'] == 'update_announce_interval':
640 | self.announce_interval = int(params['announce_interval'])
641 | self.config['tracker']['announce_interval'] = self.announce_interval
642 | self.logger.info(f'Edited announce interval to {self.announce_interval}')
643 | elif params['action'] == 'info_torrent':
644 | info_hash = params['info_hash']
645 | self.logger.info(f"Info for torrent '{info_hash}'")
646 | with self.database.torrent_list_lock:
647 | if info_hash in self.torrents:
648 | self.logger.info(f'Torrent {self.torrents[info_hash].id}, '
649 | f'freetorrent = {self.torrents[info_hash].free_torrent}')
650 | else:
651 | self.logger.warning(f'Failed to find torrent {info_hash}')
652 |
653 | return web.Response(text='success')
654 |
655 | def handle_report(self, request):
656 | params = request.query
657 | action = params['get']
658 | output = ''
659 | if action == '':
660 | output += "Invalid action\n"
661 | elif action == 'stats':
662 | uptime = int(time()) - stats.start_time
663 | up_d = uptime // 86400
664 | uptime -= up_d * 86400
665 | up_h = uptime // 3600
666 | uptime -= up_h * 3600
667 | up_m = uptime // 60
668 | up_s = uptime - up_m * 60
669 | output += f"Uptime {up_d} days, {up_h:02}:{up_m:02}:{up_s:02}\n" \
670 | f"{stats.opened_connections} connections opened\n" \
671 | f"{stats.open_connections} open connections\n" \
672 | f"{stats.connection_rate} connections/s\n" \
673 | f"{stats.requests} requests handled\n" \
674 | f"{stats.request_rate} requests/s\n" \
675 | f"{stats.succ_announcements} successful announcements\n" \
676 | f"{(stats.announcements - stats.succ_announcements)} failed announcements\n" \
677 | f"{stats.scrapes} scrapes\n" \
678 | f"{stats.leechers} leechers tracked\n" \
679 | f"{stats.seeders} seeders tracked\n" \
680 | f"{stats.bytes_read} bytes read\n" \
681 | f"{stats.bytes_written} bytes written\n"
682 | elif action == 'user':
683 | key = params['key']
684 | if len(key) == 0:
685 | output += "Invalid action\n"
686 | else:
687 | with self.database.user_list_lock:
688 | if key in self.users:
689 | output += f"{self.users[key].leeching} leeching\n" \
690 | f"{self.users[key].seeding} seeding\n"
691 | else:
692 | output += "Invalid action\n"
693 | return web.Response(text=output)
694 |
695 | # noinspection PyMethodMayBeStatic
696 | def response(self, response):
697 | return web.Response(text=bencode.bencode(response))
698 |
699 | def start_reaper(self):
700 | if not self.reaper_active:
701 | threading.Thread(target=self._do_start_reaper)
702 |
    def _do_start_reaper(self):
        # Runs on a background thread; reaper_active keeps start_reaper from
        # spawning a second reaper while this one is still working.
        self.reaper_active = True
        self.reap_peers()
        self.reap_del_reasons()
        self.reaper_active = False
708 |
709 | def reap_peers(self):
710 | self.logger.info('Starting peer reaper')
711 | cur_time = int(time())
712 | reaped_l = reaped_s = 0
713 | cleared_torrents = 0
714 | for info_hash in self.torrents:
715 | reaped_this = False
716 | torrent = self.torrents[info_hash]
717 | for peer_key in torrent.leechers:
718 | if torrent.leechers[peer_key].last_announced + self.peers_timeout < cur_time:
719 | with self.database.torrent_list_lock:
720 | torrent.leechers[peer_key].user.leeching -= 1
721 | del torrent.leechers[peer_key]
722 | reaped_this = True
723 | reaped_l += 1
724 | for peer_key in torrent.seeders:
725 | if torrent.seeders[peer_key].last_announced + self.peers_timeout < cur_time:
726 | with self.database.torrent_list_lock:
727 | torrent.seeders[peer_key].user.seeding -= 1
728 | del torrent.seeders[peer_key]
729 | reaped_this = True
730 | reaped_s += 1
731 | if reaped_this and len(torrent.seeders) == 0 and len(torrent.leechers) == 0:
732 | self.database.record_torrent(torrent.id, 0, 0, 0, torrent.balance)
733 | cleared_torrents += 1
734 | if reaped_l > 0 or reaped_s > 0:
735 | stats.leechers -= reaped_l
736 | stats.seeders -= reaped_s
737 | self.logger.info(f'Reaped {reaped_l} leechers and {reaped_s} seeders. '
738 | f'Reset {cleared_torrents} torrents')
739 |
740 | def reap_del_reasons(self):
741 | self.logger.info('Starting del reason reaper')
742 | max_time = int(time()) - self.del_reason_lifetime
743 | reaped = 0
744 | for key in self.del_reasons:
745 | if self.del_reasons[key]['time'] <= max_time:
746 | with self.del_reasons_lock:
747 | del self.del_reasons[key]
748 | reaped += 1
749 |
750 | self.logger.info(f'Reaped {reaped} del reasons')
751 |
--------------------------------------------------------------------------------
/runner.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

from margay.main import run

# Thin CLI entry point; argument parsing and daemonization live in margay.main.
if __name__ == '__main__':
    run()
7 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup
from margay import __author__, __version__

# Packaging metadata; version/author are sourced from the package itself so
# they are defined in exactly one place.
setup(
    name='Margay',
    author=__author__,
    version=__version__,
    url='https://github.com/ApolloRIP/Margay',
    description='A Python BitTorrent Server',
    long_description=open('README.rst').read(),
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Cython',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License'
    ],
    install_requires=[
        'aiohttp',
        'bencode.py',
        'mysqlclient',
        'requests'
    ]
)
26 |
--------------------------------------------------------------------------------