├── .dockerignore
├── .github
│   └── FUNDING.yml
├── .gitignore
├── .vscode
│   └── settings.json
├── Dockerfile
├── LICENSE
├── Procfile
├── README.md
├── app.json
├── constants
│   ├── base_url.py
│   └── headers.py
├── docker-compose.yml
├── helper
│   ├── __init__.py
│   ├── asyncioPoliciesFix.py
│   ├── dependencies.py
│   ├── error_messages.py
│   ├── html_scraper.py
│   ├── is_site_available.py
│   └── uptime.py
├── main.py
├── okteto-stack.yaml
├── render.yaml
├── requirements.txt
├── routers
│   ├── __init__.py
│   ├── home_router.py
│   └── v1
│       ├── __init__.py
│       ├── catergory_router.py
│       ├── combo_routers.py
│       ├── recent_router.py
│       ├── search_router.py
│       ├── search_url_router.py
│       ├── sites_list_router.py
│       └── trending_router.py
└── torrents
    ├── __init__.py
    ├── bitsearch.py
    ├── glodls.py
    ├── kickass.py
    ├── libgen.py
    ├── limetorrents.py
    ├── magnet_dl.py
    ├── nyaa_si.py
    ├── pirate_bay.py
    ├── torlock.py
    ├── torrentProject.py
    ├── torrent_galaxy.py
    ├── torrentfunk.py
    ├── x1337.py
    ├── your_bittorrent.py
    ├── yts.py
    └── zooqle.py
/.dockerignore:
--------------------------------------------------------------------------------
1 | # created by virtualenv automatically
2 | __pycache__
3 | api-py
4 | .env
5 | function.*
6 | .vscode
7 | .github
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: ryukme # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | otechie: # Replace with a single Otechie username
12 | custom: ["https://www.buymeacoffee.com/ryukmee"] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # created by virtualenv automatically
2 | __pycache__
3 | api-py
4 | .env
5 | function.*
6 | .vscode
7 | test.py
8 | *.log
9 | test.json
10 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "[python]": {
3 | "editor.defaultFormatter": "ms-python.autopep8"
4 | },
5 | "python.formatting.provider": "none"
6 | }
7 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.8
2 | ADD requirements.txt requirements.txt
3 | ADD main.py main.py
4 | RUN pip install -r requirements.txt
5 | COPY . .
6 | CMD ["python", "main.py"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Neeraj Kumar
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | web: gunicorn -w 4 -k uvicorn.workers.UvicornWorker main:app
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Torrents Api ✨
2 |
17 | An Unofficial API for 1337x, Piratebay, Nyaasi, Torlock, Torrent Galaxy, Zooqle, Kickass, Bitsearch, MagnetDL, Libgen, YTS, Limetorrent, TorrentFunk, Glodls, TorrentProject and YourBittorrent
18 |
19 |
20 |
21 | ## Installation
22 |
23 | ```sh
24 |
25 | # Clone the repo
26 | $ git clone https://github.com/Ryuk-me/Torrent-Api-py
27 |
28 | # Go to the repository
29 | $ cd Torrent-Api-py
30 |
31 | # Install virtualenv
32 | $ pip install virtualenv
33 |
34 | # Create Virtual Env
35 | $ py -3 -m venv api-py
36 |
37 | # Activate Virtual Env [Windows]
38 | $ .\api-py\Scripts\activate
39 |
40 | # Activate Virtual Env [Linux]
41 | $ source api-py/bin/activate
42 |
43 | # Install Dependencies
44 | $ pip install -r requirements.txt
45 |
46 | # Start
47 | $ python main.py
48 |
49 | # (Optional) To use a proxy, set the HTTP_PROXY environment variable
50 | # You can also use a tor proxy using dperson/torproxy:latest
51 | $ export HTTP_PROXY="http://proxy-host:proxy-port"
52 |
53 | # To access the API, open any browser/API testing tool and go to the URL below
54 | $ localhost:8009
55 |
56 | ```
57 |
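Once the server is running, a quick way to confirm it is reachable is the `/health` route defined in `main.py`. The snippet below is a minimal sketch (not part of the repo), assuming the default `localhost:8009` address and the `requests` package already listed in `requirements.txt`:

```python
import requests

# Sanity check against the /health route defined in main.py
# (assumes the server was started with `python main.py` on port 8009).
response = requests.get("http://localhost:8009/health")
response.raise_for_status()
print(response.json())  # e.g. {"app": "Torrent-Api-Py", "version": "v1.0.1", "ip": "...", "uptime": 42}
```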
58 |
59 | ---
60 |
61 | ## Supported Sites
62 |
63 | | Website | Keyword | Url | Cloudflare |
64 | | :------------: | :--------------: | :--------------------------: | :-------: |
65 | | 1337x | `1337x` | https://1337x.to | ❌ |
66 | | Torrent Galaxy | `tgx` | https://torrentgalaxy.to | ❌ |
67 | | Torlock | `torlock` | https://www.torlock.com | ❌ |
68 | | PirateBay | `piratebay` | https://thepiratebay10.org | ❌ |
69 | | Nyaasi | `nyaasi` | https://nyaa.si | ❌ |
70 | | Zooqle | `zooqle` | https://zooqle.com | ❌ |
71 | | KickAss | `kickass` | https://kickasstorrents.to | ❌ |
72 | | Bitsearch | `bitsearch` | https://bitsearch.to | ❌ |
73 | | MagnetDL | `magnetdl` | https://www.magnetdl.com | ✅ |
74 | | Libgen | `libgen` | https://libgen.is | ❌ |
75 | | YTS | `yts` | https://yts.mx | ❌ |
76 | | Limetorrent | `limetorrent` | https://www.limetorrents.pro | ❌ |
77 | | TorrentFunk | `torrentfunk` | https://www.torrentfunk.com | ❌ |
78 | | Glodls | `glodls` | https://glodls.to | ❌ |
79 | | TorrentProject | `torrentproject` | https://torrentproject2.com | ❌ |
80 | | YourBittorrent | `ybt` | https://yourbittorrent.com | ❌ |
81 |
82 | ---
83 |
84 |
85 | ## Supported Methods and categories
86 |
87 | > To change the default per-site result limit, see [helper/is_site_available.py](https://github.com/Ryuk-me/Torrent-Api-py/blob/main/helper/is_site_available.py#L39)
88 |
89 |
90 |
91 | ```py
92 |
93 | {
94 | "1337x": {
95 | "trending_available": True,
96 | "trending_category": True,
97 | "search_by_category": True,
98 | "recent_available": True,
99 | "recent_category_available": True,
100 | "categories": ["anime", "music", "games", "tv","apps","documentaries", "other", "xxx", "movies"],
101 | "limit" : 100
102 | },
103 | "torlock": {
104 | "trending_available": True,
105 | "trending_category": True,
106 | "search_by_category": False,
107 | "recent_available": True,
108 | "recent_category_available": True,
109 | "categories": ["anime", "music", "games", "tv","apps", "documentaries", "other", "xxx", "movies", "books", "images"],
110 | "limit" : 50
111 | },
112 | "zooqle": {
113 | "trending_available": False,
114 | "trending_category": False,
115 | "search_by_category": False,
116 | "recent_available": False,
117 | "recent_category_available": False,
118 | "categories": [],
119 | "limit": 30
120 | },
121 | "magnetdl": {
122 | "trending_available": False,
123 | "trending_category": False,
124 | "search_by_category": False,
125 | "recent_available": True,
126 | "recent_category_available": True,
127 | "categories": ["apps", "movies", "music", "games", "tv", "books"],
128 | "limit": 40
129 | },
130 | "tgx": {
131 | "trending_available": True,
132 | "trending_category": True,
133 | "search_by_category": False,
134 | "recent_available": True,
135 | "recent_category_available": True,
136 | "categories": ["anime", "music", "games", "tv",
137 | "apps", "documentaries", "other", "xxx", "movies", "books"],
138 | "limit": 50
139 | },
140 | "nyaasi": {
141 | "trending_available": False,
142 | "trending_category": False,
143 | "search_by_category": False,
144 | "recent_available": True,
145 | "recent_category_available": False,
146 | "categories": [],
147 | "limit": 50
148 |
149 | },
150 | "piratebay": {
151 | "trending_available": True,
152 | "trending_category": False,
153 | "search_by_category": False,
154 | "recent_available": True,
155 | "recent_category_available": True,
156 | "categories": ["tv"],
157 | "limit": 50
158 | },
159 | "bitsearch": {
160 | "trending_available": True,
161 | "trending_category": False,
162 | "search_by_category": False,
163 | "recent_available": False,
164 | "recent_category_available": False,
165 | "categories": [],
166 | "limit": 50
167 | },
168 | "kickass": {
169 | "trending_available": True,
170 | "trending_category": True,
171 | "search_by_category": False,
172 | "recent_available": True,
173 | "recent_category_available": True,
174 | "categories": ["anime", "music", "games", "tv","apps", "documentaries", "other", "xxx", "movies", "books"],
175 | "limit": 50
176 | },
177 |     "libgen": {
178 | "trending_available": False,
179 | "trending_category": False,
180 | "search_by_category": False,
181 | "recent_available": False,
182 | "recent_category_available": False,
183 | "categories": [],
184 | "limit": 25
185 | },
186 | "yts": {
187 | "trending_available": True,
188 | "trending_category": False,
189 | "search_by_category": False,
190 | "recent_available": True,
191 | "recent_category_available": False,
192 | "categories": [],
193 | "limit": 20
194 | },
195 | "limetorrent": {
196 | "trending_available": True,
197 | "trending_category": False,
198 | "search_by_category": False,
199 | "recent_available": True,
200 | "recent_category_available": True,
201 | "categories": ["anime", "music", "games", "tv",
202 | "apps", "other", "movies", "books"], # applications and tv-shows
203 | "limit": 50
204 | },
205 | "torrentfunk": {
206 | "trending_available": True,
207 | "trending_category": True,
208 | "search_by_category": False,
209 | "recent_available": True,
210 | "recent_category_available": True,
211 | "categories": ["anime", "music", "games", "tv",
212 | "apps", "xxx", "movies", "books"], # television # software #adult # ebooks
213 | "limit": 50
214 | },
215 | "glodls": {
216 | "trending_available": True,
217 | "trending_category": False,
218 | "search_by_category": False,
219 | "recent_available": True,
220 | "recent_category_available": False,
221 | "categories": [],
222 | "limit": 45
223 | },
224 | "torrentproject": {
225 | "trending_available": False,
226 | "trending_category": False,
227 | "search_by_category": False,
228 | "recent_available": False,
229 | "recent_category_available": False,
230 | "categories": [],
231 | "limit": 20
232 | },
233 | "ybt": {
234 | "trending_available": True,
235 | "trending_category": True,
236 | "search_by_category": False,
237 | "recent_available": True,
238 | "recent_category_available": True,
239 | "categories": ["anime", "music", "games", "tv",
240 | "apps", "xxx", "movies", "books", "pictures", "other"], # book -> ebooks
241 | "limit": 20
242 | }
243 |
244 | }
245 | ```
246 |
247 |
248 |
249 |
250 | ---
251 |
252 | ## API Endpoints
253 |
254 |
255 | ### Supported sites list
256 |
257 |
258 | > [`api/v1/sites`](https://torrent-api-py-nx0x.onrender.com/api/v1/sites)
259 |
260 |
261 |
262 |
263 |
264 |
265 | ### Site Configs
266 |
267 |
268 | > [`api/v1/sites/config`](https://torrent-api-py-nx0x.onrender.com/api/v1/sites/config)
269 |
270 |
271 |
272 |
273 |
274 |
275 | ### Search
276 |
277 |
278 | > [`api/v1/search`](https://torrent-api-py-nx0x.onrender.com/api/v1/search)
279 |
280 | | Parameter | Required | Type | Default | Example |
281 | | :-------: | :------: | :-----: | :-----: | :------------------------------------------------------: |
282 | | site | ✅ | string | None | `api/v1/search?site=1337x` |
283 | | query | ✅ | string | None | `api/v1/search?site=1337x&query=avengers` |
284 | | limit | ❌ | integer | Default | `api/v1/search?site=1337x&query=avengers&limit=20` |
285 | | page | ❌ | integer | 1 | `api/v1/search?site=1337x&query=avengers&limit=0&page=2` |
286 |
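As a quick illustration, here is a minimal Python sketch of a search call (assuming the API is running locally on `localhost:8009` as described in the Installation section; the response fields match the sample response shown further below):

```python
import requests

# Hedged example: site/query/limit/page mirror the parameters in the table above.
# limit=0 (or omitting it) falls back to the site's default limit.
params = {"site": "1337x", "query": "avengers", "limit": 20, "page": 1}
response = requests.get("http://localhost:8009/api/v1/search", params=params)
response.raise_for_status()

results = response.json()
print(results["total"], "results in", round(results["time"], 2), "s")
for torrent in results["data"]:
    print(torrent["name"], "seeders:", torrent["seeders"])
```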
287 |
288 |
289 |
290 |
291 |
292 | ### Trending
293 |
294 |
295 | > `api/v1/trending`
296 |
297 | | Parameter | Required | Type | Default | Example |
298 | | :-------: | :------: | :-----: | :-----: | :-----------------------------------------------------: |
299 | | site | ✅ | string | None | `api/v1/trending?site=1337x` |
300 | | limit | ❌ | integer | Default | `api/v1/trending?site=1337x&limit=10` |
301 | | category | ❌ | string | None | `api/v1/trending?site=1337x&limit=0&category=tv` |
302 | | page | ❌ | integer | 1 | `api/v1/trending?site=1337x&limit=6&category=tv&page=2` |
303 |
304 |
305 |
306 |
307 |
308 |
309 | ### Recent
310 |
311 |
312 | > `api/v1/recent`
313 |
314 | | Parameter | Required | Type | Default | Example |
315 | | :-------: | :------: | :-----: | :-----: | :----------------------------------------------------: |
316 | | site | ✅ | string | None | `api/v1/recent?site=1337x` |
317 | | limit | ❌ | integer | Default | `api/v1/recent?site=1337x&limit=7` |
318 | | category | ❌ | string | None | `api/v1/recent?site=1337x&limit=0&category=tv` |
319 | | page | ❌ | integer | 1 | `api/v1/recent?site=1337x&limit=15&category=tv&page=2` |
320 |
321 |
322 |
323 |
324 |
325 |
326 | ### Search By Category
327 |
328 |
329 | > `api/v1/category`
330 |
331 | | Parameter | Required | Type | Default | Example |
332 | | :-------: | :------: | :-----: | :-----: | :--------------------------------------------------------------------: |
333 | | site | ✅ | string | None | `api/v1/category?site=1337x` |
334 | | query | ✅ | string | None | `api/v1/category?site=1337x&query=avengers` |
335 | | category | ✅ | string | None | `api/v1/category?site=1337x&query=avengers&category=movies` |
336 | | limit | ❌ | integer | Default | `api/v1/category?site=1337x&query=avengers&category=movies&limit=10` |
337 | | page | ❌ | integer | 1 | `api/v1/category?site=1337x&query=avengers&category=tv&limit=0&page=2` |
338 |
339 |
340 |
341 |
342 |
343 |
344 |
345 | ### Search from all sites
346 |
347 |
348 | > `api/v1/all/search`
349 |
350 | | Parameter | Required | Type | Default | Example |
351 | | :-------: | :------: | :-----: | :-----: | :----------------------------------------: |
352 | | query | ✅ | string | None | `api/v1/all/search?query=avengers` |
353 | | limit | ❌ | integer | Default | `api/v1/all/search?query=avengers&limit=5` |
354 |
355 | Here, limit = 5 will fetch 5 results from each site.
356 |
357 | > [api/v1/all/search?query=avengers](https://torrent-api-py-nx0x.onrender.com/api/v1/all/search?query=avengers)
358 |
359 | > [api/v1/all/search?query=avengers&limit=5](https://torrent-api-py-nx0x.onrender.com/api/v1/all/search?query=avengers&limit=5)
360 |
361 |
362 |
363 |
364 |
365 |
366 |
367 | ### Get trending from all sites
368 |
369 |
370 | > `api/v1/all/trending`
371 |
372 | | Parameter | Required | Type | Default | Example |
373 | | :-------: | :------: | :-----: | :-----: | :---------------------------: |
374 | | limit | ❌ | integer | Default | `api/v1/all/trending?limit=2` |
375 |
376 | > [api/v1/all/trending](https://torrent-api-py-nx0x.onrender.com/api/v1/all/trending)
377 |
378 | > [api/v1/all/trending?limit=2](https://torrent-api-py-nx0x.onrender.com/api/v1/all/trending?limit=2)
379 |
380 |
381 |
382 |
383 |
384 |
385 |
386 | ### Get recent from all sites
387 |
388 |
389 | > `api/v1/all/recent`
390 |
391 | | Parameter | Required | Type | Default | Example |
392 | | :-------: | :------: | :-----: | :-----: | :-------------------------: |
393 | | limit | ❌ | integer | Default | `api/v1/all/recent?limit=2` |
394 |
395 | > [api/v1/all/recent](https://torrent-api-py-nx0x.onrender.com/api/v1/all/recent)
396 |
397 | > [api/v1/all/recent?limit=2](https://torrent-api-py-nx0x.onrender.com/api/v1/all/recent?limit=2)
398 |
399 |
400 |
401 |
402 | ---
403 |
404 | ## Authentication
405 |
406 | To enable authentication, set your API key in the environment variable `PYTORRENT_API_KEY`. Clients must then include this key in the `X-API-Key` header of their requests to authenticate successfully.
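A minimal client-side sketch (assuming a local deployment on `localhost:8009`; the key name and header come from `helper/dependencies.py`):

```python
import os
import requests

# The value sent in X-API-Key must match the PYTORRENT_API_KEY configured on the server.
# A wrong or missing key returns HTTP 403 ("Access forbidden: Incorrect credentials.").
API_KEY = os.environ.get("PYTORRENT_API_KEY", "change-me")

response = requests.get(
    "http://localhost:8009/api/v1/sites",
    headers={"X-API-Key": API_KEY},
)
print(response.status_code, response.json())
```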
407 |
408 | ## Want to try the API?
409 |
410 | > [api/v1/search?site=1337x&query=eternals](https://torrent-api-py-nx0x.onrender.com/api/v1/search?site=1337x&query=eternals)
411 |
412 |
413 | See response
414 |
415 |
416 | ```json
417 | {
418 | "data": [
419 | {
420 | "name": "Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG",
421 | "size": "1.6 GB",
422 | "date": "Jan. 11th '22",
423 | "seeders": "3674",
424 | "leechers": "983",
425 | "url": "https://1337x.to/torrent/5110228/Eternals-2021-1080p-WEBRip-1600MB-DD5-1-x264-GalaxyRG/",
426 | "uploader": "TGxGoodies",
427 | "screenshot": [
428 | "https://everest.picturedent.org/images/2022/01/11/tmpposter23827.jpg",
429 | "https://everest.picturedent.org/images/2022/01/11/Harone8014.th.jpg",
430 | "https://everest.picturedent.org/images/2022/01/11/Harone31320.th.jpg",
431 | "https://everest.picturedent.org/images/2022/01/11/Harone8129XqiKn.th.jpg",
432 | "https://everest.picturedent.org/images/2022/01/11/Harone27162.th.jpg",
433 | "https://everest.picturedent.org/images/2022/01/11/Harone1352.th.jpg",
434 | "https://everest.picturedent.org/images/2022/01/11/Harone14355.th.jpg"
435 | ],
436 | "category": "Movies",
437 | "files": [
438 | "Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG.mkv (1.6 GB)",
439 | "[TGx]Downloaded from torrentgalaxy.to .txt (0.7 KB)"
440 | ],
441 | "poster": "https://lx1.dyncdn.cc/cdn/02/0251ab7772c031c1130bc92810758cd4.jpg",
442 | "magnet": "magnet:?xt=urn:btih:20F8D7C2942B143E6E2A0FB5562CDE7EE1B17822&dn=Eternals.2021.1080p.WEBRip.1600MB.DD5.1.x264-GalaxyRG&tr=udp://open.stealth.si:80/announce&tr=udp://tracker.tiny-vps.com:6969/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=udp://tracker.torrent.eu.org:451/announce&tr=udp://explodie.org:6969/announce&tr=udp://tracker.cyberia.is:6969/announce&tr=udp://ipv4.tracker.harry.lu:80/announce&tr=udp://p4p.arenabg.com:1337/announce&tr=udp://tracker.birkenwald.de:6969/announce&tr=udp://tracker.moeking.me:6969/announce&tr=udp://opentor.org:2710/announce&tr=udp://tracker.dler.org:6969/announce&tr=udp://9.rarbg.me:2970/announce&tr=https://tracker.foreverpirates.co:443/announce&tr=udp://tracker.opentrackr.org:1337/announce&tr=http://tracker.openbittorrent.com:80/announce&tr=udp://opentracker.i2p.rocks:6969/announce&tr=udp://tracker.internetwarriors.net:1337/announce&tr=udp://tracker.leechers-paradise.org:6969/announce&tr=udp://coppersurfer.tk:6969/announce&tr=udp://tracker.zer0day.to:1337/announce",
443 | "hash": "20F8D7C2942B143E6E2A0FB5562CDE7EE1B17822"
444 | }
445 | ],
446 | "current_page": 1,
447 | "total_pages": 7,
448 | "time": 1.276763677597046,
449 | "total": 20
450 | }
451 | ```
452 |
453 |
454 |
455 |
456 | ---
457 |
458 | ## Donations
459 |
460 | If you feel like showing your appreciation for this project, then how about buying me a coffee?
461 |
462 | [Buy Me a Coffee](https://www.buymeacoffee.com/ryukmee)
463 |
464 | ---
465 |
466 | ## DEPLOY
467 |
468 |
469 |
470 |
471 |
472 |
473 |
474 | [Deploy to Heroku](https://heroku.com/deploy)
475 |
--------------------------------------------------------------------------------
/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Torrent-Api-py",
3 | "description": "An Unofficial API for 1337x, Piratebay, Nyaasi, Torlock, Torrent Galaxy, Zooqle, Kickass, Bitsearch, MagnetDL, Libgen, YTS, TorrentFunk, Glodls TorrentProject and YourBittorrent",
4 | "keywords": [
5 | "fast-api",
6 | "python",
7 | "torrent",
8 | "api"
9 | ],
10 | "repository": "https://github.com/Ryuk-me/Torrent-Api-py"
11 | }
--------------------------------------------------------------------------------
/constants/base_url.py:
--------------------------------------------------------------------------------
1 | X1337 = "https://1337x.to"
2 | TGX = "https://torrentgalaxy.to"
3 | TORLOCK = "https://www.torlock.com"
4 | PIRATEBAY = "https://thepiratebay10.org"
5 | NYAASI = "https://nyaa.si"
6 | ZOOQLE = "https://zooqle.com"
7 | KICKASS = "https://kickasstorrents.to"
8 | BITSEARCH = "https://bitsearch.to"
9 | MAGNETDL = "https://www.magnetdl.com"
10 | LIBGEN = "https://libgen.is"
11 | YTS = "https://yts.mx"
12 | LIMETORRENT = "https://www.limetorrents.pro"
13 | TORRENTFUNK = "https://www.torrentfunk.com"
14 | GLODLS = "https://glodls.to"
15 | TORRENTPROJECT = "https://torrentproject2.com"
16 | YOURBITTORRENT = "https://yourbittorrent.com"
17 |
--------------------------------------------------------------------------------
/constants/headers.py:
--------------------------------------------------------------------------------
1 | HEADER_AIO = {
2 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.67",
3 | "Cookie": "fencekey=0e31613a539b90e445bbcecafaa5a273",
4 | }
5 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | api-py:
4 | build: .
5 | ports:
6 | - "8009:8009"
--------------------------------------------------------------------------------
/helper/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ryuk-me/Torrent-Api-py/1a302617686b22714eacf60cc8d215bbd5979ffe/helper/__init__.py
--------------------------------------------------------------------------------
/helper/asyncioPoliciesFix.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import sys
3 |
4 |
5 | def decorator_asyncio_fix(func):
6 | def wrapper(*args):
7 | if (
8 | sys.version_info[0] == 3
9 | and sys.version_info[1] >= 8
10 | and sys.platform.startswith("win")
11 | ):
12 | asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
13 | return func(*args)
14 |
15 | return wrapper
16 |
--------------------------------------------------------------------------------
/helper/dependencies.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from fastapi import Security, HTTPException, status
4 | from fastapi.security import APIKeyHeader
5 |
6 |
7 | api_key = os.environ.get("PYTORRENT_API_KEY")
8 | api_key_header = APIKeyHeader(name="X-API-Key", auto_error=False)
9 |
10 | def authenticate_request(
11 | x_api_key: str = Security(api_key_header),
12 | ):
13 | """
14 | Dependency function to authenticate a request with an API key.
15 | """
16 | if api_key and x_api_key != api_key:
17 | raise HTTPException(
18 | status_code=status.HTTP_403_FORBIDDEN,
19 | detail="Access forbidden: Incorrect credentials."
20 | )
21 |
--------------------------------------------------------------------------------
/helper/error_messages.py:
--------------------------------------------------------------------------------
1 | from fastapi.encoders import jsonable_encoder
2 | from fastapi.responses import JSONResponse
3 |
4 |
5 | def error_handler(status_code, json_message):
6 | return JSONResponse(
7 | status_code=status_code,
8 | content=jsonable_encoder(json_message),
9 | )
10 |
--------------------------------------------------------------------------------
/helper/html_scraper.py:
--------------------------------------------------------------------------------
1 | import os
2 | import asyncio
3 | from .asyncioPoliciesFix import decorator_asyncio_fix
4 | from constants.headers import HEADER_AIO
5 |
6 | HTTP_PROXY = os.environ.get("HTTP_PROXY", None)
7 |
8 |
9 | class Scraper:
10 | @decorator_asyncio_fix
11 | async def _get_html(self, session, url):
12 | try:
13 | async with session.get(url, headers=HEADER_AIO, proxy=HTTP_PROXY) as r:
14 | return await r.text()
15 | except:
16 | return None
17 |
18 | async def get_all_results(self, session, url):
19 | return await asyncio.gather(asyncio.create_task(self._get_html(session, url)))
20 |
--------------------------------------------------------------------------------
/helper/is_site_available.py:
--------------------------------------------------------------------------------
1 | from torrents.bitsearch import Bitsearch
2 | from torrents.glodls import Glodls
3 | from torrents.kickass import Kickass
4 | from torrents.libgen import Libgen
5 | from torrents.limetorrents import Limetorrent
6 | from torrents.magnet_dl import Magnetdl
7 | from torrents.nyaa_si import NyaaSi
8 | from torrents.pirate_bay import PirateBay
9 | from torrents.torlock import Torlock
10 | from torrents.torrent_galaxy import TorrentGalaxy
11 | from torrents.torrentfunk import TorrentFunk
12 | from torrents.torrentProject import TorrentProject
13 | from torrents.x1337 import x1337
14 | from torrents.your_bittorrent import YourBittorrent
15 | from torrents.yts import Yts
16 | from torrents.zooqle import Zooqle
17 |
18 | all_sites = {
19 | "1337x": {
20 | "website": x1337,
21 | "trending_available": True,
22 | "trending_category": True,
23 | "search_by_category": True,
24 | "recent_available": True,
25 | "recent_category_available": True,
26 | "categories": [
27 | "anime",
28 | "music",
29 | "games",
30 | "tv",
31 | "apps",
32 | "documentaries",
33 | "other",
34 | "xxx",
35 | "movies",
36 | ],
37 | "limit": 100,
38 | },
39 | "torlock": {
40 | "website": Torlock,
41 | "trending_available": True,
42 | "trending_category": True,
43 | "search_by_category": False,
44 | "recent_available": True,
45 | "recent_category_available": True,
46 | "categories": [
47 | "anime",
48 | "music",
49 | "games",
50 | "tv",
51 | "apps",
52 | "documentaries",
53 | "other",
54 | "xxx",
55 | "movies",
56 | "books",
57 | "images",
58 | ], # ebooks
59 | "limit": 50,
60 | },
61 | "zooqle": {
62 | "website": Zooqle,
63 | "trending_available": False,
64 | "trending_category": False,
65 | "search_by_category": False,
66 | "recent_available": False,
67 | "recent_category_available": False,
68 | "categories": [],
69 | "limit": 30,
70 | },
71 | "magnetdl": {
72 | "website": Magnetdl,
73 | "trending_available": False,
74 | "trending_category": False,
75 | "search_by_category": False,
76 | "recent_available": True,
77 | "recent_category_available": True,
78 | # e-books
79 | "categories": ["apps", "movies", "music", "games", "tv", "books"],
80 | "limit": 40,
81 | },
82 | "tgx": {
83 | "website": TorrentGalaxy,
84 | "trending_available": True,
85 | "trending_category": True,
86 | "search_by_category": False,
87 | "recent_available": True,
88 | "recent_category_available": True,
89 | "categories": [
90 | "anime",
91 | "music",
92 | "games",
93 | "tv",
94 | "apps",
95 | "documentaries",
96 | "other",
97 | "xxx",
98 | "movies",
99 | "books",
100 | ],
101 | "limit": 50,
102 | },
103 | "nyaasi": {
104 | "website": NyaaSi,
105 | "trending_available": False,
106 | "trending_category": False,
107 | "search_by_category": False,
108 | "recent_available": True,
109 | "recent_category_available": False,
110 | "categories": [],
111 | "limit": 50,
112 | },
113 | "piratebay": {
114 | "website": PirateBay,
115 | "trending_available": True,
116 | "trending_category": False,
117 | "search_by_category": False,
118 | "recent_available": True,
119 | "recent_category_available": True,
120 | "categories": ["tv"],
121 | "limit": 50,
122 | },
123 | "bitsearch": {
124 | "website": Bitsearch,
125 | "trending_available": True,
126 | "trending_category": False,
127 | "search_by_category": False,
128 | "recent_available": False,
129 | "recent_category_available": False,
130 | "categories": [],
131 | "limit": 50,
132 | },
133 | "kickass": {
134 | "website": Kickass,
135 | "trending_available": True,
136 | "trending_category": True,
137 | "search_by_category": False,
138 | "recent_available": True,
139 | "recent_category_available": True,
140 | "categories": [
141 | "anime",
142 | "music",
143 | "games",
144 | "tv",
145 | "apps",
146 | "documentaries",
147 | "other",
148 | "xxx",
149 | "movies",
150 | "books",
151 | ], # television applications
152 | "limit": 50,
153 | },
154 | "libgen": {
155 | "website": Libgen,
156 | "trending_available": False,
157 | "trending_category": False,
158 | "search_by_category": False,
159 | "recent_available": False,
160 | "recent_category_available": False,
161 | "categories": [],
162 | "limit": 25,
163 | },
164 | "yts": {
165 | "website": Yts,
166 | "trending_available": True,
167 | "trending_category": False,
168 | "search_by_category": False,
169 | "recent_available": True,
170 | "recent_category_available": False,
171 | "categories": [],
172 | "limit": 20,
173 | },
174 | "limetorrent": {
175 | "website": Limetorrent,
176 | "trending_available": True,
177 | "trending_category": False,
178 | "search_by_category": False,
179 | "recent_available": True,
180 | "recent_category_available": True,
181 | "categories": [
182 | "anime",
183 | "music",
184 | "games",
185 | "tv",
186 | "apps",
187 | "other",
188 | "movies",
189 | "books",
190 | ], # applications and tv-shows
191 | "limit": 50,
192 | },
193 | "torrentfunk": {
194 | "website": TorrentFunk,
195 | "trending_available": True,
196 | "trending_category": True,
197 | "search_by_category": False,
198 | "recent_available": True,
199 | "recent_category_available": True,
200 | "categories": [
201 | "anime",
202 | "music",
203 | "games",
204 | "tv",
205 | "apps",
206 | "xxx",
207 | "movies",
208 | "books",
209 | ], # television # software #adult # ebooks
210 | "limit": 50,
211 | },
212 | "glodls": {
213 | "website": Glodls,
214 | "trending_available": True,
215 | "trending_category": False,
216 | "search_by_category": False,
217 | "recent_available": True,
218 | "recent_category_available": False,
219 | "categories": [],
220 | "limit": 45,
221 | },
222 | "torrentproject": {
223 | "website": TorrentProject,
224 | "trending_available": False,
225 | "trending_category": False,
226 | "search_by_category": False,
227 | "recent_available": False,
228 | "recent_category_available": False,
229 | "categories": [],
230 | "limit": 20,
231 | },
232 | "ybt": {
233 | "website": YourBittorrent,
234 | "trending_available": True,
235 | "trending_category": True,
236 | "search_by_category": False,
237 | "recent_available": True,
238 | "recent_category_available": True,
239 | "categories": [
240 | "anime",
241 | "music",
242 | "games",
243 | "tv",
244 | "apps",
245 | "xxx",
246 | "movies",
247 | "books",
248 | "pictures",
249 | "other",
250 | ], # book -> ebooks
251 | "limit": 20,
252 | },
253 | }
254 |
255 | sites_config = {
256 | key: {
257 | **site_info,
258 | "website": site_info["website"]._name
259 | } for key, site_info in all_sites.items()
260 | }
261 |
262 | def check_if_site_available(site):
263 | if site in all_sites.keys():
264 | return all_sites
265 | return False
266 |
--------------------------------------------------------------------------------
/helper/uptime.py:
--------------------------------------------------------------------------------
1 | import time
2 |
3 |
4 | def getUptime(startTime: float) -> float:
5 | """
6 | Returns the number of seconds since the program started.
7 | """
8 | return time.time() - startTime
9 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 | from fastapi import FastAPI, Request, Depends
3 | from fastapi.responses import JSONResponse
4 | from fastapi.middleware.cors import CORSMiddleware
5 | from routers.v1.search_router import router as search_router
6 | from routers.v1.trending_router import router as trending_router
7 | from routers.v1.catergory_router import router as category_router
8 | from routers.v1.recent_router import router as recent_router
9 | from routers.v1.combo_routers import router as combo_router
10 | from routers.v1.sites_list_router import router as site_list_router
11 | from routers.home_router import router as home_router
12 | from routers.v1.search_url_router import router as search_url_router
13 | from helper.uptime import getUptime
14 | from helper.dependencies import authenticate_request
15 | from mangum import Mangum
16 | from math import ceil
17 | import time
18 |
19 | startTime = time.time()
20 |
21 | app = FastAPI(
22 | title="Torrent-Api-Py",
23 | version="1.0.1",
24 | description="Unofficial Torrent-Api",
25 | docs_url="/docs",
26 | contact={
27 | "name": "Neeraj Kumar",
28 | "url": "https://github.com/ryuk-me",
29 | "email": "neerajkr1210@gmail.com",
30 | },
31 | )
32 |
33 | origins = ["*"]
34 |
35 | app.add_middleware(
36 | CORSMiddleware,
37 | allow_origins=origins,
38 | allow_credentials=True,
39 | allow_methods=["*"],
40 | allow_headers=["*"],
41 | )
42 |
43 |
44 | @app.get("/health")
45 | async def health_route(req: Request):
46 | """
47 | Health Route : Returns App details.
48 |
49 | """
50 | return JSONResponse(
51 | {
52 | "app": "Torrent-Api-Py",
53 | "version": "v" + "1.0.1",
54 | "ip": req.client.host,
55 | "uptime": ceil(getUptime(startTime)),
56 | }
57 | )
58 |
59 |
60 | app.include_router(search_router, prefix="/api/v1/search", dependencies=[Depends(authenticate_request)])
61 | app.include_router(trending_router, prefix="/api/v1/trending", dependencies=[Depends(authenticate_request)])
62 | app.include_router(category_router, prefix="/api/v1/category", dependencies=[Depends(authenticate_request)])
63 | app.include_router(recent_router, prefix="/api/v1/recent", dependencies=[Depends(authenticate_request)])
64 | app.include_router(combo_router, prefix="/api/v1/all", dependencies=[Depends(authenticate_request)])
65 | app.include_router(site_list_router, prefix="/api/v1/sites", dependencies=[Depends(authenticate_request)])
66 | app.include_router(search_url_router, prefix="/api/v1/search_url", dependencies=[Depends(authenticate_request)])
67 | app.include_router(home_router, prefix="")
68 |
69 | handler = Mangum(app)
70 |
71 | if __name__ == "__main__":
72 | uvicorn.run(app, host="0.0.0.0", port=8009)
73 |
--------------------------------------------------------------------------------
/okteto-stack.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | fastapi:
3 | public: true
4 | build: .
5 | replicas: 1
6 | ports:
7 | - 8080
8 | resources:
9 | requests:
10 | cpu: 500m
11 | memory: 1500Mi
12 | limits:
13 | cpu: 1000m
14 | memory: 3000Mi
15 |
--------------------------------------------------------------------------------
/render.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | # A Docker web service
3 | - type: web
4 | name: torrent-api-py
5 | runtime: python
6 | plan: free
7 | autoDeploy: true
8 | buildCommand: pip install -r requirements.txt
9 | startCommand: uvicorn main:app --host 0.0.0.0 --port 8009
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp[speedups]
2 | beautifulsoup4
3 | cloudscraper
4 | fastapi==0.104.1
5 | gunicorn
6 | mangum
7 | requests
8 | uvicorn[standard]
--------------------------------------------------------------------------------
/routers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ryuk-me/Torrent-Api-py/1a302617686b22714eacf60cc8d215bbd5979ffe/routers/__init__.py
--------------------------------------------------------------------------------
/routers/home_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | from fastapi.responses import FileResponse
3 |
4 |
5 | router = APIRouter(tags=["Home Route"])
6 |
7 |
8 | @router.get("/")
9 | async def home():
10 | return FileResponse("README.md")
11 |
--------------------------------------------------------------------------------
/routers/v1/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ryuk-me/Torrent-Api-py/1a302617686b22714eacf60cc8d215bbd5979ffe/routers/v1/__init__.py
--------------------------------------------------------------------------------
/routers/v1/catergory_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | from fastapi import status
3 | from typing import Optional
4 | from helper.is_site_available import check_if_site_available
5 | from helper.error_messages import error_handler
6 |
7 | router = APIRouter(tags=["Category Torrents Route"])
8 |
9 |
10 | @router.get("/")
11 | @router.get("")
12 | async def get_category(
13 | site: str,
14 | query: str,
15 | category: str,
16 | limit: Optional[int] = 0,
17 | page: Optional[int] = 1,
18 | ):
19 | all_sites = check_if_site_available(site)
20 | site = site.lower()
21 | query = query.lower()
22 | category = category.lower()
23 | if all_sites:
24 | limit = (
25 | all_sites[site]["limit"]
26 | if limit == 0 or limit > all_sites[site]["limit"]
27 | else limit
28 | )
29 |
30 | if all_sites[site]["search_by_category"]:
31 | if category not in all_sites[site]["categories"]:
32 | return error_handler(
33 | status_code=status.HTTP_404_NOT_FOUND,
34 | json_message={
35 | "error": "Selected category not available.",
36 | "available_categories": all_sites[site]["categories"],
37 | },
38 | )
39 | resp = await all_sites[site]["website"]().search_by_category(
40 | query, category, page, limit
41 | )
42 | if resp is None:
43 | return error_handler(
44 | status_code=status.HTTP_403_FORBIDDEN,
45 | json_message={
46 | "error": "Website Blocked Change IP or Website Domain."
47 | },
48 | )
49 | elif len(resp["data"]) > 0:
50 | return resp
51 | else:
52 | return error_handler(
53 | status_code=status.HTTP_404_NOT_FOUND,
54 | json_message={"error": "Result not found."},
55 | )
56 | else:
57 | return error_handler(
58 | status_code=status.HTTP_404_NOT_FOUND,
59 | json_message={
60 |                     "error": "Category search not available for {}.".format(site)
61 | },
62 | )
63 | return error_handler(
64 | status_code=status.HTTP_404_NOT_FOUND,
65 | json_message={"error": "Selected Site Not Available"},
66 | )
67 |
--------------------------------------------------------------------------------
/routers/v1/combo_routers.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, status
2 | from typing import Optional
3 | from helper.is_site_available import check_if_site_available
4 | import time
5 | import asyncio
6 | from helper.error_messages import error_handler
7 |
8 |
9 | router = APIRouter(tags=["Combo Routes"])
10 |
11 |
12 | @router.get("/search")
13 | async def get_search_combo(query: str, limit: Optional[int] = 0):
14 | start_time = time.time()
15 | query = query.lower()
16 | all_sites = check_if_site_available("1337x")
17 | sites_list = list(all_sites.keys())
18 | tasks = []
19 | COMBO = {"data": []}
20 | total_torrents_overall = 0
21 | for site in sites_list:
22 | limit = (
23 | all_sites[site]["limit"]
24 | if limit == 0 or limit > all_sites[site]["limit"]
25 | else limit
26 | )
27 | tasks.append(
28 | asyncio.create_task(
29 | all_sites[site]["website"]().search(query, page=1, limit=limit)
30 | )
31 | )
32 | results = await asyncio.gather(*tasks)
33 | for res in results:
34 | if res is not None and len(res["data"]) > 0:
35 | for torrent in res["data"]:
36 | COMBO["data"].append(torrent)
37 | total_torrents_overall = total_torrents_overall + res["total"]
38 | COMBO["time"] = time.time() - start_time
39 | COMBO["total"] = total_torrents_overall
40 | if total_torrents_overall == 0:
41 | return error_handler(
42 | status_code=status.HTTP_404_NOT_FOUND,
43 | json_message={"error": "Result not found."},
44 | )
45 | return COMBO
46 |
47 |
48 | @router.get("/trending")
49 | async def get_all_trending(limit: Optional[int] = 0):
50 | start_time = time.time()
51 | # * just getting all_sites dictionary
52 | all_sites = check_if_site_available("1337x")
53 | sites_list = [
54 | site
55 | for site in all_sites.keys()
56 | if all_sites[site]["trending_available"] and all_sites[site]["website"]
57 | ]
58 | tasks = []
59 | COMBO = {"data": []}
60 | total_torrents_overall = 0
61 | for site in sites_list:
62 | limit = (
63 | all_sites[site]["limit"]
64 | if limit == 0 or limit > all_sites[site]["limit"]
65 | else limit
66 | )
67 | tasks.append(
68 | asyncio.create_task(
69 | all_sites[site]["website"]().trending(
70 | category=None, page=1, limit=limit
71 | )
72 | )
73 | )
74 | results = await asyncio.gather(*tasks)
75 | for res in results:
76 | if res is not None and len(res["data"]) > 0:
77 | for torrent in res["data"]:
78 | COMBO["data"].append(torrent)
79 | total_torrents_overall = total_torrents_overall + res["total"]
80 | COMBO["time"] = time.time() - start_time
81 | COMBO["total"] = total_torrents_overall
82 | if total_torrents_overall == 0:
83 | return error_handler(
84 | status_code=status.HTTP_404_NOT_FOUND,
85 | json_message={"error": "Result not found."},
86 | )
87 | return COMBO
88 |
89 |
90 | @router.get("/recent")
91 | async def get_all_recent(limit: Optional[int] = 0):
92 | start_time = time.time()
93 | # just getting all_sites dictionary
94 | all_sites = check_if_site_available("1337x")
95 | sites_list = [
96 | site
97 | for site in all_sites.keys()
98 | if all_sites[site]["recent_available"] and all_sites[site]["website"]
99 | ]
100 | tasks = []
101 | COMBO = {"data": []}
102 | total_torrents_overall = 0
103 | for site in sites_list:
104 | limit = (
105 | all_sites[site]["limit"]
106 | if limit == 0 or limit > all_sites[site]["limit"]
107 | else limit
108 | )
109 | tasks.append(
110 | asyncio.create_task(
111 | all_sites[site]["website"]().recent(category=None, page=1, limit=limit)
112 | )
113 | )
114 | results = await asyncio.gather(*tasks)
115 | for res in results:
116 | if res is not None and len(res["data"]) > 0:
117 | for torrent in res["data"]:
118 | COMBO["data"].append(torrent)
119 | total_torrents_overall = total_torrents_overall + res["total"]
120 | COMBO["time"] = time.time() - start_time
121 | COMBO["total"] = total_torrents_overall
122 | if total_torrents_overall == 0:
123 | return error_handler(
124 | status_code=status.HTTP_404_NOT_FOUND,
125 | json_message={"error": "Result not found."},
126 | )
127 | return COMBO
128 |
--------------------------------------------------------------------------------
/routers/v1/recent_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | from fastapi import status
3 | from typing import Optional
4 | from helper.is_site_available import check_if_site_available
5 | from helper.error_messages import error_handler
6 |
7 | router = APIRouter(tags=["Recent Torrents Route"])
8 |
9 |
10 | @router.get("/")
11 | @router.get("")
12 | async def get_recent(
13 | site: str,
14 | limit: Optional[int] = 0,
15 | category: Optional[str] = None,
16 | page: Optional[int] = 1,
17 | ):
18 | all_sites = check_if_site_available(site)
19 | site = site.lower()
20 | category = category.lower() if category is not None else None
21 | if all_sites:
22 | limit = (
23 | all_sites[site]["limit"]
24 | if limit == 0 or limit > all_sites[site]["limit"]
25 | else limit
26 | )
27 | if all_sites[site]["recent_available"]:
28 | if (
29 | category is not None
30 | and not all_sites[site]["recent_category_available"]
31 | ):
32 | return error_handler(
33 | status_code=status.HTTP_404_NOT_FOUND,
34 | json_message={
35 | "error": "Search by Recent category not available for {}.".format(
36 | site
37 | )
38 | },
39 | )
40 | if category is not None and category not in all_sites[site]["categories"]:
41 | return error_handler(
42 | status_code=status.HTTP_404_NOT_FOUND,
43 | json_message={
44 | "error": "Selected category not available.",
45 | "available_categories": all_sites[site]["categories"],
46 | },
47 | )
48 | resp = await all_sites[site]["website"]().recent(category, page, limit)
49 | if resp is None:
50 | return error_handler(
51 | status_code=status.HTTP_403_FORBIDDEN,
52 | json_message={
53 | "error": "Website Blocked Change IP or Website Domain."
54 | },
55 | )
56 |
57 | elif len(resp["data"]) > 0:
58 | return resp
59 | else:
60 | return error_handler(
61 | status_code=status.HTTP_404_NOT_FOUND,
62 | json_message={"error": "Result not found."},
63 | )
64 | else:
65 | return error_handler(
66 | status_code=status.HTTP_404_NOT_FOUND,
67 | json_message={
68 |                     "error": "Recent search not available for {}.".format(site)
69 | },
70 | )
71 | return error_handler(
72 | status_code=status.HTTP_404_NOT_FOUND,
73 | json_message={"error": "Selected Site Not Available"},
74 | )
75 |
--------------------------------------------------------------------------------
/routers/v1/search_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | from typing import Optional
3 | from helper.is_site_available import check_if_site_available
4 | from fastapi import status
5 | from helper.error_messages import error_handler
6 |
7 | router = APIRouter(tags=["Search"])
8 |
9 |
10 | @router.get("/")
11 | @router.get("")
12 | async def search_for_torrents(
13 | site: str, query: str, limit: Optional[int] = 0, page: Optional[int] = 1
14 | ):
15 | site = site.lower()
16 | query = query.lower()
17 | all_sites = check_if_site_available(site)
18 | if all_sites:
19 | limit = (
20 | all_sites[site]["limit"]
21 | if limit == 0 or limit > all_sites[site]["limit"]
22 | else limit
23 | )
24 |
25 | resp = await all_sites[site]["website"]().search(query, page, limit)
26 | if resp is None:
27 | return error_handler(
28 | status_code=status.HTTP_403_FORBIDDEN,
29 | json_message={"error": "Website Blocked Change IP or Website Domain."},
30 | )
31 | elif len(resp["data"]) > 0:
32 | return resp
33 | else:
34 | return error_handler(
35 | status_code=status.HTTP_404_NOT_FOUND,
36 | json_message={"error": "Result not found."},
37 | )
38 |
39 | return error_handler(
40 | status_code=status.HTTP_404_NOT_FOUND,
41 | json_message={"error": "Selected Site Not Available"},
42 | )
43 |
--------------------------------------------------------------------------------
/routers/v1/search_url_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, status
2 | from helper.is_site_available import check_if_site_available
3 | from helper.error_messages import error_handler
4 |
5 | router = APIRouter(tags=["Torrent By Url"])
6 |
7 |
8 | # * Only supports 1337x AS OF NOW
9 | @router.get("/")
10 | @router.get("")
11 | async def get_torrent_from_url(site: str, url: str):
12 | site = site.lower()
13 | all_sites = check_if_site_available(site)
14 | if all_sites:
15 | resp = await all_sites[site]["website"]().get_torrent_by_url(url)
16 | if resp is None:
17 | return error_handler(
18 | status_code=status.HTTP_403_FORBIDDEN,
19 | json_message={"error": "Website Blocked Change IP or Website Domain."},
20 | )
21 | elif len(resp["data"]) > 0:
22 | return resp
23 | else:
24 | return error_handler(
25 | status_code=status.HTTP_404_NOT_FOUND,
26 | json_message={"error": "Result not found."},
27 | )
28 | return error_handler(
29 | status_code=status.HTTP_404_NOT_FOUND,
30 | json_message={"error": "Selected Site Not Available"},
31 | )
32 |
--------------------------------------------------------------------------------
/routers/v1/sites_list_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter, status
2 | from helper.is_site_available import check_if_site_available, sites_config
3 | from helper.error_messages import error_handler
4 |
5 | router = APIRouter(tags=["Get all sites"])
6 |
7 |
8 | @router.get("/")
9 | @router.get("")
10 | async def get_all_supported_sites():
11 | all_sites = check_if_site_available("1337x")
12 | sites_list = [site for site in all_sites.keys() if all_sites[site]["website"]]
13 | return error_handler(
14 | status_code=status.HTTP_200_OK,
15 | json_message={
16 | "supported_sites": sites_list,
17 | },
18 | )
19 |
20 | @router.get("/config")
21 | async def get_site_config():
22 | return error_handler(
23 | status_code=status.HTTP_200_OK,
24 | json_message=sites_config
25 | )
26 |
--------------------------------------------------------------------------------
/routers/v1/trending_router.py:
--------------------------------------------------------------------------------
1 | from fastapi import APIRouter
2 | from fastapi import status
3 | from typing import Optional
4 | from helper.is_site_available import check_if_site_available
5 | from helper.error_messages import error_handler
6 |
7 | router = APIRouter(tags=["Trending Torrents"])
8 |
9 |
10 | @router.get("/")
11 | @router.get("")
12 | async def get_trending(
13 | site: str,
14 | limit: Optional[int] = 0,
15 | category: Optional[str] = None,
16 | page: Optional[int] = 1,
17 | ):
18 | site = site.lower()
19 | all_sites = check_if_site_available(site)
20 | category = category.lower() if category is not None else None
21 | if all_sites:
22 | limit = (
23 | all_sites[site]["limit"]
24 | if limit == 0 or limit > all_sites[site]["limit"]
25 | else limit
26 | )
27 | if all_sites[site]["trending_available"]:
28 | if not category is None and not all_sites[site]["trending_category"]:
29 | return error_handler(
30 | status_code=status.HTTP_404_NOT_FOUND,
31 | json_message={
32 | "error": "Search by trending category not available for {}.".format(
33 | site
34 | )
35 | },
36 | )
37 | if not category is None and category not in all_sites[site]["categories"]:
38 | return error_handler(
39 | status_code=status.HTTP_404_NOT_FOUND,
40 | json_message={
41 | "error": "Selected category not available.",
42 | "available_categories": all_sites[site]["categories"],
43 | },
44 | )
45 | resp = await all_sites[site]["website"]().trending(category, page, limit)
46 | if resp is None:
47 | return error_handler(
48 | status_code=status.HTTP_403_FORBIDDEN,
49 | json_message={
50 | "error": "Website Blocked Change IP or Website Domain."
51 | },
52 | )
53 | elif len(resp["data"]) > 0:
54 | return resp
55 | else:
56 | return error_handler(
57 | status_code=status.HTTP_404_NOT_FOUND,
58 | json_message={"error": "Result not found."},
59 | )
60 | else:
61 | return error_handler(
62 | status_code=status.HTTP_404_NOT_FOUND,
63 | json_message={
64 |                     "error": "Trending search not available for {}.".format(site)
65 | },
66 | )
67 | return error_handler(
68 | status_code=status.HTTP_404_NOT_FOUND,
69 | json_message={"error": "Selected Site Not Available"},
70 | )
71 |
--------------------------------------------------------------------------------
/torrents/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Ryuk-me/Torrent-Api-py/1a302617686b22714eacf60cc8d215bbd5979ffe/torrents/__init__.py
--------------------------------------------------------------------------------
/torrents/bitsearch.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.html_scraper import Scraper
6 | from constants.base_url import BITSEARCH
7 |
8 |
9 | class Bitsearch:
10 | _name = "Bit Search"
11 | def __init__(self):
12 | self.BASE_URL = BITSEARCH
13 | self.LIMIT = None
14 |
15 | def _parser(self, htmls):
16 | try:
17 | for html in htmls:
18 | soup = BeautifulSoup(html, "html.parser")
19 |
20 | my_dict = {"data": []}
21 | for divs in soup.find_all("li", class_="search-result"):
22 | info = divs.find("div", class_="info")
23 | name = info.find("h5", class_="title").find("a").text
24 | url = info.find("h5", class_="title").find("a")["href"]
25 | category = info.find("div").find("a", class_="category").text
26 | if not category:
27 | continue
28 | stats = info.find("div", class_="stats").find_all("div")
29 | if stats:
30 | downloads = stats[0].text
31 | size = stats[1].text
32 | seeders = stats[2].text.strip()
33 | leechers = stats[3].text.strip()
34 | date = stats[4].text
35 | links = divs.find("div", class_="links").find_all("a")
36 | magnet = links[1]["href"]
37 | torrent = links[0]["href"]
38 | my_dict["data"].append(
39 | {
40 | "name": name,
41 | "size": size,
42 | "seeders": seeders,
43 | "leechers": leechers,
44 | "category": category,
45 | "hash": re.search(
46 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
47 | ).group(0),
48 | "magnet": magnet,
49 | "torrent": torrent,
50 | "url": self.BASE_URL + url,
51 | "date": date,
52 | "downloads": downloads,
53 | }
54 | )
55 | if len(my_dict["data"]) == self.LIMIT:
56 | break
57 | try:
58 | total_pages = (
59 | int(
60 | soup.select(
61 | "body > main > div.container.mt-2 > div > div:nth-child(1) > div > span > b"
62 | )[0].text
63 | )
64 | / 20
65 | ) # !20 search result available on each page
66 | total_pages = (
67 | total_pages + 1
68 | if type(total_pages) == float
69 | else total_pages
70 | if int(total_pages) > 0
71 | else total_pages + 1
72 | )
73 |
74 | current_page = int(
75 | soup.find("div", class_="pagination")
76 | .find("a", class_="active")
77 | .text
78 | )
79 | my_dict["current_page"] = current_page
80 | my_dict["total_pages"] = int(total_pages)
81 | except:
82 | ...
83 | return my_dict
84 | except:
85 | return None
86 |
87 | async def search(self, query, page, limit):
88 | async with aiohttp.ClientSession() as session:
89 | start_time = time.time()
90 | self.LIMIT = limit
91 | url = self.BASE_URL + "/search?q={}&page={}".format(query, page)
92 | return await self.parser_result(start_time, url, session)
93 |
94 | async def parser_result(self, start_time, url, session):
95 | html = await Scraper().get_all_results(session, url)
96 | results = self._parser(html)
97 | if results is not None:
98 | results["time"] = time.time() - start_time
99 | results["total"] = len(results["data"])
100 | return results
101 | return results
102 |
103 | async def trending(self, category, page, limit):
104 | async with aiohttp.ClientSession() as session:
105 | start_time = time.time()
106 | self.LIMIT = limit
107 | url = self.BASE_URL + "/trending"
108 | return await self.parser_result(start_time, url, session)
109 |
--------------------------------------------------------------------------------
/torrents/glodls.py:
--------------------------------------------------------------------------------
1 | import time
2 | import aiohttp
3 | from bs4 import BeautifulSoup
4 | from helper.html_scraper import Scraper
5 | from constants.base_url import GLODLS
6 |
7 |
8 | class Glodls:
9 | _name = "Glodls"
10 | def __init__(self):
11 | self.BASE_URL = GLODLS
12 | self.LIMIT = None
13 |
14 | def _parser(self, htmls):
15 | try:
16 | for html in htmls:
17 | soup = BeautifulSoup(html, "html.parser")
18 |
19 | my_dict = {"data": []}
20 | for tr in soup.find_all("tr", class_="t-row")[0:-1:2]:
21 | td = tr.find_all("td")
22 | name = td[1].find_all("a")[-1].find("b").text
23 | url = self.BASE_URL + td[1].find_all("a")[-1]["href"]
24 | torrent = self.BASE_URL + td[2].find("a")["href"]
25 | magnet = td[3].find("a")["href"]
26 | size = td[4].text
27 | seeders = td[5].find("font").find("b").text
28 | leechers = td[6].find("font").find("b").text
29 | try:
30 | uploader = td[7].find("a").find("b").find("font").text
31 | except:
32 | uploader = ""
33 | my_dict["data"].append(
34 | {
35 | "name": name,
36 | "size": size,
37 | "uploader": uploader,
38 | "seeders": seeders,
39 | "leechers": leechers,
40 | "magnet": magnet,
41 | "torrent": torrent,
42 | "url": self.BASE_URL + url,
43 | }
44 | )
45 | if len(my_dict["data"]) == self.LIMIT:
46 | break
47 | try:
48 | pagination = soup.find("div", class_="pagination")
49 | total_pages = pagination.find_all("a")[-2]["href"]
50 | total_pages = total_pages.split("=")[-1]
51 | my_dict["total_pages"] = int(total_pages) + 1
52 | except:
53 | ...
54 | return my_dict
55 | except:
56 | return None
57 |
58 | async def search(self, query, page, limit):
59 | async with aiohttp.ClientSession() as session:
60 | start_time = time.time()
61 | self.LIMIT = limit
62 | url = (
63 | self.BASE_URL
64 | + "/search_results.php?search={}&cat=0&incldead=0&inclexternal=0&lang=0&sort=seeders&order=desc&page={}".format(
65 | query, page - 1
66 | )
67 | )
68 | return await self.parser_result(start_time, url, session)
69 |
70 | async def parser_result(self, start_time, url, session):
71 | html = await Scraper().get_all_results(session, url)
72 | results = self._parser(html)
73 | if results is not None:
74 | results["time"] = time.time() - start_time
75 | results["total"] = len(results["data"])
76 | return results
77 | return results
78 |
79 | async def trending(self, category, page, limit):
80 | async with aiohttp.ClientSession() as session:
81 | start_time = time.time()
82 | self.LIMIT = limit
83 | url = self.BASE_URL + "/today.php"
84 | return await self.parser_result(start_time, url, session)
85 |
86 | async def recent(self, category, page, limit):
87 | async with aiohttp.ClientSession() as session:
88 | start_time = time.time()
89 | self.LIMIT = limit
90 | url = self.BASE_URL + "/search.php"
91 | return await self.parser_result(start_time, url, session)
92 |
--------------------------------------------------------------------------------
/torrents/kickass.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import KICKASS
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class Kickass:
13 | _name = "Kick Ass"
14 | def __init__(self):
15 | self.BASE_URL = KICKASS
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj):
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | poster = soup.find("a", class_="movieCover")
26 | if poster:
27 | poster = poster.find("img")["src"]
28 | obj["poster"] = self.BASE_URL + poster
29 | imgs = (soup.find("div", class_="data")).find_all("img")
30 | if imgs and len(imgs) > 0:
31 | obj["screenshot"] = [img["src"] for img in imgs]
32 | magnet_and_torrent = soup.find_all("a", class_="kaGiantButton")
33 | magnet = magnet_and_torrent[0]["href"]
34 | obj["hash"] = re.search(
35 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
36 | ).group(0)
37 | obj["magnet"] = magnet
38 | except:
39 | ...
40 | except:
41 | return None
42 |
43 | async def _get_torrent(self, result, session, urls):
44 | tasks = []
45 | for idx, url in enumerate(urls):
46 | for obj in result["data"]:
47 | if obj["url"] == url:
48 | task = asyncio.create_task(
49 | self._individual_scrap(session, url, result["data"][idx])
50 | )
51 | tasks.append(task)
52 | await asyncio.gather(*tasks)
53 | return result
54 |
55 | def _parser(self, htmls):
56 | try:
57 | for html in htmls:
58 | soup = BeautifulSoup(html, "html.parser")
59 | list_of_urls = []
60 | my_dict = {"data": []}
61 | for tr in soup.select("tr.odd,tr.even"):
62 | td = tr.find_all("td")
63 | name = tr.find("a", class_="cellMainLink").text.strip()
64 | url = self.BASE_URL + tr.find("a", class_="cellMainLink")["href"]
65 | list_of_urls.append(url)
66 | if name:
67 | size = td[1].text.strip()
68 | seeders = td[4].text.strip()
69 | leechers = td[5].text.strip()
70 | uploader = td[2].text.strip()
71 | date = td[3].text.strip()
72 |
73 | my_dict["data"].append(
74 | {
75 | "name": name,
76 | "size": size,
77 | "date": date,
78 | "seeders": seeders,
79 | "leechers": leechers,
80 | "url": url,
81 | "uploader": uploader,
82 | }
83 | )
84 | if len(my_dict["data"]) == self.LIMIT:
85 | break
86 | try:
87 | pages = soup.find("div", class_="pages")
88 | current_page = int(pages.find("a", class_="active").text)
89 | pages = pages.find_all("a")
90 | total_page = pages[-1].text
91 | if total_page == ">>":
92 | total_page = pages[-2].text
93 | my_dict["current_page"] = current_page
94 | my_dict["total_pages"] = int(total_page)
95 | except:
96 | ...
97 | return my_dict, list_of_urls
98 | except:
99 | return None, None
100 |
101 | async def search(self, query, page, limit):
102 | async with aiohttp.ClientSession() as session:
103 | start_time = time.time()
104 | self.LIMIT = limit
105 | url = self.BASE_URL + "/usearch/{}/{}/".format(query, page)
106 | return await self.parser_result(start_time, url, session)
107 |
108 | async def parser_result(self, start_time, url, session):
109 | htmls = await Scraper().get_all_results(session, url)
110 | result, urls = self._parser(htmls)
111 | if result is not None:
112 | results = await self._get_torrent(result, session, urls)
113 | results["time"] = time.time() - start_time
114 | results["total"] = len(results["data"])
115 | return results
116 | return result
117 |
118 | async def trending(self, category, page, limit):
119 | async with aiohttp.ClientSession() as session:
120 | start_time = time.time()
121 | self.LIMIT = limit
122 | if not category:
123 | url = self.BASE_URL + "/top-100"
124 | else:
125 | if category == "tv":
126 | category = "television"
127 | elif category == "apps":
128 | category = "applications"
129 | url = self.BASE_URL + "/top-100-{}/".format(category)
130 | return await self.parser_result(start_time, url, session)
131 |
132 | async def recent(self, category, page, limit):
133 | async with aiohttp.ClientSession() as session:
134 | start_time = time.time()
135 | self.LIMIT = limit
136 | if not category:
137 | url = self.BASE_URL + "/new/"
138 | else:
139 | url = self.BASE_URL + "/{}/".format(category)
140 | return await self.parser_result(start_time, url, session)
141 |
--------------------------------------------------------------------------------
/torrents/libgen.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
6 | from helper.html_scraper import Scraper
7 | from constants.base_url import LIBGEN
8 | from constants.headers import HEADER_AIO
9 |
10 |
11 | class Libgen:
12 | _name = "Libgen"
13 | def __init__(self):
14 | self.BASE_URL = LIBGEN
15 | self.LIMIT = None
16 |
17 | @decorator_asyncio_fix
18 | async def _individual_scrap(self, session, url, obj, sem):
19 | async with sem:
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | x = soup.find_all("a")
26 | for a in x:
27 | if a.text == "One-filetorrent":
28 | if a["href"] != "#":
29 | obj["torrent"] = self.BASE_URL + a["href"]
30 | poster = soup.find_all("img")[0]
31 |
32 | if poster:
33 | obj["poster"] = "http://library.lol" + poster["src"]
34 | except:
35 | ...
36 | except:
37 | return None
38 |
39 | async def _get_torrent(self, result, session, urls):
40 | tasks = []
41 | sem = asyncio.Semaphore(3)
42 | for idx, url in enumerate(urls):
43 | for obj in result["data"]:
44 | if obj["url"] == url:
45 | task = asyncio.create_task(
46 | self._individual_scrap(session, url, result["data"][idx], sem)
47 | )
48 | tasks.append(task)
49 | await asyncio.gather(*tasks)
50 | return result
51 |
52 | def _parser(self, htmls):
53 | try:
54 | for html in htmls:
55 | soup = BeautifulSoup(html, "html.parser")
56 | list_of_urls = []
57 | my_dict = {"data": []}
58 | trs = soup.select("[valign=top]")
59 | for tr in trs[1:]:
60 | td = tr.find_all("td")
61 | id = td[0].text
62 | authors = []
63 | author = td[1].find_all("a")
64 | for a in author:
65 | authors.append(a.text.strip())
66 | name_and_url = td[2].find("a")
67 | name = name_and_url.text
68 | url = self.BASE_URL + "/" + name_and_url["href"]
69 | list_of_urls.append(url)
70 | publisher = td[3].text
71 | year = td[4].text
72 | pages = None
73 | try:
74 | pages = td[5].text
75 | except:
76 | ...
77 | language = td[6].text
78 | size = td[7].text
79 | extension = td[8].text
80 |
81 | my_dict["data"].append(
82 | {
83 | "id": id,
84 | "authors": authors,
85 | "name": name,
86 | "publisher": publisher,
87 | "year": year,
88 | "pages": pages,
89 | "language": language,
90 | "size": size,
91 | "extension": extension,
92 | "url": url,
93 | }
94 | )
95 | if len(my_dict["data"]) == self.LIMIT:
96 | break
97 | return my_dict, list_of_urls
98 | except:
99 | return None, None
100 |
101 | async def search(self, query, page, limit):
102 | async with aiohttp.ClientSession() as session:
103 | start_time = time.time()
104 | self.LIMIT = limit
105 | url = (
106 | self.BASE_URL
107 | + "/search.php?req={}&lg_topic=libgen&open=0&view=simple&res=100&phrase=1&column=def".format(
108 | query
109 | )
110 | )
111 | return await self.parser_result(start_time, url, session)
112 |
113 | async def parser_result(self, start_time, url, session):
114 | htmls = await Scraper().get_all_results(session, url)
115 | result, urls = self._parser(htmls)
116 | if result is not None:
117 | results = await self._get_torrent(result, session, urls)
118 | results["time"] = time.time() - start_time
119 | results["total"] = len(results["data"])
120 | return results
121 | return result
122 |
--------------------------------------------------------------------------------
/torrents/limetorrents.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import LIMETORRENT
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class Limetorrent:
13 | _name = "Lime Torrents"
14 | def __init__(self):
15 | self.BASE_URL = LIMETORRENT
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj):
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | a_tag = soup.find_all("a", class_="csprite_dltorrent")
26 | obj["torrent"] = a_tag[0]["href"]
27 | obj["magnet"] = a_tag[-1]["href"]
28 | obj["hash"] = re.search(
29 | r"([{a-f\d,A-F\d}]{32,40})\b", obj["magnet"]
30 | ).group(0)
31 | except:
32 | ...
33 | except:
34 | return None
35 |
36 | async def _get_torrent(self, result, session, urls):
37 | tasks = []
38 | for idx, url in enumerate(urls):
39 | for obj in result["data"]:
40 | if obj["url"] == url:
41 | task = asyncio.create_task(
42 | self._individual_scrap(session, url, result["data"][idx])
43 | )
44 | tasks.append(task)
45 | await asyncio.gather(*tasks)
46 | return result
47 |
48 | def _parser(self, htmls, idx=0):
49 | try:
50 | for html in htmls:
51 | soup = BeautifulSoup(html, "html.parser")
52 | list_of_urls = []
53 | my_dict = {"data": []}
54 |
55 | for tr in soup.find_all("tr")[idx:]:
56 | td = tr.find_all("td")
57 | if len(td) == 0:
58 | continue
59 | name = td[0].get_text(strip=True)
60 | url = self.BASE_URL + td[0].find_all("a")[-1]["href"]
61 | list_of_urls.append(url)
62 | added_on_and_category = td[1].get_text(strip=True)
63 | date = (added_on_and_category.split("-")[0]).strip()
64 | category = (added_on_and_category.split("in")[-1]).strip()
65 | size = td[2].text
66 | seeders = td[3].text
67 | leechers = td[4].text
68 | my_dict["data"].append(
69 | {
70 | "name": name,
71 | "size": size,
72 | "date": date,
73 | "category": category if category != date else None,
74 | "seeders": seeders,
75 | "leechers": leechers,
76 | "url": url,
77 | }
78 | )
79 | if len(my_dict["data"]) == self.LIMIT:
80 | break
81 | try:
82 | div = soup.find("div", class_="search_stat")
83 | current_page = int(div.find("span", class_="active").text)
84 | total_page = int((div.find_all("a"))[-2].text)
85 | if current_page > total_page:
86 | total_page = current_page
87 | my_dict["current_page"] = current_page
88 | my_dict["total_pages"] = total_page
89 | except:
90 | ...
91 | return my_dict, list_of_urls
92 | except:
93 | return None, None
94 |
95 | async def search(self, query, page, limit):
96 | async with aiohttp.ClientSession() as session:
97 | start_time = time.time()
98 | self.LIMIT = limit
99 | url = self.BASE_URL + "/search/all/{}//{}".format(query, page)
100 | return await self.parser_result(start_time, url, session, idx=5)
101 |
102 | async def parser_result(self, start_time, url, session, idx=0):
103 | htmls = await Scraper().get_all_results(session, url)
104 | result, urls = self._parser(htmls, idx)
105 | if result is not None:
106 | results = await self._get_torrent(result, session, urls)
107 | results["time"] = time.time() - start_time
108 | results["total"] = len(results["data"])
109 | return results
110 | return result
111 |
112 | async def trending(self, category, page, limit):
113 | async with aiohttp.ClientSession() as session:
114 | start_time = time.time()
115 | self.LIMIT = limit
116 | url = self.BASE_URL + "/top100"
117 | return await self.parser_result(start_time, url, session)
118 |
119 | async def recent(self, category, page, limit):
120 | async with aiohttp.ClientSession() as session:
121 | start_time = time.time()
122 | self.LIMIT = limit
123 | if not category:
124 | url = self.BASE_URL + "/latest100"
125 | else:
126 | category = category.capitalize()
127 | if category == "Apps":
128 | category = "Applications"
129 | elif category == "Tv":
130 | category = "TV-shows"
131 | url = self.BASE_URL + "/browse-torrents/{}/date/{}/".format(
132 | category, page
133 | )
134 | return await self.parser_result(start_time, url, session)
135 |
--------------------------------------------------------------------------------
/torrents/magnet_dl.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | import cloudscraper
6 | import requests
7 | from bs4 import BeautifulSoup
8 | from constants.base_url import MAGNETDL
9 |
10 |
11 | class Magnetdl:
12 | _name = "MagnetDL"
13 | def __init__(self):
14 | self.BASE_URL = MAGNETDL
15 | self.LIMIT = None
16 |
17 | def _parser(self, htmls):
18 | try:
19 | for html in htmls:
20 | soup = BeautifulSoup(html, "html.parser")
21 |
22 | my_dict = {"data": []}
23 | table = soup.find("table", class_="download")
24 | for tr in soup.find_all("tr"):
25 | td = tr.find_all("td")
26 | if len(td) > 1:
27 | name = td[1].find("a").get_text(strip=True)
28 | if name != "":
29 | magnet = td[0].find("a")["href"]
30 | try:
31 | size = td[5].get_text(strip=True)
32 | except IndexError:
33 | size = None
34 | url = td[1].find("a")["href"]
35 | date = td[2].get_text(strip=True)
36 | seeders = td[6].get_text(strip=True)
37 | leechers = td[7].get_text(strip=True)
38 | category = td[3].text
39 | my_dict["data"].append(
40 | {
41 | "name": name,
42 | "size": size,
43 | "seeders": seeders,
44 | "leechers": leechers,
45 | "category": category,
46 | "hash": re.search(
47 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
48 | ).group(0),
49 | "magnet": magnet,
50 | "url": self.BASE_URL + url,
51 | "date": date,
52 | }
53 | )
54 | if len(my_dict["data"]) == self.LIMIT:
55 | break
56 | total_results = soup.find("div", id="footer").text.replace(",", "")
57 | current_page = int(
58 | (re.search(r"Page\s\d*", total_results).group(0)).replace(
59 | "Page ", ""
60 | )
61 | )
62 | total_pages = (
63 | int(
64 | (
65 | (re.search(r"Found\s\d*", total_results).group(0)).replace(
66 | "Found ", ""
67 | )
68 | )
69 | )
70 | // 40
71 | )
72 | my_dict["current_page"] = current_page
73 | my_dict["total_pages"] = (
74 | 30
75 | if total_pages > 30
76 | else total_pages
77 | if total_pages != 0
78 | else total_pages + 1
79 | )
80 | return my_dict
81 | except:
82 | return None
83 |
84 | async def _get_html(self, session, url):
85 | session = cloudscraper.create_scraper(sess=session)
86 | try:
87 | return session.get(url).text
88 | except:
89 | return None
90 |
91 | async def _get_all_results(self, session, url):
92 | return await asyncio.gather(asyncio.create_task(self._get_html(session, url)))
93 |
94 | async def search(self, query, page, limit):
95 | async with aiohttp.ClientSession() as session:
96 | start_time = time.time()
97 | self.LIMIT = limit
98 | query = requests.utils.unquote(query)
99 | query = query.split(" ")
100 | query = "-".join(query)
101 | url = self.BASE_URL + "/{}/{}/se/desc/{}/".format(query[0], query, page)
102 | return await self.parser_result(start_time, url, session)
103 |
104 | async def parser_result(self, start_time, url, session):
105 | data = await self._get_all_results(session, url)
106 | results = self._parser(data)
107 | if results is not None:
108 | results["time"] = time.time() - start_time
109 | results["total"] = len(results["data"])
110 | return results
111 | return results
112 |
113 | async def recent(self, category, page, limit):
114 | async with aiohttp.ClientSession() as session:
115 | start_time = time.time()
116 | self.LIMIT = limit
117 | if not category:
118 | url = self.BASE_URL + "/download/movies/{}".format(page)
119 | else:
120 | if category == "books":
121 | category = "e-books"
122 | url = self.BASE_URL + "/download/{}/{}/".format(category, page)
123 | return await self.parser_result(start_time, url, session)
124 |
125 | #! the maximum number of pages in a category is 30
126 |
--------------------------------------------------------------------------------
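The Magnetdl parser above derives paging from the page footer: it strips commas, reads the current page from a "Page N" fragment and the hit count from a "Found N" fragment, takes found // 40 as the page count, and caps it at 30 as the in-file comment notes. A hedged standalone sketch of that footer parsing (illustrative; the exact footer wording is an assumption inferred from the regexes in the file):

    import re

    def parse_footer(footer: str, per_page: int = 40, max_pages: int = 30):
        footer = footer.replace(",", "")                      # "1,234" -> "1234"
        current = int(re.search(r"Page\s(\d+)", footer).group(1))
        found = int(re.search(r"Found\s(\d+)", footer).group(1))
        total = found // per_page
        total = max_pages if total > max_pages else (total if total != 0 else 1)
        return current, total

    # parse_footer("Found 95 torrents - Page 1") -> (1, 2)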
/torrents/nyaa_si.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.html_scraper import Scraper
6 | from constants.base_url import NYAASI
7 |
8 |
9 | class NyaaSi:
10 | _name = "Nyaa"
11 | def __init__(self):
12 | self.BASE_URL = NYAASI
13 | self.LIMIT = None
14 |
15 | def _parser(self, htmls):
16 | try:
17 | for html in htmls:
18 | soup = BeautifulSoup(html, "html.parser")
19 |
20 | my_dict = {"data": []}
21 | for tr in (soup.find("table")).find_all("tr")[1:]:
22 | td = tr.find_all("td")
23 | name = td[1].find_all("a")[-1].text
24 | url = td[1].find_all("a")[-1]["href"]
25 | magnet_and_torrent = td[2].find_all("a")
26 | magnet = magnet_and_torrent[-1]["href"]
27 | torrent = self.BASE_URL + magnet_and_torrent[0]["href"]
28 | size = td[3].text
29 | date = td[4].text
30 | seeders = td[5].text
31 | leechers = td[6].text
32 | downloads = td[7].text
33 | category = td[0].find("a")["title"].split("-")[0].strip()
34 | my_dict["data"].append(
35 | {
36 | "name": name,
37 | "size": size,
38 | "seeders": seeders,
39 | "leechers": leechers,
40 | "category": category,
41 | "hash": re.search(
42 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
43 | ).group(0),
44 | "magnet": magnet,
45 | "torrent": torrent,
46 | "url": self.BASE_URL + url,
47 | "date": date,
48 | "downloads": downloads,
49 | }
50 | )
51 | if len(my_dict["data"]) == self.LIMIT:
52 | break
53 |
54 | try:
55 | ul = soup.find("ul", class_="pagination")
56 | tpages = ul.find_all("a")[-2].text
57 | current_page = (ul.find("li", class_="active")).find("a").text
58 | my_dict["current_page"] = int(current_page)
59 | my_dict["total_pages"] = int(tpages)
60 | except:
61 | my_dict["current_page"] = None
62 | my_dict["total_pages"] = None
63 | return my_dict
64 | except:
65 | return None
66 |
67 | async def search(self, query, page, limit):
68 | async with aiohttp.ClientSession() as session:
69 | start_time = time.time()
70 | self.LIMIT = limit
71 | url = self.BASE_URL + "/?f=0&c=0_0&q={}&p={}".format(query, page)
72 | return await self.parser_result(start_time, url, session)
73 |
74 | async def parser_result(self, start_time, url, session):
75 | html = await Scraper().get_all_results(session, url)
76 | results = self._parser(html)
77 | if results is not None:
78 | results["time"] = time.time() - start_time
79 | results["total"] = len(results["data"])
80 | return results
81 | return results
82 |
83 | async def recent(self, category, page, limit):
84 | async with aiohttp.ClientSession() as session:
85 | start_time = time.time()
86 | self.LIMIT = limit
87 | url = self.BASE_URL
88 | return await self.parser_result(start_time, url, session)
89 |
--------------------------------------------------------------------------------
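Several of the scrapers in this directory (Nyaa, Pirate Bay, 1337x, Torlock, ...) recover the info-hash by running a 32-40 character hex pattern over the magnet link. A small standalone sketch of that extraction, anchored on the btih field (illustrative; infohash_from_magnet is not a helper in this repository, and the looser character class used in the files also tolerates braces and commas):

    import re
    from typing import Optional

    def infohash_from_magnet(magnet: str) -> Optional[str]:
        # v1 magnet links carry a 40-char hex info-hash (older links use 32-char base32).
        match = re.search(r"btih:([a-fA-F0-9]{40}|[A-Z2-7]{32})", magnet)
        return match.group(1) if match else None

    # infohash_from_magnet("magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567&dn=example")
    # -> "0123456789abcdef0123456789abcdef01234567"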
/torrents/pirate_bay.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.html_scraper import Scraper
6 | from constants.base_url import PIRATEBAY
7 |
8 |
9 | class PirateBay:
10 | _name = "Pirate Bay"
11 | def __init__(self):
12 | self.BASE_URL = PIRATEBAY
13 | self.LIMIT = None
14 |
15 | def _parser(self, htmls):
16 | try:
17 | for html in htmls:
18 | soup = BeautifulSoup(html, "html.parser")
19 |
20 | my_dict = {"data": []}
21 | for tr in soup.find_all("tr")[1:]:
22 | td = tr.find_all("td")
23 | try:
24 | name = td[1].find("a").text
25 | except:
26 | name = None
27 | if name:
28 | url = td[1].find("a")["href"]
29 | magnet = td[3].find_all("a")[0]["href"]
30 | size = td[4].text.strip()
31 | seeders = td[5].text
32 | leechers = td[6].text
33 | category = td[0].find_all("a")[0].text
34 | uploader = td[7].text
35 | dateUploaded = td[2].text
36 |
37 | my_dict["data"].append(
38 | {
39 | "name": name,
40 | "size": size,
41 | "seeders": seeders,
42 | "leechers": leechers,
43 | "category": category,
44 | "uploader": uploader,
45 | "url": url,
46 | "date": dateUploaded,
47 | "hash": re.search(
48 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
49 | ).group(0),
50 | "magnet": magnet,
51 | }
52 | )
53 | if len(my_dict["data"]) == self.LIMIT:
54 | break
55 | last_tr = soup.find_all("tr")[-1]
56 | potential_page_link = last_tr.find("td").find("a").get("href")
57 | check_if_pagination_available = potential_page_link is not None and potential_page_link.startswith("/search/")
58 | if check_if_pagination_available:
59 | current_page = last_tr.find("td").find("b").text
60 | my_dict["current_page"] = int(current_page)
61 | my_dict["total_pages"] = int(
62 | last_tr.find("td").find_all("a")[-2].text
63 | )
64 | return my_dict
65 | except:
66 | return None
67 |
68 | async def search(self, query, page, limit):
69 | async with aiohttp.ClientSession() as session:
70 | start_time = time.time()
71 | self.LIMIT = limit
72 | url = self.BASE_URL + "/search/{}/{}/99/0".format(query, page)
73 | return await self.parser_result(start_time, url, session)
74 |
75 | async def parser_result(self, start_time, url, session):
76 | html = await Scraper().get_all_results(session, url)
77 | results = self._parser(html)
78 | if results is not None:
79 | results["time"] = time.time() - start_time
80 | results["total"] = len(results["data"])
81 | return results
82 | return results
83 |
84 | async def trending(self, category, page, limit):
85 | async with aiohttp.ClientSession() as session:
86 | start_time = time.time()
87 | self.LIMIT = limit
88 | url = self.BASE_URL + "/top/all"
89 | return await self.parser_result(start_time, url, session)
90 |
91 | async def recent(self, category, page, limit):
92 | async with aiohttp.ClientSession() as session:
93 | start_time = time.time()
94 | self.LIMIT = limit
95 | if not category:
96 | url = self.BASE_URL + "/recent"
97 | else:
98 | url = self.BASE_URL + "/{}/latest/".format(category)
99 | return await self.parser_result(start_time, url, session)
100 |
--------------------------------------------------------------------------------
/torrents/torlock.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import TORLOCK
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class Torlock:
13 | _name = "Tor Lock"
14 | def __init__(self):
15 | self.BASE_URL = TORLOCK
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj):
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | tm = soup.find_all("a")
26 | magnet = tm[20]["href"]
27 | torrent = tm[23]["href"]
28 | try:
29 | obj["poster"] = soup.find_all("img", class_="img-responsive")[
30 | 0
31 | ]["src"]
32 | except:
33 | ...
34 | if str(magnet).startswith("magnet") and str(torrent).endswith(
35 | "torrent"
36 | ):
37 | obj["torrent"] = torrent
38 | obj["magnet"] = magnet
39 | obj["hash"] = re.search(
40 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
41 | ).group(0)
42 | obj["category"] = tm[25].text
43 | imgs = soup.select(".tab-content img.img-fluid")
44 | if imgs and len(imgs) > 0:
45 | obj["screenshot"] = [img["src"] for img in imgs]
46 | else:
47 | del obj
48 | except IndexError:
49 | ...
50 | except:
51 | return None
52 |
53 | async def _get_torrent(self, result, session, urls):
54 | tasks = []
55 | for idx, url in enumerate(urls):
56 | for obj in result["data"]:
57 | if obj["url"] == url:
58 | task = asyncio.create_task(
59 | self._individual_scrap(session, url, result["data"][idx])
60 | )
61 | tasks.append(task)
62 | await asyncio.gather(*tasks)
63 | return result
64 |
65 | def _parser(self, htmls, idx=0):
66 | try:
67 | for html in htmls:
68 | soup = BeautifulSoup(html, "html.parser")
69 | list_of_urls = []
70 | my_dict = {"data": []}
71 |
72 | for tr in soup.find_all("tr")[idx:]:
73 | td = tr.find_all("td")
74 | if len(td) == 0:
75 | continue
76 | name = td[0].get_text(strip=True)
77 | if name != "":
78 | url = td[0].find("a")["href"]
79 | if url == "":
80 | break
81 | url = self.BASE_URL + url
82 | list_of_urls.append(url)
83 | size = td[2].get_text(strip=True)
84 | date = td[1].get_text(strip=True)
85 | seeders = td[3].get_text(strip=True)
86 | leechers = td[4].get_text(strip=True)
87 | my_dict["data"].append(
88 | {
89 | "name": name,
90 | "size": size,
91 | "date": date,
92 | "seeders": seeders,
93 | "leechers": leechers,
94 | "url": url,
95 | }
96 | )
97 | if len(my_dict["data"]) == self.LIMIT:
98 | break
99 | try:
100 | ul = soup.find("ul", class_="pagination")
101 | tpages = ul.find_all("a")[-2].text
102 | current_page = (
103 | (ul.find("li", class_="active")).find("span").text.split(" ")[0]
104 | )
105 | my_dict["current_page"] = int(current_page)
106 | my_dict["total_pages"] = int(tpages)
107 | except:
108 | my_dict["current_page"] = None
109 | my_dict["total_pages"] = None
110 | return my_dict, list_of_urls
111 | except:
112 | return None, None
113 |
114 | async def search(self, query, page, limit):
115 | async with aiohttp.ClientSession() as session:
116 | start_time = time.time()
117 | self.LIMIT = limit
118 | url = self.BASE_URL + "/all/torrents/{}.html?sort=seeds&page={}".format(
119 | query, page
120 | )
121 | return await self.parser_result(start_time, url, session, idx=5)
122 |
123 | async def parser_result(self, start_time, url, session, idx=0):
124 | htmls = await Scraper().get_all_results(session, url)
125 | result, urls = self._parser(htmls, idx)
126 | if result is not None:
127 | results = await self._get_torrent(result, session, urls)
128 | results["time"] = time.time() - start_time
129 | results["total"] = len(results["data"])
130 | return results
131 | return result
132 |
133 | async def trending(self, category, page, limit):
134 | async with aiohttp.ClientSession() as session:
135 | start_time = time.time()
136 | self.LIMIT = limit
137 | if not category:
138 | url = self.BASE_URL
139 | else:
140 | if category == "books":
141 | category = "ebooks"
142 | url = self.BASE_URL + "/{}.html".format(category)
143 | return await self.parser_result(start_time, url, session)
144 |
145 | async def recent(self, category, page, limit):
146 | async with aiohttp.ClientSession() as session:
147 | start_time = time.time()
148 | self.LIMIT = limit
149 | if not category:
150 | url = self.BASE_URL + "/fresh.html"
151 | else:
152 | if category == "books":
153 | category = "ebooks"
154 | url = self.BASE_URL + "/{}/{}/added/desc.html".format(category, page)
155 | return await self.parser_result(start_time, url, session)
156 |
157 | #! Maybe implement Search By Category in the future
158 |
--------------------------------------------------------------------------------
/torrents/torrentProject.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 | import aiohttp
4 | import requests
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import TORRENTPROJECT
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class TorrentProject:
13 | _name = "Torrent Project"
14 | def __init__(self):
15 | self.BASE_URL = TORRENTPROJECT
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj, sem):
20 | async with sem:
21 | try:
22 | async with session.get(
23 | url,
24 | headers=HEADER_AIO,
25 | ) as res:
26 | html = await res.text(encoding="ISO-8859-1")
27 | soup = BeautifulSoup(html, "html.parser")
28 | try:
29 | magnet = soup.select_one(
30 | "#download > div:nth-child(2) > div > a"
31 | )["href"]
32 | index_of_magnet = magnet.index("magnet")
33 | magnet = requests.utils.unquote(magnet[index_of_magnet:])
34 | obj["magnet"] = magnet
35 | except:
36 | ...
37 | except:
38 | return None
39 |
40 | async def _get_torrent(self, result, session, urls):
41 | tasks = []
42 | sem = asyncio.Semaphore(3)
43 | for idx, url in enumerate(urls):
44 | for obj in result["data"]:
45 | if obj["url"] == url:
46 | task = asyncio.create_task(
47 | self._individual_scrap(session, url, result["data"][idx], sem)
48 | )
49 | tasks.append(task)
50 | await asyncio.gather(*tasks)
51 | return result
52 |
53 | def _parser(self, htmls):
54 | try:
55 | for html in htmls:
56 | soup = BeautifulSoup(html, "html.parser")
57 | list_of_urls = []
58 | my_dict = {"data": []}
59 | for div in soup.select("div#similarfiles div")[2:]:
60 | span = div.find_all("span")
61 | name = span[0].find("a").text
62 | url = self.BASE_URL + span[0].find("a")["href"]
63 | list_of_urls.append(url)
64 | seeders = span[2].text
65 | leechers = span[3].text
66 | date = span[4].text
67 | size = span[5].text
68 |
69 | my_dict["data"].append(
70 | {
71 | "name": name,
72 | "size": size,
73 | "date": date,
74 | "seeders": seeders,
75 | "leechers": leechers,
76 | "url": url,
77 | }
78 | )
79 | if len(my_dict["data"]) == self.LIMIT:
80 | break
81 | return my_dict, list_of_urls
82 | except:
83 | return None, None
84 |
85 | async def search(self, query, page, limit):
86 | async with aiohttp.ClientSession() as session:
87 | start_time = time.time()
88 | self.LIMIT = limit
89 | url = self.BASE_URL + "/?t={}&p={}".format(query, page - 1)
90 | return await self.parser_result(start_time, url, session)
91 |
92 | async def parser_result(self, start_time, url, session):
93 | htmls = await Scraper().get_all_results(session, url)
94 | result, urls = self._parser(htmls)
95 | if result is not None:
96 | results = await self._get_torrent(result, session, urls)
97 | results["time"] = time.time() - start_time
98 | results["total"] = len(results["data"])
99 | return results
100 | return result
101 |
--------------------------------------------------------------------------------
/torrents/torrent_galaxy.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.html_scraper import Scraper
6 | from constants.base_url import TGX
7 |
8 |
9 | class TorrentGalaxy:
10 | _name = "Torrent Galaxy"
11 | def __init__(self):
12 | self.BASE_URL = TGX
13 | self.LIMIT = None
14 |
15 | def _parser_individual(self, html):
16 | try:
17 | soup = BeautifulSoup(html[0], "html.parser")
18 | my_dict = {"data": []}
19 | root_div = soup.find("div", class_="gluewrapper")
20 | post_nd_torrents = root_div.find_next("div").find_all("div")
21 | poster = post_nd_torrents[1].find("img")["data-src"]
22 | torrentsand_all = post_nd_torrents[4].find_all("a")
23 | torrent_link = torrentsand_all[0]["href"]
24 | magnet_link = torrentsand_all[1]["href"]
25 | direct_link = self.BASE_URL + torrentsand_all[2]["href"]
26 |
27 | details_root = soup.find("div", class_="gluewrapper").select(
28 | "div > :nth-child(2) > div > .tprow"
29 | )
30 |
31 | name = details_root[0].find_all("div")[-1].get_text(strip=True)
32 | category = (
33 | details_root[3].find_all("div")[-1].get_text(strip=True).split(">")[0]
34 | )
35 | language = details_root[4].find_all("div")[-1].get_text(strip=True)
36 | size = details_root[5].find_all("div")[-1].get_text(strip=True)
37 | hash = details_root[6].find_all("div")[-1].get_text(strip=True)
38 | username = (
39 | details_root[7]
40 | .find_all("div")[-1]
41 | .find("span", class_="username")
42 | .get_text(strip=True)
43 | )
44 | date_up = details_root[8].find_all("div")[-1].get_text(strip=True)
45 |
46 | btns = details_root[10].find_all("button")
47 | seeders = btns[0].find("span").get_text(strip=True)
48 | leechers = btns[1].find("span").get_text(strip=True)
49 | downloads = btns[2].find("span").get_text(strip=True)
50 | imdb_id = soup.select_one("#imdbpage")["href"].split("/")[-1]
51 | genre_list = [
52 | x.get_text(strip=True) for x in details_root[11].find_all("a")
53 | ]
54 | soup.find("div", id="intblockslide").find_all("a")
55 | imgs = [
56 | img["href"]
57 | for img in (soup.find("div", id="intblockslide").find_all("a"))
58 | if img["href"].endswith((".png", ".jpg", ".jpeg"))
59 | ]
60 | my_dict["data"].append(
61 | {
62 | "name": name,
63 | "size": size,
64 | "seeders": seeders,
65 | "language": languagee,
66 | "leechers": leechers,
67 | "category": category,
68 | "uploader": username,
69 | "downloads": downloads,
70 | "poster": poster,
71 | "direct_download_link": direct_link,
72 | "imdb_id": imdb_id,
73 | "hash": hash,
74 | "magnet": magnet_link,
75 | "torrent": torrent_link,
76 | "screenshot": imgs,
77 | "genre": genre_list,
78 | "date": date_up,
79 | }
80 | )
81 | return my_dict
82 | except:
83 | return None
84 |
85 | def _parser(self, htmls):
86 | try:
87 | for html in htmls:
88 | soup = BeautifulSoup(html, "html.parser")
89 |
90 | my_dict = {"data": []}
91 | for idx, divs in enumerate(soup.find_all("div", class_="tgxtablerow")):
92 | div = divs.find_all("div")
93 | try:
94 | name = div[4].find("a").get_text(strip=True)
95 | imdb_url = (div[4].find_all("a"))[-1]["href"]
96 | except:
97 | name = (div[1].find("a", class_="txlight")).find("b").text
98 | imdb_url = (div[1].find_all("a"))[-1]["href"]
99 |
100 | if name != "":
101 | try:
102 | magnet = div[5].find_all("a")[1]["href"]
103 | torrent = div[5].find_all("a")[0]["href"]
104 | except:
105 | magnet = div[3].find_all("a")[1]["href"]
106 | torrent = div[3].find_all("a")[0]["href"]
107 | size = soup.select("span.badge.badge-secondary.txlight")[
108 | idx
109 | ].text
110 | try:
111 | url = div[4].find("a")["href"]
112 | except:
113 | url = div[1].find("a", class_="txlight")["href"]
114 | try:
115 | date = div[12].get_text(strip=True)
116 | except:
117 | date = div[10].get_text(strip=True)
118 | try:
119 | seeders_leechers = div[11].find_all("b")
120 | seeders = seeders_leechers[0].text
121 | leechers = seeders_leechers[1].text
122 | except:
123 | # if the seeder/leecher cells are missing, keep the row with empty values
124 | seeders = None
125 | leechers = None
126 | try:
127 | uploader = (div[7].find("a")).find("span").text
128 | except:
129 | uploader = (div[5].find("a")).find("span").text
130 | try:
131 | category = (
132 | div[0].find("small").text.replace(" ", "")
133 | ).split(":")[0]
134 | except:
135 | category = None
136 | my_dict["data"].append(
137 | {
138 | "name": name,
139 | "size": size,
140 | "seeders": seeders,
141 | "leechers": leechers,
142 | "category": category,
143 | "uploader": uploader,
144 | "imdb_id": imdb_url.split("=")[-1],
145 | "hash": re.search(
146 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
147 | ).group(0),
148 | "magnet": magnet,
149 | "torrent": torrent,
150 | "url": self.BASE_URL + url,
151 | "date": date,
152 | }
153 | )
154 | if len(my_dict["data"]) == self.LIMIT:
155 | break
156 | try:
157 | ul = soup.find_all("ul", class_="pagination")[-1]
158 | tpages = ul.find_all("li")[-2]
159 | my_dict["current_page"] = int(
160 | soup.select_one("li.page-item.active.txlight a").text.split(
161 | " "
162 | )[0]
163 | )
164 | my_dict["total_pages"] = int(tpages.find("a").text)
165 | except:
166 | my_dict["current_page"] = None
167 | my_dict["total_pages"] = None
168 | # ...
169 | return my_dict
170 | except:
171 | return None
172 |
173 | async def search(self, query, page, limit):
174 | async with aiohttp.ClientSession() as session:
175 | start_time = time.time()
176 | self.LIMIT = limit
177 | url = (
178 | self.BASE_URL
179 | + "/torrents.php?search=+{}&sort=seeders&order=desc&page={}".format(
180 | query, page - 1
181 | )
182 | )
183 | return await self.parser_result(start_time, url, session)
184 |
185 | async def get_torrent_by_url(self, torrent_url):
186 | async with aiohttp.ClientSession() as session:
187 | start_time = time.time()
188 | return await self.parser_result(
189 | start_time, torrent_url, session, is_individual=True
190 | )
191 |
192 | async def parser_result(self, start_time, url, session, is_individual=False):
193 | html = await Scraper().get_all_results(session, url)
194 | if is_individual:
195 | results = self._parser_individual(html)
196 | else:
197 | results = self._parser(html)
198 | if results is not None:
199 | results["time"] = time.time() - start_time
200 | results["total"] = len(results["data"])
201 | return results
202 | return results
203 |
204 | async def trending(self, category, page, limit):
205 | async with aiohttp.ClientSession() as session:
206 | start_time = time.time()
207 | self.LIMIT = limit
208 | url = self.BASE_URL
209 | return await self.parser_result(start_time, url, session)
210 |
211 | async def recent(self, category, page, limit):
212 | async with aiohttp.ClientSession() as session:
213 | start_time = time.time()
214 | self.LIMIT = limit
215 | if not category:
216 | url = self.BASE_URL + "/latest"
217 | else:
218 | if category == "documentaries":
219 | category = "Docus"
220 | url = (
221 | self.BASE_URL
222 | + "/torrents.php?parent_cat={}&sort=id&order=desc&page={}".format(
223 | str(category).capitalize(), page - 1
224 | )
225 | )
226 | return await self.parser_result(start_time, url, session)
227 |
228 | #! Maybe to be implemented in the future
229 |
--------------------------------------------------------------------------------
/torrents/torrentfunk.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
6 | from helper.html_scraper import Scraper
7 | from constants.base_url import TORRENTFUNK
8 | from constants.headers import HEADER_AIO
9 |
10 |
11 | class TorrentFunk:
12 | _name = "Torrent Funk"
13 | def __init__(self):
14 | self.BASE_URL = TORRENTFUNK
15 | self.LIMIT = None
16 |
17 | @decorator_asyncio_fix
18 | async def _individual_scrap(self, session, url, obj):
19 | try:
20 | async with session.get(url, headers=HEADER_AIO) as res:
21 | html = await res.text(encoding="ISO-8859-1")
22 | soup = BeautifulSoup(html, "html.parser")
23 | try:
24 | obj["torrent"] = soup.select_one(
25 | "#right > main > div.content > table:nth-child(3) > tr > td:nth-child(2) > a"
26 | )["href"]
27 | obj["category"] = soup.select_one(
28 | "#right > main > div.content > table:nth-child(7) > tr> td:nth-child(2) > a"
29 | ).text
30 | obj["hash"] = soup.select_one(
31 | "#right > main > div.content > table:nth-child(7) > tr:nth-child(3) > td:nth-child(2)"
32 | ).text
33 | except:
34 | ...
35 | except:
36 | return None
37 |
38 | async def _get_torrent(self, result, session, urls):
39 | tasks = []
40 | for idx, url in enumerate(urls):
41 | for obj in result["data"]:
42 | if obj["url"] == url:
43 | task = asyncio.create_task(
44 | self._individual_scrap(session, url, result["data"][idx])
45 | )
46 | tasks.append(task)
47 | await asyncio.gather(*tasks)
48 | return result
49 |
50 | def _parser(self, htmls, idx=1):
51 | try:
52 | for html in htmls:
53 | soup = BeautifulSoup(html, "html.parser")
54 | list_of_urls = []
55 | my_dict = {"data": []}
56 |
57 | for tr in soup.select(".tmain tr")[idx:]:
58 | td = tr.find_all("td")
59 | if len(td) == 0:
60 | continue
61 | name = td[0].find("a").text
62 | date = td[1].text
63 | size = td[2].text
64 | seeders = td[3].text
65 | leechers = td[4].text
66 | uploader = td[5].text
67 | url = self.BASE_URL + td[0].find("a")["href"]
68 | list_of_urls.append(url)
69 | my_dict["data"].append(
70 | {
71 | "name": name,
72 | "size": size,
73 | "date": date,
74 | "seeders": seeders,
75 | "leechers": leechers,
76 | "uploader": uploader if uploader else None,
77 | "url": url,
78 | }
79 | )
80 | if len(my_dict["data"]) == self.LIMIT:
81 | break
82 | return my_dict, list_of_urls
83 | except:
84 | return None, None
85 |
86 | async def search(self, query, page, limit):
87 | async with aiohttp.ClientSession() as session:
88 | start_time = time.time()
89 | self.LIMIT = limit
90 | url = self.BASE_URL + "/all/torrents/{}/{}.html".format(query, page)
91 | return await self.parser_result(start_time, url, session, idx=6)
92 |
93 | async def parser_result(self, start_time, url, session, idx=1):
94 | htmls = await Scraper().get_all_results(session, url)
95 | result, urls = self._parser(htmls, idx)
96 | if result:
97 | results = await self._get_torrent(result, session, urls)
98 | results["time"] = time.time() - start_time
99 | results["total"] = len(results["data"])
100 | return results
101 | return result
102 |
103 | async def trending(self, category, page, limit):
104 | async with aiohttp.ClientSession() as session:
105 | start_time = time.time()
106 | self.LIMIT = limit
107 | url = self.BASE_URL
108 | return await self.parser_result(start_time, url, session)
109 |
110 | async def recent(self, category, page, limit):
111 | async with aiohttp.ClientSession() as session:
112 | start_time = time.time()
113 | self.LIMIT = limit
114 | if not category:
115 | url = self.BASE_URL + "/movies/recent.html"
116 | else:
117 | if category == "apps":
118 | category = "software"
119 | elif category == "tv":
120 | category = "television"
121 | elif category == "books":
122 | category = "ebooks"
123 | url = self.BASE_URL + "/{}/recent.html".format(category)
124 | return await self.parser_result(start_time, url, session)
125 |
--------------------------------------------------------------------------------
/torrents/x1337.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import X1337
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class x1337:
13 | _name = "1337x"
14 | def __init__(self):
15 | self.BASE_URL = X1337
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj):
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | magnet = soup.select_one(".no-top-radius > div > ul > li > a")[
26 | "href"
27 | ]
28 | uls = soup.find_all("ul", class_="list")[1]
29 | lis = uls.find_all("li")[0]
30 | imgs = [
31 | img["data-original"]
32 | for img in (soup.find("div", id="description")).find_all("img")
33 | if img["data-original"].endswith((".png", ".jpg", ".jpeg"))
34 | ]
35 | files = [
36 | f.text for f in soup.find("div", id="files").find_all("li")
37 | ]
38 | if len(imgs) > 0:
39 | obj["screenshot"] = imgs
40 | obj["category"] = lis.find("span").text
41 | obj["files"] = files
42 | try:
43 | poster = soup.select_one("div.torrent-image img")["src"]
44 | if str(poster).startswith("//"):
45 | obj["poster"] = "https:" + poster
46 | elif str(poster).startswith("/"):
47 | obj["poster"] = self.BASE_URL + poster
48 | except:
49 | ...
50 | obj["magnet"] = magnet
51 |
52 | obj["hash"] = re.search(
53 | r"([{a-f\d,A-F\d}]{32,40})\b", magnet
54 | ).group(0)
55 | except IndexError:
56 | ...
57 | except:
58 | return None
59 |
60 | async def _get_torrent(self, result, session, urls):
61 | tasks = []
62 | for idx, url in enumerate(urls):
63 | for obj in result["data"]:
64 | if obj["url"] == url:
65 | task = asyncio.create_task(
66 | self._individual_scrap(session, url, result["data"][idx])
67 | )
68 | tasks.append(task)
69 | await asyncio.gather(*tasks)
70 | return result
71 |
72 | def _parser(self, htmls):
73 | try:
74 | for html in htmls:
75 | soup = BeautifulSoup(html, "html.parser")
76 | list_of_urls = []
77 | my_dict = {"data": []}
78 | trs = soup.select("tbody tr")
79 | for tr in trs:
80 | td = tr.find_all("td")
81 | name = td[0].find_all("a")[-1].text
82 | if name:
83 | url = self.BASE_URL + td[0].find_all("a")[-1]["href"]
84 | list_of_urls.append(url)
85 | seeders = td[1].text
86 | leechers = td[2].text
87 | date = td[3].text
88 | size = td[4].text.replace(seeders, "")
89 | uploader = td[5].find("a").text
90 |
91 | my_dict["data"].append(
92 | {
93 | "name": name,
94 | "size": size,
95 | "date": date,
96 | "seeders": seeders,
97 | "leechers": leechers,
98 | "url": url,
99 | "uploader": uploader,
100 | }
101 | )
102 | if len(my_dict["data"]) == self.LIMIT:
103 | break
104 | try:
105 | pages = soup.select(".pagination li a")
106 | my_dict["current_page"] = int(pages[0].text)
107 | tpages = pages[-1].text
108 | if tpages == ">>":
109 | my_dict["total_pages"] = int(pages[-2].text)
110 | else:
111 | my_dict["total_pages"] = int(pages[-1].text)
112 | except:
113 | ...
114 | return my_dict, list_of_urls
115 | except:
116 | return None, None
117 |
118 | async def search(self, query, page, limit):
119 | async with aiohttp.ClientSession() as session:
120 | self.LIMIT = limit
121 | start_time = time.time()
122 | url = self.BASE_URL + "/search/{}/{}/".format(query, page)
123 | return await self.parser_result(
124 | start_time, url, session, query=query, page=page
125 | )
126 |
127 | async def parser_result(self, start_time, url, session, page, query=None):
128 | htmls = await Scraper().get_all_results(session, url)
129 | result, urls = self._parser(htmls)
130 | if result is not None:
131 | results = await self._get_torrent(result, session, urls)
132 | results["time"] = time.time() - start_time
133 | results["total"] = len(results["data"])
134 | if query is None:
135 | return results
136 | while True:
137 | if len(results["data"]) >= self.LIMIT:
138 | results["data"] = results["data"][0 : self.LIMIT]
139 | results["total"] = len(results["data"])
140 | return results
141 | page = page + 1
142 | url = self.BASE_URL + "/search/{}/{}/".format(query, page)
143 | htmls = await Scraper().get_all_results(session, url)
144 | result, urls = self._parser(htmls)
145 | if result is not None:
146 | if len(result["data"]) > 0:
147 | res = await self._get_torrent(result, session, urls)
148 | for obj in res["data"]:
149 | results["data"].append(obj)
150 | try:
151 | results["current_page"] = res["current_page"]
152 | except:
153 | ...
154 | results["time"] = time.time() - start_time
155 | results["total"] = len(results["data"])
156 | else:
157 | break
158 | else:
159 | break
160 | return results
161 | return result
162 |
163 | async def trending(self, category, page, limit):
164 | async with aiohttp.ClientSession() as session:
165 | start_time = time.time()
166 | self.LIMIT = limit
167 | if not category:
168 | url = self.BASE_URL + "/home/"
169 | else:
170 | url = self.BASE_URL + "/popular-{}".format(category.lower())
171 | return await self.parser_result(start_time, url, session, page)
172 |
173 | async def recent(self, category, page, limit):
174 | async with aiohttp.ClientSession() as session:
175 | start_time = time.time()
176 | self.LIMIT = limit
177 | if not category:
178 | url = self.BASE_URL + "/trending"
179 | else:
180 | url = self.BASE_URL + "/cat/{}/{}/".format(
181 | str(category).capitalize(), page
182 | )
183 | return await self.parser_result(start_time, url, session, page)
184 |
185 | async def search_by_category(self, query, category, page, limit):
186 | async with aiohttp.ClientSession() as session:
187 | start_time = time.time()
188 | self.LIMIT = limit
189 | url = self.BASE_URL + "/category-search/{}/{}/{}/".format(
190 | query, category.capitalize(), page
191 | )
192 | return await self.parser_result(start_time, url, session, page, query)
193 |
--------------------------------------------------------------------------------
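The x1337 parser_result above keeps paging when a search query is supplied: if the first page yields fewer rows than LIMIT, it requests page + 1, appends those rows, and stops once LIMIT is reached or a page comes back empty. A generic sketch of that accumulate-until-limit loop (illustrative; fetch_page stands in for the Scraper/_parser/_get_torrent calls in the file):

    from typing import Callable, Dict, List

    def collect_until_limit(
        fetch_page: Callable[[int], List[Dict]], start_page: int, limit: int
    ) -> List[Dict]:
        items: List[Dict] = []
        page = start_page
        while len(items) < limit:
            rows = fetch_page(page)   # one page of already-parsed results
            if not rows:              # empty page: nothing more to fetch
                break
            items.extend(rows)
            page += 1
        return items[:limit]          # trim any overshoot from the last page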
/torrents/your_bittorrent.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
6 | from helper.html_scraper import Scraper
7 | from constants.base_url import YOURBITTORRENT
8 | from constants.headers import HEADER_AIO
9 |
10 |
11 | class YourBittorrent:
12 | _name = "Your BitTorrent"
13 | def __init__(self):
14 | self.BASE_URL = YOURBITTORRENT
15 | self.LIMIT = None
16 |
17 | @decorator_asyncio_fix
18 | async def _individual_scrap(self, session, url, obj):
19 | try:
20 | async with session.get(url, headers=HEADER_AIO) as res:
21 | html = await res.text(encoding="ISO-8859-1")
22 | soup = BeautifulSoup(html, "html.parser")
23 | try:
24 | container = soup.select_one("div.card-body.container")
25 | poster = (
26 | container.find("div")
27 | .find_all("div")[0]
28 | .find("picture")
29 | .find("img")["src"]
30 | )
31 | clearfix = soup.find("div", class_="clearfix")
32 | torrent = clearfix.find("div").find_all("div")[1].find("a")["href"]
33 | obj["torrent"] = torrent
34 | obj["poster"] = poster
35 | except:
36 | ...
37 | except:
38 | return None
39 |
40 | async def _get_torrent(self, result, session, urls):
41 | tasks = []
42 | for idx, url in enumerate(urls):
43 | for obj in result["data"]:
44 | if obj["url"] == url:
45 | task = asyncio.create_task(
46 | self._individual_scrap(session, url, result["data"][idx])
47 | )
48 | tasks.append(task)
49 | await asyncio.gather(*tasks)
50 | return result
51 |
52 | def _parser(self, htmls, idx=1):
53 | try:
54 | for html in htmls:
55 | soup = BeautifulSoup(html, "html.parser")
56 | list_of_urls = []
57 | my_dict = {"data": []}
58 |
59 | for tr in soup.find_all("tr")[idx:]:
60 | td = tr.find_all("td")
61 | name = td[1].find("a").get_text(strip=True)
62 | url = self.BASE_URL + td[1].find("a")["href"]
63 | list_of_urls.append(url)
64 | size = td[2].text
65 | date = td[3].text
66 | seeders = td[4].text
67 | leechers = td[5].text
68 | my_dict["data"].append(
69 | {
70 | "name": name,
71 | "size": size,
72 | "date": date,
73 | "seeders": seeders,
74 | "leechers": leechers,
75 | "url": url,
76 | }
77 | )
78 | if len(my_dict["data"]) == self.LIMIT:
79 | break
80 | return my_dict, list_of_urls
81 | except:
82 | return None, None
83 |
84 | async def search(self, query, page, limit):
85 | async with aiohttp.ClientSession() as session:
86 | start_time = time.time()
87 | self.LIMIT = limit
88 | url = self.BASE_URL + "/?v=&c=&q={}".format(query)
89 | return await self.parser_result(start_time, url, session, idx=6)
90 |
91 | async def parser_result(self, start_time, url, session, idx=1):
92 | htmls = await Scraper().get_all_results(session, url)
93 | result, urls = self._parser(htmls, idx)
94 | if result is not None:
95 | results = await self._get_torrent(result, session, urls)
96 | results["time"] = time.time() - start_time
97 | results["total"] = len(results["data"])
98 | return results
99 | return result
100 |
101 | async def trending(self, category, page, limit):
102 | async with aiohttp.ClientSession() as session:
103 | start_time = time.time()
104 | self.LIMIT = limit
105 | idx = None
106 | if not category:
107 | url = self.BASE_URL + "/top.html"
108 | idx = 1
109 | else:
110 | if category == "books":
111 | category = "ebooks"
112 | url = self.BASE_URL + f"/{category}.html"
113 | idx = 4
114 | return await self.parser_result(start_time, url, session, idx)
115 |
116 | async def recent(self, category, page, limit):
117 | async with aiohttp.ClientSession() as session:
118 | start_time = time.time()
119 | self.LIMIT = limit
120 | idx = None
121 | if not category:
122 | url = self.BASE_URL + "/new.html"
123 | idx = 1
124 | else:
125 | if category == "books":
126 | category = "ebooks"
127 | url = self.BASE_URL + f"/{category}/latest.html"
128 | idx = 4
129 | return await self.parser_result(start_time, url, session, idx)
130 |
--------------------------------------------------------------------------------
/torrents/yts.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import re
3 | import time
4 | import aiohttp
5 | from bs4 import BeautifulSoup
6 | from helper.asyncioPoliciesFix import decorator_asyncio_fix
7 | from helper.html_scraper import Scraper
8 | from constants.base_url import YTS
9 | from constants.headers import HEADER_AIO
10 |
11 |
12 | class Yts:
13 | _name = "YTS"
14 | def __init__(self):
15 | self.BASE_URL = YTS
16 | self.LIMIT = None
17 |
18 | @decorator_asyncio_fix
19 | async def _individual_scrap(self, session, url, obj):
20 | try:
21 | async with session.get(url, headers=HEADER_AIO) as res:
22 | html = await res.text(encoding="ISO-8859-1")
23 | soup = BeautifulSoup(html, "html.parser")
24 | try:
25 | name = soup.select_one("div.hidden-xs h1").text
26 | div = soup.select("div.hidden-xs h2")
27 | date = div[0].text
28 | genre = div[1].text.split("/")
29 | rating = soup.select_one("[itemprop=ratingValue]").text
30 | poster = (
31 | soup.find("div", id="movie-poster")
32 | .find("img")["src"]
33 | .split("/")
34 | )
35 | poster[-1] = poster[-1].replace("medium", "large")
36 | poster = "/".join(poster)
37 | description = soup.select("div#synopsis > p")[0].text.strip()
38 | runtime = (
39 | soup.select_one(".tech-spec-info")
40 | .find_all("div", class_="row")[-1]
41 | .find_all("div")[-3]
42 | .text.strip()
43 | )
44 |
45 | screenshots = soup.find_all("a", class_="screenshot-group")
46 | screenshots = [a["href"] for a in screenshots]
47 | torrents = []
48 | for div in soup.find_all("div", class_="modal-torrent"):
49 | quality = (
50 | div.find("div", class_="modal-quality").find("span").text
51 | )
52 | all_p = div.find_all("p", class_="quality-size")
53 | quality_type = all_p[0].text
54 | size = all_p[1].text
55 | torrent_link = div.find("a", class_="download-torrent")["href"]
56 | magnet = div.find("a", class_="magnet-download")["href"]
57 | hash = re.search(r"([{a-f\d,A-F\d}]{32,40})\b", magnet).group(0)
58 | torrents.append(
59 | {
60 | "quality": quality,
61 | "type": quality_type,
62 | "size": size,
63 | "torrent": torrent_link,
64 | "magnet": magnet,
65 | "hash": hash,
66 | }
67 | )
68 | obj["name"] = name
69 | obj["date"] = date
70 | obj["genre"] = genre
71 | obj["rating"] = rating
72 | obj["poster"] = poster
73 | obj["description"] = description
74 | obj["runtime"] = runtime
75 | obj["screenshot"] = screenshots
76 | obj["torrents"] = torrents
77 | except:
78 | ...
79 | except:
80 | return None
81 |
82 | async def _get_torrent(self, result, session, urls):
83 | tasks = []
84 | for idx, url in enumerate(urls):
85 | for obj in result["data"]:
86 | if obj["url"] == url:
87 | task = asyncio.create_task(
88 | self._individual_scrap(session, url, result["data"][idx])
89 | )
90 | tasks.append(task)
91 | await asyncio.gather(*tasks)
92 | return result
93 |
94 | def _parser(self, htmls):
95 | try:
96 | for html in htmls:
97 | soup = BeautifulSoup(html, "html.parser")
98 | list_of_urls = []
99 | my_dict = {"data": []}
100 | for div in soup.find_all("div", class_="browse-movie-wrap"):
101 | url = div.find("a")["href"]
102 | list_of_urls.append(url)
103 | my_dict["data"].append({"url": url})
104 | if len(my_dict["data"]) == self.LIMIT:
105 | break
106 | try:
107 | ul = soup.find("ul", class_="tsc_pagination")
108 | current_page = ul.find("a", class_="current").text
109 | my_dict["current_page"] = int(current_page)
110 | if current_page:
111 | total_results = soup.select_one(
112 | "body > div.main-content > div.browse-content > div > h2 > b"
113 | ).text
114 | if "," in total_results:
115 | total_results = total_results.replace(",", "")
116 |                         total_results = int(total_results)
117 |                         # 20 results per page; use ceiling division so a
118 |                         # partial final page is still counted
119 |                         my_dict["total_pages"] = (
120 |                             total_results + 19
121 |                         ) // 20
122 |
123 |                 except Exception:
124 |                     ...  # pagination block missing; omit page counts
125 | return my_dict, list_of_urls
126 |         except Exception:
127 |             return None, None
128 |
129 | async def search(self, query, page, limit):
130 | async with aiohttp.ClientSession() as session:
131 | start_time = time.time()
132 | self.LIMIT = limit
133 | if page != 1:
134 | url = (
135 | self.BASE_URL
136 | + "/browse-movies/{}/all/all/0/latest/0/all?page={}".format(
137 | query, page
138 | )
139 | )
140 | else:
141 | url = self.BASE_URL + "/browse-movies/{}/all/all/0/latest/0/all".format(
142 | query
143 | )
144 | return await self.parser_result(start_time, url, session)
145 |
146 | async def parser_result(self, start_time, url, session):
147 | htmls = await Scraper().get_all_results(session, url)
148 | result, urls = self._parser(htmls)
149 | if result is not None:
150 | results = await self._get_torrent(result, session, urls)
151 | results["time"] = time.time() - start_time
152 | results["total"] = len(results["data"])
153 | return results
154 | return result
155 |
156 | async def trending(self, category, page, limit):
157 | async with aiohttp.ClientSession() as session:
158 | start_time = time.time()
159 | self.LIMIT = limit
160 | url = self.BASE_URL + "/trending-movies"
161 | return await self.parser_result(start_time, url, session)
162 |
163 | async def recent(self, category, page, limit):
164 | async with aiohttp.ClientSession() as session:
165 | start_time = time.time()
166 | self.LIMIT = limit
167 | if page != 1:
168 | url = (
169 | self.BASE_URL
170 | + "/browse-movies/0/all/all/0/featured/0/all?page={}".format(page)
171 | )
172 | else:
173 | url = self.BASE_URL + "/browse-movies/0/all/all/0/featured/0/all"
174 | return await self.parser_result(start_time, url, session)
175 |
--------------------------------------------------------------------------------
/torrents/zooqle.py:
--------------------------------------------------------------------------------
1 | import re
2 | import time
3 | import aiohttp
4 | from bs4 import BeautifulSoup
5 | from helper.html_scraper import Scraper
6 | from constants.base_url import ZOOQLE
7 |
8 |
9 | class Zooqle:
10 | _name = "Zooqle"
11 | def __init__(self):
12 | self.BASE_URL = ZOOQLE
13 | self.LIMIT = None
14 |
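   |     # Parse a Zooqle results table into {"data": [...]} with name, size,
   |     # seeders, leechers, hash, magnet, url and date for each row, plus
   |     # pagination info when available.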
15 | def _parser(self, htmls):
16 | try:
17 | for html in htmls:
18 | soup = BeautifulSoup(html, "html.parser")
19 |
20 | my_dict = {"data": []}
21 |
22 | for tr in soup.find_all("tr")[1:]:
23 | td = tr.find_all("td")
24 | name = td[1].find("a").get_text(strip=True)
25 | if name != "":
26 | magnet = td[2].find_all("a")[1]["href"]
27 | try:
28 | size = td[3].find_all("div")[1].text
29 | except IndexError:
30 | size = None
31 | url = td[1].find_all("a")[0]["href"]
32 | date = td[4].get_text(strip=True)
33 | seeders_leechers = td[5].find("div")["title"].split("|")
34 | seeders = seeders_leechers[0].replace("Seeders: ", "").strip()
35 | leechers = seeders_leechers[1].replace("Leechers: ", "").strip()
36 | my_dict["data"].append(
37 | {
38 | "name": name,
39 | "size": size,
40 | "seeders": seeders,
41 | "leechers": leechers,
42 | "hash": re.search(
43 |                                 r"([a-fA-F\d]{32,40})\b", magnet
44 | ).group(0),
45 | "magnet": magnet,
46 | "url": self.BASE_URL + url,
47 | "date": date,
48 | }
49 | )
50 | if len(my_dict["data"]) == self.LIMIT:
51 | break
52 | try:
53 | ul = soup.find("ul", class_="pagination")
54 | tpages = ul.find_all("a")[-3].text
55 | current_page = (ul.find("li", class_="active")).find("a").text
56 | my_dict["current_page"] = int(current_page)
57 | my_dict["total_pages"] = int(tpages)
58 |                 except Exception:
59 | my_dict["current_page"] = None
60 | my_dict["total_pages"] = None
61 | return my_dict
62 |         except Exception:
63 | return None
64 |
65 | async def search(self, query, page, limit):
66 | async with aiohttp.ClientSession() as session:
67 | start_time = time.time()
68 | self.LIMIT = limit
69 | url = self.BASE_URL + "/search?pg={1}&q={0}&v=t".format(query, page)
70 | return await self.parser_result(start_time, url, session)
71 |
72 | async def parser_result(self, start_time, url, session):
73 | html = await Scraper().get_all_results(session, url)
74 | results = self._parser(html)
75 | if results is not None:
76 | results["time"] = time.time() - start_time
77 | results["total"] = len(results["data"])
78 | return results
79 | return results
80 |
--------------------------------------------------------------------------------
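
A minimal usage sketch (hypothetical, not one of the repository files above): it drives the Yts and Zooqle scrapers directly, assuming it is run from the repository root so that the `torrents` package is importable, with aiohttp and beautifulsoup4 installed.

import asyncio
import json

from torrents.yts import Yts
from torrents.zooqle import Zooqle


async def main():
    # search(query, page, limit) returns a dict whose "data" list holds the
    # scraped entries (plus timing/paging metadata), or None if scraping failed.
    movies = await Yts().search("inception", page=1, limit=5)
    general = await Zooqle().search("ubuntu", page=1, limit=5)
    print(json.dumps(movies, indent=2) if movies else "YTS scrape failed")
    print(json.dumps(general, indent=2) if general else "Zooqle scrape failed")


if __name__ == "__main__":
    asyncio.run(main())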