├── .gitignore
├── Downloads
│   └── readme.txt
├── README.md
├── main.py
├── LICENSE
└── persian.py
/.gitignore:
--------------------------------------------------------------------------------
# Project exclude paths
/venv/
--------------------------------------------------------------------------------
/Downloads/readme.txt:
--------------------------------------------------------------------------------
Don't delete this directory.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# persian_subdl
This script downloads all available Persian subtitles for a film or series from worldsubtitle.info.

To use it, first install the dependencies from your terminal or cmd:

pip3 install requests
pip3 install bs4

After that you can run the script with:

python3 main.py
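The script prompts for a title, downloads the matching subtitle archives, and extracts them into the Downloads/ directory. A sample session (the title below is only an illustration):

Enter a film or series name:
Dune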
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
import os
from zipfile import ZipFile

import requests

from persian import persian_links


def download_url(url, save_path, chunk_size=128):
    # Stream the file at `url` to `save_path` in small chunks.
    r = requests.get(url, stream=True)
    r.raise_for_status()
    with open(save_path, 'wb') as fd:
        for chunk in r.iter_content(chunk_size=chunk_size):
            fd.write(chunk)


film = input("Enter a film or series name:\n")
links = persian_links(film)

os.makedirs("Downloads", exist_ok=True)  # make sure the target directory exists

for link in links:
    # The last path segment of the download URL is the archive's file name.
    name = link.split('/')[-1]
    print(name)
    archive_path = f"Downloads/{name}"
    try:
        download_url(link, archive_path)
        # Extract the subtitle archive into Downloads/, then delete the zip.
        with ZipFile(archive_path, 'r') as zip_obj:
            zip_obj.extractall("Downloads/")
        os.remove(archive_path)
    except Exception as exc:
        print(f"{name} failed: {exc}")
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Nima

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/persian.py:
--------------------------------------------------------------------------------
import requests
from bs4 import BeautifulSoup


def persian_links(film):
    # Search worldsubtitle.info for `film` and return its subtitle download links.
    html = requests.get("https://worldsubtitle.info/", params={"s": film}).text
    soup = BeautifulSoup(html, 'html.parser')
    try:
        # An exact match is a tag whose title attribute equals the query.
        result = soup.find_all(title=film)[0]
    except IndexError:
        # No exact match: list every search result and let the user pick one.
        print("\nSorry, no exact match was found. Please choose one of these:")
        results = soup.find_all(class_="cat-post-titel")
        for i, item in enumerate(results, start=1):
            print(f"{i} : {item.string}\n")
        choice = int(input("\nChoose a number:\n"))
        film = results[choice - 1].string
        print(film)
        result = results[choice - 1].find('a')

    page_url = result.get('href')
    print(page_url)
    page_html = requests.get(page_url).text
    download_list = BeautifulSoup(page_html, 'html.parser').find_all(class_="new-link-3")
    links = []
    for entry in download_list:
        anchor = entry.find('a')  # skip entries without a download anchor
        if anchor is not None:
            links.append(anchor.get("href"))
    return links
--------------------------------------------------------------------------------