├── .gitattributes ├── .gitignore ├── LICENSE ├── Procfile ├── README.md ├── app.json ├── requirements.txt └── udemy ├── __init__.py ├── __main__.py ├── bot ├── __init__.py └── bot.py └── duce ├── __init__.py └── duce.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | *.session 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .nox/ 42 | .coverage 43 | .coverage.* 44 | .cache 45 | nosetests.xml 46 | coverage.xml 47 | *.cover 48 | .hypothesis/ 49 | .pytest_cache/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | db.sqlite3 59 | 60 | # Flask stuff: 61 | instance/ 62 | .webassets-cache 63 | 64 | # Scrapy stuff: 65 | .scrapy 66 | 67 | # Sphinx documentation 68 | docs/_build/ 69 | 70 | # PyBuilder 71 | target/ 72 | 73 | # Jupyter Notebook 74 | .ipynb_checkpoints 75 | 76 | # IPython 77 | profile_default/ 78 | ipython_config.py 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # SageMath parsed files 87 | *.sage.py 88 | 89 | # Environments 90 | .env 91 | .venv 92 | env/ 93 | venv/ 94 | ENV/ 95 | env.bak/ 96 | venv.bak/ 97 | 98 | # Spyder project settings 99 | .spyderproject 100 | .spyproject 101 | 102 | # Rope project settings 103 | .ropeproject 104 | 105 | # mkdocs documentation 106 | /site 107 | 108 | # mypy 109 | .mypy_cache/ 110 | .dmypy.json 111 | dmypy.json 112 | 113 | # Pyre type checker 114 | .pyre/ 115 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 GautamKumar 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | 
The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | worker: python3 -m udemy -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ⚡ UdemyBot 😋 [![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fgithub.com%2Fgautamajay52%2FUdemyBot&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://github.com/gautamajay52/UdemyBot) 2 | 3 | 4 | ## ⚡ Vars: 🔥 5 | 6 | * `API_HASH`: Get one from [HERE](https://my.telegram.org/apps) 7 | * `API_ID`: Get one from [HERE](https://my.telegram.org/apps) 8 | * `TOKEN`: From BotFather 9 | 10 | 11 | ## ⚡ Steps: 🔥 12 | 13 | 1) Tap on [![Deploy](https://www.herokucdn.com/deploy/button.svg)](https://heroku.com/deploy) 14 | 2) Fill all the details. 15 | 3) Turn on worker. 16 | 4) Enjoy 👌 17 | 18 | ## ⚡ ToDo: 🔥 19 | 20 | - [ ] Automatic Enroller. 
21 | 22 | # ⚡ Credits: 🌎 23 | * [GautamKumar(me)](https://github.com/gautamajay52) for [Nothing](https://github.com/gautamajay52/UdemyBot)😬😁 -------------------------------------------------------------------------------- /app.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "UdemyBot", 3 | "description": "UdemyBot - A Simple Udemy Free Courses Scrapper", 4 | "keywords": [ 5 | "telegram", 6 | "github", 7 | "udemy" 8 | ], 9 | "repository": "https://github.com/gautamajay52/UdemyBot", 10 | "success_url": "https://t.me/gbotstoresupport", 11 | "website": "https://github.com/gautamajay52", 12 | "env": { 13 | "API_HASH": { 14 | "description": "Get one from https://my.telegram.org/apps", 15 | "value": "" 16 | }, 17 | "API_ID": { 18 | "description": "Get one from https://my.telegram.org/apps", 19 | "value": "" 20 | }, 21 | "TOKEN": { 22 | "description": "From BotFather", 23 | "value": "" 24 | } 25 | } 26 | } -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohttp 2 | telethon 3 | cryptg 4 | BeautifulSoup4 5 | bs4 6 | html5lib -------------------------------------------------------------------------------- /udemy/__init__.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | import os 6 | 7 | token = os.environ.get('TOKEN') 8 | api_id = os.environ.get('API_ID') 9 | api_hash = os.environ.get('API_HASH') 10 | 11 | START = """ 12 | Hey, I'm an UdemyBot. ⚡ 13 | 14 | I can send you free Udemy Courses Links. 15 | 16 | Commands: 17 | /discudemy page 18 | /udemy_freebies page 19 | /tutorialbar page 20 | /real_discount page 21 | /coursevania 22 | /idcoupons page 23 | 24 | page - which page you wanted to scrap and send links. 
Default is 1 25 | """ 26 | 27 | CMD = "(discudemy|coursevania|udemy_freebies|tutorialbar|real_discount|idcoupons)" 28 | -------------------------------------------------------------------------------- /udemy/__main__.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | from telethon import events 6 | 7 | from udemy import CMD, START 8 | from udemy.bot import UdemyBOT 9 | from udemy.duce import Scrapper 10 | 11 | bot = UdemyBOT() 12 | 13 | 14 | @bot.on(events.NewMessage(func=lambda e: e.is_private, pattern="/(start|help)")) 15 | async def _(event): 16 | await event.reply(START) 17 | 18 | 19 | @bot.on(events.NewMessage(func=lambda e: e.is_private, pattern=f"/{CMD}")) 20 | async def _(event): 21 | text = event.raw_text 22 | _cmd = text.split(" ", maxsplit=1) 23 | cmd = _cmd[0] 24 | page = "" 25 | if len(_cmd) == 2: 26 | page = _cmd[1] 27 | scp = Scrapper() 28 | msg = await event.reply("Wait...") 29 | if cmd == "/discudemy": 30 | if not page: 31 | page = 1 32 | links = await scp.discudemy(page) 33 | elif cmd == "/udemy_freebies": 34 | if not page: 35 | page = 1 36 | links = await scp.udemy_freebies(page) 37 | elif cmd == "/tutorialbar": 38 | if not page: 39 | page = 1 40 | links = await scp.tutorialbar(page) 41 | elif cmd == "/real_discount": 42 | if not page: 43 | page = 1 44 | links = await scp.real_discount(page) 45 | elif cmd == "/coursevania": 46 | links = await scp.coursevania() 47 | elif cmd == "/idcoupons": 48 | if not page: 49 | page = 1 50 | links = await scp.idcoupons(page) 51 | 52 | if not links: 53 | await msg.edit("No Free Courses Available 😞") 54 | return 55 | 56 | mg = "" 57 | for link in links: 58 | for lin in link: 59 | mg += f"{lin}\n" 60 | await event.reply(mg, link_preview=False) 61 | mg = "" 62 | await msg.delete() 63 | 64 | 65 | if __name__ == "__main__": 66 | bot.start_() 67 | 
-------------------------------------------------------------------------------- /udemy/bot/__init__.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | from udemy.bot.bot import UdemyBOT -------------------------------------------------------------------------------- /udemy/bot/bot.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | import asyncio 6 | 7 | from telethon import TelegramClient 8 | from udemy import api_hash, api_id, token 9 | 10 | 11 | class UdemyBOT(TelegramClient): 12 | def __init__(self): 13 | super().__init__("udemybot", api_id=api_id, api_hash=api_hash) 14 | 15 | async def __start(self): 16 | await super().start(bot_token=token) 17 | me = await self.get_me() 18 | print(f"<<< UdemyBot: Started at @{me.username} >>>\n") 19 | await super().run_until_disconnected() 20 | 21 | def start_(self): 22 | loop = asyncio.get_event_loop() 23 | loop.run_until_complete(self.__start()) 24 | -------------------------------------------------------------------------------- /udemy/duce/__init__.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | from udemy.duce.duce import Scrapper -------------------------------------------------------------------------------- /udemy/duce/duce.py: -------------------------------------------------------------------------------- 1 | # UdemyBot - A Simple Udemy Free Courses Scrapper 2 | 3 | # Copyright (C) 2021-Present Gautam Kumar 4 | 5 | 6 | import json 7 | import re 8 | from urllib.parse import unquote 9 | 10 | import aiohttp 11 | from bs4 import BeautifulSoup as bs 12 | from yarl import URL 13 | 14 | # 
# this code/idea has taken from https://github.com/techtanic/Discounted-Udemy-Course-Enroller


class Scrapper:
    """Udemy Free Courses Scrapper.

    Each public coroutine scrapes one coupon-aggregator site and returns
    the results of :meth:`_parse`: numbered markdown links, chunked into
    lists of at most 20 entries (one chunk per Telegram message).
    """

    def __init__(self) -> None:
        # Pretend to be a desktop browser — some of these sites reject the
        # default aiohttp user agent.
        self.head = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        }
        # Deliberately the class, not an instance: each scrape opens its own
        # short-lived session via ``self.session(headers=self.head)``.
        self.session = aiohttp.ClientSession

    # Annotations below are quoted so the class can be defined even before
    # third-party names are importable (they were previously evaluated at
    # class-creation time).

    async def __fetch_html(self, session: "aiohttp.ClientSession", url) -> str:
        """Return the response body of *url* decoded as text."""
        async with session.get(url) as response:
            return await response.text()

    async def __fetch_json(self, session: "aiohttp.ClientSession", url):
        """Return the JSON-decoded response body of *url*.

        (The original ``-> any`` annotation referenced the builtin
        function ``any``, not a type, so it was dropped.)
        """
        async with session.get(url) as response:
            return await response.json()

    async def __fetch_url(self, session: "aiohttp.ClientSession", url) -> "URL":
        """Return the final URL of *url* after following redirects."""
        async with session.get(url) as response:
            return response.url

    async def discudemy(self, page) -> list:
        """Scrape discudemy.com listing page *page* for coupon links."""
        du_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(
                    ass, "https://www.discudemy.com/all/" + str(page)
                ),
                "html5lib",
            )
            cards = soup.find_all("section", "card")
            for items in cards:
                try:
                    title = items.a.text
                    url = items.a["href"]
                    # Two hops: course page -> "go" page -> udemy coupon link.
                    soup = bs(await self.__fetch_html(ass, url), "html5lib")
                    segment = soup.find("div", "ui center aligned basic segment")
                    url = segment.a["href"]
                    soup = bs(await self.__fetch_html(ass, url), "html5lib")
                    du_links.append(
                        title + "|:|" + soup.find("div", "ui segment").a["href"]
                    )
                except AttributeError:
                    # Card without the expected layout — skip it.
                    continue
        return self._parse(du_links)

    async def udemy_freebies(self, page) -> list:
        """Scrape udemyfreebies.com listing page *page* for coupon links."""
        uf_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(
                    ass,
                    "https://www.udemyfreebies.com/free-udemy-courses/" + str(page),
                ),
                "html5lib",
            )
            coupons = soup.find_all("div", "coupon-name")
            for items in coupons:
                try:
                    title = items.a.text
                    url = items.a["href"]
                    soup = bs(await self.__fetch_html(ass, url), "html5lib")
                    button = soup.find("a", class_="button-icon")
                    url = button["href"]
                    # The button is a redirector; record its final udemy URL.
                    uf_links.append(
                        title + "|:|" + str(await self.__fetch_url(ass, url))
                    )
                except AttributeError:
                    continue
        return self._parse(uf_links)

    async def tutorialbar(self, page) -> list:
        """Scrape tutorialbar.com listing page *page* for coupon links."""
        tb_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(
                    ass, "https://www.tutorialbar.com/all-courses/page/" + str(page)
                ),
                "html5lib",
            )
            cards = soup.find_all(
                "div", class_="content_constructor pb0 pr20 pl20 mobilepadding"
            )
            for items in cards:
                try:
                    title = items.a.text
                    url = items.a["href"]
                    soup = bs(await self.__fetch_html(ass, url), "html5lib")
                    link = soup.find("a", class_="btn_offer_block re_track_btn")["href"]
                except (TypeError, AttributeError):
                    # Missing offer button (find() returned None) — skip,
                    # consistent with the other scrapers.
                    continue
                if "www.udemy.com" in link:
                    tb_links.append(title + "|:|" + link)
        return self._parse(tb_links)

    async def real_discount(self, page) -> list:
        """Scrape app.real.discount Udemy store page *page* for coupon links."""
        rd_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(
                    ass, "https://app.real.discount/stores/Udemy?page=" + str(page)
                ),
                "html5lib",
            )
            cards = soup.find_all("div", class_="col-xl-4 col-md-6")
            for items in cards:
                title = items.h3.text
                url = "https://app.real.discount" + items.a["href"]
                soup = bs(await self.__fetch_html(ass, url), "html5lib")
                try:
                    link = soup.select_one("a[href^='https://www.udemy.com']")["href"]
                    rd_links.append(title + "|:|" + link)
                except (TypeError, KeyError):
                    # No udemy anchor on the detail page (select_one -> None)
                    # — skip. (Was a bare ``except:``.)
                    pass
        return self._parse(rd_links)

    async def coursevania(self) -> list:
        """Scrape coursevania.com via its AJAX endpoint (not paginated)."""
        cv_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(ass, "https://coursevania.com/courses/"),
                "html5lib",
            )
            # The nonce lives in an inline script: ``var X = {...};`` —
            # take the JSON after ``=`` and read its "load_content" key.
            nonce_script = soup.find(string=re.compile('load_content'))
            nonce = json.loads(
                nonce_script.strip().strip(";").split('=')[1]
            )["load_content"]
            url = (
                "https://coursevania.com/wp-admin/admin-ajax.php?&template=courses/grid&args={%22posts_per_page%22:%2230%22}&action=stm_lms_load_content&nonce="
                + nonce
                + "&sort=date_high"
            )
            r = await self.__fetch_json(ass, url)
            soup = bs(r["content"], "html5lib")
            cards = soup.find_all(
                "div", attrs={"class": "stm_lms_courses__single--title"}
            )
            for items in cards:
                title = items.h5.text
                url = items.a["href"]
                soup = bs(await self.__fetch_html(ass, url), "html5lib")
                cv_links.append(
                    title
                    + "|:|"
                    + soup.find("div", attrs={"class": "stm-lms-buy-buttons"}).a["href"]
                )
        return self._parse(cv_links)

    async def idcoupons(self, page=1) -> list:
        """Scrape idownloadcoupon.com category page *page* for coupon links."""
        idc_links = []
        async with self.session(headers=self.head) as ass:
            soup = bs(
                await self.__fetch_html(
                    ass,
                    "https://idownloadcoupon.com/product-category/udemy-2/page/"
                    + str(page),
                ),
                "html5lib",
            )
            buttons = soup.find_all("a", attrs={"class": "button product_type_external"})
            for items in buttons:
                title = items["aria-label"]
                # The outbound href wraps the real link in a ``ulp=`` param;
                # fall back to the raw href when it is absent.
                link = unquote(items["href"]).split("ulp=")
                try:
                    link = link[1]
                except IndexError:
                    link = link[0]
                if link.startswith("https://www.udemy.com"):
                    idc_links.append(title + "|:|" + link)
        return self._parse(idc_links)

    @staticmethod
    def _parse(links) -> list:
        """Format ``"title|:|url"`` pairs as numbered markdown links.

        Returns the entries chunked into lists of at most 20, numbered
        consecutively across chunks. An empty/falsy input is returned
        unchanged. (Replaces the original's redundant ``r_links`` shadow
        list with a plain chunking step — output is identical.)
        """
        if not links:
            return links
        formatted = []
        for n, raw in enumerate(links, start=1):
            parts = raw.split("|:|")
            formatted.append(f"{n}) [{parts[0]}]({parts[1]})")
        return [formatted[i : i + 20] for i in range(0, len(formatted), 20)]