├── runtime.txt
├── Procfile
├── requirements.txt
├── Dockerfile
├── config.json
├── app.py
├── README.md
├── texts.py
├── main.py
├── ddl.py
└── bypasser.py

/runtime.txt:
--------------------------------------------------------------------------------
1 | python-3.9.14
2 | 
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | worker: python3 main.py
2 | web: python3 app.py
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
2 | cloudscraper
3 | bs4
4 | python-dotenv
5 | pyrogram
6 | tgcrypto
7 | lxml
8 | cfscrape
9 | urllib3==1.26
10 | flask==2.0.1
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | 
3 | WORKDIR /app
4 | 
5 | COPY requirements.txt /app/
6 | RUN pip3 install -r requirements.txt
7 | COPY . /app
8 | 
9 | CMD flask run -h 0.0.0.0 -p 10000 & python3 main.py
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "TOKEN": "",
3 | "ID": "",
4 | "HASH": "",
5 | "Laravel_Session": "",
6 | "XSRF_TOKEN": "",
7 | "GDTot_Crypt": "",
8 | "DCRYPT": "",
9 | "KCRYPT": "",
10 | "HCRYPT": "",
11 | "KATCRYPT": "",
12 | "UPTOBOX_TOKEN":"",
13 | "TERA_COOKIE":"",
14 | "CLOUDFLARE":""
15 | }
16 | 
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | import os
2 | from flask import Flask
3 | 
4 | app = Flask(__name__)
5 | 
6 | @app.route('/')
7 | def home():
8 | return """
9 | 
11 | {(title[0]['content']).replace('Download ' , '')}\n{gdlk}\n\n"
484 | asleep(1.5)
485 | return gd_txt
486 |
487 | elif "taemovies" in link:
488 | gd_txt, no = "", 0
489 | r = requests.get(link)
490 | soup = BeautifulSoup(r.text, "html.parser")
491 | links = soup.select('a[href*="shortingly"]')
492 | gd_txt = f"Total Links Found : {len(links)}\n\n"
493 | for a in links:
494 | glink = rocklinks(a["href"])
495 | t = requests.get(glink)
496 | soupt = BeautifulSoup(t.text, "html.parser")
497 | title = soupt.select('meta[property^="og:description"]')
498 | no += 1
499 | gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{glink}\n\n"
500 | return gd_txt
501 |
502 | elif "toonworld4all" in link:
503 | gd_txt, no = "", 0
504 | r = requests.get(link)
505 | soup = BeautifulSoup(r.text, "html.parser")
506 | links = soup.select('a[href*="redirect/main.php?"]')
507 | for a in links:
508 | down = requests.get(a['href'], stream=True, allow_redirects=False)
509 | link = down.headers["location"]
510 | glink = rocklinks(link)
511 | if glink and "gdtot" in glink:
512 | t = requests.get(glink)
513 | soupt = BeautifulSoup(t.text, "html.parser")
514 | title = soupt.select('meta[property^="og:description"]')
515 | no += 1
516 | gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{glink}\n\n"
517 | return gd_txt
518 |
519 | elif "animeremux" in link:
520 | gd_txt, no = "", 0
521 | r = requests.get(link)
522 | soup = BeautifulSoup(r.text, "html.parser")
523 | links = soup.select('a[href*="urlshortx.com"]')
524 | gd_txt = f"Total Links Found : {len(links)}\n\n"
525 | for a in links:
526 | link = a["href"]
527 | x = link.split("url=")[-1]
528 | t = requests.get(x)
529 | soupt = BeautifulSoup(t.text, "html.parser")
530 | title = soupt.title
531 | no += 1
532 | gd_txt += f"{no}. {title.text}\n{x}\n\n"
533 | asleep(1.5)
534 | return gd_txt
535 |
536 | else:
537 | res = requests.get(link)
538 | soup = BeautifulSoup(res.text, 'html.parser')
539 | mystx = soup.select(r'a[href^="magnet:?xt=urn:btih:"]')
540 | for hy in mystx:
541 | links.append(hy['href'])
542 | return links
543 |
544 |
545 | ###################################################
546 | # script links
547 |
548 | def getfinal(domain, url, sess):
549 |
550 | #sess = requests.session()
551 | res = sess.get(url)
552 | soup = BeautifulSoup(res.text,"html.parser")
553 | soup = soup.find("form").findAll("input")
554 | datalist = []
555 | for ele in soup:
556 | datalist.append(ele.get("value"))
557 |
558 | data = {
559 | '_method': datalist[0],
560 | '_csrfToken': datalist[1],
561 | 'ad_form_data': datalist[2],
562 | '_Token[fields]': datalist[3],
563 | '_Token[unlocked]': datalist[4],
564 | }
565 |
566 | sess.headers = {
567 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
568 | 'Accept': 'application/json, text/javascript, */*; q=0.01',
569 | 'Accept-Language': 'en-US,en;q=0.5',
570 | 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
571 | 'X-Requested-With': 'XMLHttpRequest',
572 | 'Origin': domain,
573 | 'Connection': 'keep-alive',
574 | 'Referer': url,
575 | 'Sec-Fetch-Dest': 'empty',
576 | 'Sec-Fetch-Mode': 'cors',
577 | 'Sec-Fetch-Site': 'same-origin',
578 | }
579 |
580 | # print("waiting 10 secs")
581 | time.sleep(10) # important
582 | response = sess.post(domain+'/links/go', data=data).json()
583 | furl = response["url"]
584 | return furl
585 |
586 |
587 | def getfirst(url):
588 |
589 | sess = requests.session()
590 | res = sess.get(url)
591 |
592 | soup = BeautifulSoup(res.text,"html.parser")
593 | soup = soup.find("form")
594 | action = soup.get("action")
595 | soup = soup.findAll("input")
596 | datalist = []
597 | for ele in soup:
598 | datalist.append(ele.get("value"))
599 | sess.headers = {
600 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
601 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
602 | 'Accept-Language': 'en-US,en;q=0.5',
603 | 'Origin': action,
604 | 'Connection': 'keep-alive',
605 | 'Referer': action,
606 | 'Upgrade-Insecure-Requests': '1',
607 | 'Sec-Fetch-Dest': 'document',
608 | 'Sec-Fetch-Mode': 'navigate',
609 | 'Sec-Fetch-Site': 'same-origin',
610 | 'Sec-Fetch-User': '?1',
611 | }
612 |
613 | data = {'newwpsafelink': datalist[1], "g-recaptcha-response": RecaptchaV3()}
614 | response = sess.post(action, data=data)
615 | soup = BeautifulSoup(response.text, "html.parser")
616 | soup = soup.findAll("div", class_="wpsafe-bottom text-center")
617 | for ele in soup:
618 | rurl = ele.find("a").get("onclick")[13:-12]
619 |
620 | res = sess.get(rurl)
621 | furl = res.url
622 | # print(furl)
623 | return getfinal(f'https://{furl.split("/")[-2]}/',furl,sess)
624 |
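# Illustrative summary of the two-step wpsafelink flow above: getfirst() posts the hidden
# 'newwpsafelink' value together with a RecaptchaV3() token to the form's action URL, pulls the
# redirect target out of the button's onclick handler and follows it, then hands the resulting
# page to getfinal(), which re-submits the scraped form inputs to <domain>/links/go after the
# mandatory 10-second wait and returns the 'url' field of the JSON reply.
# Hypothetical usage (the URL below is made up):
#   final_link = getfirst("https://example-safelink.com/?id=abc123")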
625 |
626 | ####################################################################################################
627 | # ez4short
628 |
629 | def ez4(url):
630 | client = cloudscraper.create_scraper(allow_brotli=False)
631 | DOMAIN = "https://ez4short.com"
632 | ref = "https://techmody.io/"
633 | h = {"referer": ref}
634 | resp = client.get(url,headers=h)
635 | soup = BeautifulSoup(resp.content, "html.parser")
636 | inputs = soup.find_all("input")
637 | data = { input.get('name'): input.get('value') for input in inputs }
638 | h = { "x-requested-with": "XMLHttpRequest" }
639 | time.sleep(8)
640 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
641 | try: return r.json()['url']
642 | except: return "Something went wrong :("
643 |
644 |
645 | ################################################
646 | # ola movies
647 |
648 | def olamovies(url):
649 |
650 | print("this takes time, you might want to take a break.")
651 | headers = {
652 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
653 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',
654 | 'Accept-Language': 'en-US,en;q=0.5',
655 | 'Referer': url,
656 | 'Alt-Used': 'olamovies.ink',
657 | 'Connection': 'keep-alive',
658 | 'Upgrade-Insecure-Requests': '1',
659 | 'Sec-Fetch-Dest': 'document',
660 | 'Sec-Fetch-Mode': 'navigate',
661 | 'Sec-Fetch-Site': 'same-origin',
662 | 'Sec-Fetch-User': '?1',
663 | }
664 |
665 | client = cloudscraper.create_scraper(allow_brotli=False)
666 | res = client.get(url)
667 | soup = BeautifulSoup(res.text,"html.parser")
668 | soup = soup.findAll("div", class_="wp-block-button")
669 |
670 | outlist = []
671 | for ele in soup:
672 | outlist.append(ele.find("a").get("href"))
673 |
674 | slist = []
675 | for ele in outlist:
676 | try:
677 | key = ele.split("?key=")[1].split("&id=")[0].replace("%2B","+").replace("%3D","=").replace("%2F","/")
678 | id = ele.split("&id=")[1]
679 | except:
680 | continue
681 |
682 | count = 3
683 | params = { 'key': key, 'id': id}
684 | soup = "None"
685 |
686 | while 'rocklinks.net' not in soup and "try2link.com" not in soup and "ez4short.com" not in soup:
687 | res = client.get("https://olamovies.ink/download/", params=params, headers=headers)
688 | soup = BeautifulSoup(res.text,"html.parser")
689 | soup = soup.findAll("a")[0].get("href")
690 | if soup != "":
691 | if "try2link.com" in soup or 'rocklinks.net' in soup or "ez4short.com" in soup: slist.append(soup)
692 | else: pass
693 | else:
694 | if count == 0: break
695 | else: count -= 1
696 |
697 | time.sleep(10)
698 |
699 | final = []
700 | for ele in slist:
701 | if "rocklinks.net" in ele:
702 | final.append(rocklinks(ele))
703 | elif "try2link.com" in ele:
704 | final.append(try2link_bypass(ele))
705 | elif "ez4short.com" in ele:
706 | final.append(ez4(ele))
707 | else:
708 | pass
709 |
710 | links = ""
711 | for ele in final:
712 | links = links + ele + "\n"
713 | return links[:-1]
714 |
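# Illustrative summary of olamovies() above: it collects the key/id pairs from the download
# buttons, polls https://olamovies.ink/download/ until a rocklinks.net / try2link.com /
# ez4short.com URL shows up (waiting 10 seconds between attempts, giving up after a few empty
# responses), resolves each shortener with the matching bypass helper, and returns the resolved
# links one per line.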
715 |
716 | ###############################################
717 | # katdrive
718 |
719 | def parse_info_katdrive(res):
720 | info_parsed = {}
721 | title = re.findall('>(.*?)<\/h4>', res.text)[0]
722 | info_chunks = re.findall('>(.*?)<\/td>', res.text)
723 | info_parsed['title'] = title
724 | for i in range(0, len(info_chunks), 2):
725 | info_parsed[info_chunks[i]] = info_chunks[i+1]
726 | return info_parsed
727 |
728 | def katdrive_dl(url,katcrypt):
729 | client = requests.Session()
730 | client.cookies.update({'crypt': katcrypt})
731 |
732 | res = client.get(url)
733 | info_parsed = parse_info_katdrive(res)
734 | info_parsed['error'] = False
735 |
736 | up = urlparse(url)
737 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
738 |
739 | file_id = url.split('/')[-1]
740 | data = { 'id': file_id }
741 | headers = {'x-requested-with': 'XMLHttpRequest'}
742 |
743 | try:
744 | res = client.post(req_url, headers=headers, data=data).json()['file']
745 | except:
746 | return "Error"#{'error': True, 'src_url': url}
747 |
748 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0]
749 | info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}"
750 | info_parsed['src_url'] = url
751 | return info_parsed['gdrive_url']
752 |
753 |
754 | ###############################################
755 | # hubdrive
756 |
757 | def parse_info_hubdrive(res):
758 | info_parsed = {}
759 | title = re.findall('>(.*?)<\/h4>', res.text)[0]
760 | info_chunks = re.findall('>(.*?)<\/td>', res.text)
761 | info_parsed['title'] = title
762 | for i in range(0, len(info_chunks), 2):
763 | info_parsed[info_chunks[i]] = info_chunks[i+1]
764 | return info_parsed
765 |
766 | def hubdrive_dl(url,hcrypt):
767 | client = requests.Session()
768 | client.cookies.update({'crypt': hcrypt})
769 |
770 | res = client.get(url)
771 | info_parsed = parse_info_hubdrive(res)
772 | info_parsed['error'] = False
773 |
774 | up = urlparse(url)
775 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
776 |
777 | file_id = url.split('/')[-1]
778 | data = { 'id': file_id }
779 | headers = {'x-requested-with': 'XMLHttpRequest'}
780 |
781 | try:
782 | res = client.post(req_url, headers=headers, data=data).json()['file']
783 | except:
784 | return "Error"#{'error': True, 'src_url': url}
785 |
786 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0]
787 | info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}"
788 | info_parsed['src_url'] = url
789 | return info_parsed['gdrive_url']
790 |
791 |
792 | #################################################
793 | # drivefire
794 |
795 | def parse_info_drivefire(res):
796 | info_parsed = {}
797 | title = re.findall('>(.*?)<\/h4>', res.text)[0]
798 | info_chunks = re.findall('>(.*?)<\/td>', res.text)
799 | info_parsed['title'] = title
800 | for i in range(0, len(info_chunks), 2):
801 | info_parsed[info_chunks[i]] = info_chunks[i+1]
802 | return info_parsed
803 |
804 | def drivefire_dl(url,dcrypt):
805 | client = requests.Session()
806 | client.cookies.update({'crypt': dcrypt})
807 |
808 | res = client.get(url)
809 | info_parsed = parse_info_drivefire(res)
810 | info_parsed['error'] = False
811 |
812 | up = urlparse(url)
813 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
814 |
815 | file_id = url.split('/')[-1]
816 | data = { 'id': file_id }
817 | headers = {'x-requested-with': 'XMLHttpRequest'}
818 |
819 | try:
820 | res = client.post(req_url, headers=headers, data=data).json()['file']
821 | except:
822 | return "Error"#{'error': True, 'src_url': url}
823 |
824 | decoded_id = res.rsplit('/', 1)[-1]
825 | info_parsed = f"https://drive.google.com/file/d/{decoded_id}"
826 | return info_parsed
827 |
828 |
829 | ##################################################
830 | # kolop
831 |
832 | def parse_info_kolop(res):
833 | info_parsed = {}
834 | title = re.findall('>(.*?)<\/h4>', res.text)[0]
835 | info_chunks = re.findall('>(.*?)<\/td>', res.text)
836 | info_parsed['title'] = title
837 | for i in range(0, len(info_chunks), 2):
838 | info_parsed[info_chunks[i]] = info_chunks[i+1]
839 | return info_parsed
840 |
841 | def kolop_dl(url,kcrypt):
842 | client = requests.Session()
843 | client.cookies.update({'crypt': kcrypt})
844 |
845 | res = client.get(url)
846 | info_parsed = parse_info_kolop(res)
847 | info_parsed['error'] = False
848 |
849 | up = urlparse(url)
850 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download"
851 |
852 | file_id = url.split('/')[-1]
853 | data = { 'id': file_id }
854 | headers = { 'x-requested-with': 'XMLHttpRequest'}
855 |
856 | try:
857 | res = client.post(req_url, headers=headers, data=data).json()['file']
858 | except:
859 | return "Error"#{'error': True, 'src_url': url}
860 |
861 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0]
862 | info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}"
863 | info_parsed['src_url'] = url
864 |
865 | return info_parsed['gdrive_url']
866 |
867 |
868 | ##################################################
869 | # mediafire
870 |
871 | def mediafire(url):
872 |
873 | res = requests.get(url, stream=True)
874 | contents = res.text
875 |
876 | for line in contents.splitlines():
877 | m = re.search(r'href="((http|https)://download[^"]+)', line)
878 | if m:
879 | return m.groups()[0]
880 |
881 |
882 | ####################################################
883 | # zippyshare
884 |
885 | def zippyshare(url):
886 | resp = requests.get(url).text
887 | surl = resp.split("document.getElementById('dlbutton').href = ")[1].split(";")[0]
888 | parts = surl.split("(")[1].split(")")[0].split(" ")
889 | val = str(int(parts[0]) % int(parts[2]) + int(parts[4]) % int(parts[6]))
890 | surl = surl.split('"')
891 | burl = url.split("zippyshare.com")[0]
892 | furl = burl + "zippyshare.com" + surl[1] + val + surl[-2]
893 | return furl
894 |
895 |
896 | ####################################################
897 | # filecrypt
898 |
899 | def getlinks(dlc):
900 | headers = {
901 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0',
902 | 'Accept': 'application/json, text/javascript, */*',
903 | 'Accept-Language': 'en-US,en;q=0.5',
904 | 'X-Requested-With': 'XMLHttpRequest',
905 | 'Origin': 'http://dcrypt.it',
906 | 'Connection': 'keep-alive',
907 | 'Referer': 'http://dcrypt.it/',
908 | }
909 |
910 | data = {
911 | 'content': dlc,
912 | }
913 |
914 | response = requests.post('http://dcrypt.it/decrypt/paste', headers=headers, data=data).json()["success"]["links"]
915 | links = ""
916 | for link in response:
917 | links = links + link + "\n\n"
918 | return links[:-1]
919 |
920 |
921 | def filecrypt(url):
922 |
923 | client = cloudscraper.create_scraper(allow_brotli=False)
924 | headers = {
925 | "authority": "filecrypt.co",
926 | "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
927 | "accept-language": "en-US,en;q=0.9",
928 | "cache-control": "max-age=0",
929 | "content-type": "application/x-www-form-urlencoded",
930 | "dnt": "1",
931 | "origin": "https://filecrypt.co",
932 | "referer": url,
933 | "sec-ch-ua": '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"',
934 | "sec-ch-ua-mobile": "?0",
935 | "sec-ch-ua-platform": "Windows",
936 | "sec-fetch-dest": "document",
937 | "sec-fetch-mode": "navigate",
938 | "sec-fetch-site": "same-origin",
939 | "sec-fetch-user": "?1",
940 | "upgrade-insecure-requests": "1",
941 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36"
942 | }
943 |
944 |
945 | resp = client.get(url, headers=headers)
946 | soup = BeautifulSoup(resp.content, "html.parser")
947 |
948 | buttons = soup.find_all("button")
949 | for ele in buttons:
950 | line = ele.get("onclick")
951 | if line is not None and "DownloadDLC" in line:
952 | dlclink = "https://filecrypt.co/DLC/" + line.split("DownloadDLC('")[1].split("'")[0] + ".html"
953 | break
954 |
955 | resp = client.get(dlclink,headers=headers)
956 | return getlinks(resp.text)
957 |
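# Illustrative summary of the filecrypt flow above: filecrypt() finds the button whose onclick
# calls DownloadDLC, builds the https://filecrypt.co/DLC/<id>.html container URL and fetches it,
# then getlinks() submits that DLC content to http://dcrypt.it/decrypt/paste and joins the
# decrypted links with blank lines.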
958 |
959 | #####################################################
960 | # dropbox
961 |
962 | def dropbox(url):
963 | return url.replace("www.","").replace("dropbox.com","dl.dropboxusercontent.com").replace("?dl=0","")
964 |
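# Illustrative example (hypothetical share link) of what the string rewrite above produces:
#   dropbox("https://www.dropbox.com/s/abc123/file.zip?dl=0")
#   -> "https://dl.dropboxusercontent.com/s/abc123/file.zip"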
965 |
966 | ######################################################
967 | # shareus
968 |
969 | def shareus(url):
970 | token = url.split("=")[-1]
971 | bypassed_url = "https://us-central1-my-apps-server.cloudfunctions.net/r?shortid="+ token
972 | response = requests.get(bypassed_url).text
973 | return response
974 |
975 |
976 | #######################################################
977 | # shortingly
978 |
979 | def shortingly(url):
980 | client = cloudscraper.create_scraper(allow_brotli=False)
981 | DOMAIN = "https://shortingly.in"
982 | url = url[:-1] if url[-1] == '/' else url
983 | code = url.split("/")[-1]
984 | final_url = f"{DOMAIN}/{code}"
985 | ref = "https://tech.gyanitheme.com/"
986 | h = {"referer": ref}
987 | resp = client.get(final_url,headers=h)
988 | soup = BeautifulSoup(resp.content, "html.parser")
989 | inputs = soup.find_all("input")
990 | data = { input.get('name'): input.get('value') for input in inputs }
991 | h = { "x-requested-with": "XMLHttpRequest" }
992 | time.sleep(5)
993 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
994 | try: return r.json()['url']
995 | except: return "Something went wrong :("
996 |
997 | #######################################################
998 | # Gyanilinks - gtlinks.me
999 |
1000 | def gyanilinks(url):
1001 | DOMAIN = "https://go.theforyou.in/"
1002 | client = cloudscraper.create_scraper(allow_brotli=False)
1003 | url = url[:-1] if url[-1] == '/' else url
1004 | code = url.split("/")[-1]
1005 | final_url = f"{DOMAIN}/{code}"
1006 | resp = client.get(final_url)
1007 | soup = BeautifulSoup(resp.content, "html.parser")
1008 | try: inputs = soup.find(id="go-link").find_all(name="input")
1009 | except: return "Incorrect Link"
1010 | data = { input.get('name'): input.get('value') for input in inputs }
1011 | h = { "x-requested-with": "XMLHttpRequest" }
1012 | time.sleep(5)
1013 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1014 | try: return r.json()['url']
1015 | except: return "Something went wrong :("
1016 |
1017 |
1018 | #######################################################
1019 | # Flashlink
1020 |
1021 | def flashl(url):
1022 | client = cloudscraper.create_scraper(allow_brotli=False)
1023 | DOMAIN = "https://files.earnash.com/"
1024 | url = url[:-1] if url[-1] == '/' else url
1025 | code = url.split("/")[-1]
1026 | final_url = f"{DOMAIN}/{code}"
1027 | ref = "https://flash1.cordtpoint.co.in"
1028 | h = {"referer": ref}
1029 | resp = client.get(final_url,headers=h)
1030 | soup = BeautifulSoup(resp.content, "html.parser")
1031 | inputs = soup.find_all("input")
1032 | data = { input.get('name'): input.get('value') for input in inputs }
1033 | h = { "x-requested-with": "XMLHttpRequest" }
1034 | time.sleep(15)
1035 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1036 | try: return r.json()['url']
1037 | except: return "Something went wrong :("
1038 |
1039 |
1040 | #######################################################
1041 | # short2url
1042 |
1043 | def short2url(url):
1044 | client = cloudscraper.create_scraper(allow_brotli=False)
1045 | DOMAIN = "https://techyuth.xyz/blog"
1046 | url = url[:-1] if url[-1] == '/' else url
1047 | code = url.split("/")[-1]
1048 | final_url = f"{DOMAIN}/{code}"
1049 | ref = "https://blog.coin2pay.xyz/"
1050 | h = {"referer": ref}
1051 | resp = client.get(final_url, headers=h)
1052 | soup = BeautifulSoup(resp.content, "html.parser")
1053 | inputs = soup.find_all("input")
1054 | data = { input.get('name'): input.get('value') for input in inputs }
1055 | h = { "x-requested-with": "XMLHttpRequest" }
1056 | time.sleep(10)
1057 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1058 | try:
1059 | return r.json()['url']
1060 | except: return "Something went wrong :("
1061 |
1062 |
1063 | #######################################################
1064 | # anonfiles
1065 |
1066 | def anonfile(url):
1067 |
1068 | headersList = { "Accept": "*/*"}
1069 | payload = ""
1070 |
1071 | response = requests.request("GET", url, data=payload, headers=headersList).text.split("\n")
1072 | for ele in response:
1073 | if "https://cdn" in ele and "anonfiles.com" in ele and url.split("/")[-2] in ele:
1074 | break
1075 |
1076 | return ele.split('href="')[1].split('"')[0]
1077 |
1078 |
1079 | ##########################################################
1080 | # pixl
1081 |
1082 | def pixl(url):
1083 | count = 1
1084 | dl_msg = ""
1085 | currentpage = 1
1086 | settotalimgs = True
1087 | totalimages = ""
1088 | client = cloudscraper.create_scraper(allow_brotli=False)
1089 | resp = client.get(url)
1090 | if resp.status_code == 404:
1091 | return "File not found/The link you entered is wrong!"
1092 | soup = BeautifulSoup(resp.content, "html.parser")
1093 | if "album" in url and settotalimgs:
1094 | totalimages = soup.find("span", {"data-text": "image-count"}).text
1095 | settotalimgs = False
1096 | thmbnailanch = soup.findAll(attrs={"class": "--media"})
1097 | links = soup.findAll(attrs={"data-pagination": "next"})
1098 | try:
1099 | url = links[0].attrs["href"]
1100 | except BaseException:
1101 | url = None
1102 | for ref in thmbnailanch:
1103 | imgdata = client.get(ref.attrs["href"])
1104 | if not imgdata.status_code == 200:
1105 | time.sleep(5)
1106 | continue
1107 | imghtml = BeautifulSoup(imgdata.text, "html.parser")
1108 | downloadanch = imghtml.find(attrs={"class": "btn-download"})
1109 | currentimg = downloadanch.attrs["href"]
1110 | currentimg = currentimg.replace(" ", "%20")
1111 | dl_msg += f"{count}. {currentimg}\n"
1112 | count += 1
1113 | currentpage += 1
1114 | fld_msg = f"The Pixl.is link you provided is a folder, and I found {count - 1} files in it.\n"
1115 | fld_link = f"\nFolder Link: {url}\n"
1116 | final_msg = fld_link + "\n" + fld_msg + "\n" + dl_msg
1117 | return final_msg
1118 |
1119 |
1120 | ############################################################
1121 | # sirigan (unused)
1122 |
1123 | def siriganbypass(url):
1124 | client = requests.Session()
1125 | res = client.get(url)
1126 | url = res.url.split('=', maxsplit=1)[-1]
1127 |
1128 | while True:
1129 | try: url = base64.b64decode(url).decode('utf-8')
1130 | except: break
1131 |
1132 | return url.split('url=')[-1]
1133 |
1134 |
1135 | ############################################################
1136 | # shorte
1137 |
1138 | def sh_st_bypass(url):
1139 | client = requests.Session()
1140 | client.headers.update({'referer': url})
1141 | p = urlparse(url)
1142 |
1143 | res = client.get(url)
1144 |
1145 | sess_id = re.findall('''sessionId(?:\s+)?:(?:\s+)?['|"](.*?)['|"]''', res.text)[0]
1146 |
1147 | final_url = f"{p.scheme}://{p.netloc}/shortest-url/end-adsession"
1148 | params = {
1149 | 'adSessionId': sess_id,
1150 | 'callback': '_'
1151 | }
1152 | time.sleep(5) # !important
1153 |
1154 | res = client.get(final_url, params=params)
1155 | dest_url = re.findall('"(.*?)"', res.text)[1].replace('\/','/')
1156 |
1157 | return {
1158 | 'src': url,
1159 | 'dst': dest_url
1160 | }['dst']
1161 |
1162 |
1163 | #############################################################
1164 | # gofile
1165 |
1166 | def gofile_dl(url,password=""):
1167 | api_uri = 'https://api.gofile.io'
1168 | client = requests.Session()
1169 | res = client.get(api_uri+'/createAccount').json()
1170 |
1171 | data = {
1172 | 'contentId': url.split('/')[-1],
1173 | 'token': res['data']['token'],
1174 | 'websiteToken': '12345',
1175 | 'cache': 'true',
1176 | 'password': hashlib.sha256(password.encode('utf-8')).hexdigest()
1177 | }
1178 | res = client.get(api_uri+'/getContent', params=data).json()
1179 |
1180 | content = []
1181 | for item in res['data']['contents'].values():
1182 | content.append(item)
1183 |
1184 | return {
1185 | 'accountToken': data['token'],
1186 | 'files': content
1187 | }["files"][0]["link"]
1188 |
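# Illustrative summary of gofile_dl() above: it creates a guest account via /createAccount,
# queries /getContent with that token plus the sha256 of the (possibly empty) password, and
# returns the 'link' of the first file in the content listing.
# Hypothetical usage (made-up content id):
#   direct_link = gofile_dl("https://gofile.io/d/AbCdEf")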
1189 |
1190 | ################################################################
1191 | # sharer pw
1192 |
1193 | def parse_info_sharer(res):
1194 | f = re.findall(">(.*?)<\/td>", res.text)
1195 | info_parsed = {}
1196 | for i in range(0, len(f), 3):
1197 | info_parsed[f[i].lower().replace(' ', '_')] = f[i+2]
1198 | return info_parsed
1199 |
1200 | def sharer_pw(url,Laravel_Session, XSRF_TOKEN, forced_login=False):
1201 | client = cloudscraper.create_scraper(allow_brotli=False)
1202 | client.cookies.update({
1203 | "XSRF-TOKEN": XSRF_TOKEN,
1204 | "laravel_session": Laravel_Session
1205 | })
1206 | res = client.get(url)
1207 | token = re.findall("_token\s=\s'(.*?)'", res.text, re.DOTALL)[0]
1208 | ddl_btn = etree.HTML(res.content).xpath("//button[@id='btndirect']")
1209 | info_parsed = parse_info_sharer(res)
1210 | info_parsed['error'] = True
1211 | info_parsed['src_url'] = url
1212 | info_parsed['link_type'] = 'login'
1213 | info_parsed['forced_login'] = forced_login
1214 | headers = {
1215 | 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
1216 | 'x-requested-with': 'XMLHttpRequest'
1217 | }
1218 | data = {
1219 | '_token': token
1220 | }
1221 | if len(ddl_btn):
1222 | info_parsed['link_type'] = 'direct'
1223 | if not forced_login:
1224 | data['nl'] = 1
1225 | try:
1226 | res = client.post(url+'/dl', headers=headers, data=data).json()
1227 | except:
1228 | return info_parsed
1229 | if 'url' in res and res['url']:
1230 | info_parsed['error'] = False
1231 | info_parsed['gdrive_link'] = res['url']
1232 | if len(ddl_btn) and not forced_login and not 'url' in info_parsed:
1233 | # retry download via login
1234 | return sharer_pw(url,Laravel_Session, XSRF_TOKEN, forced_login=True)
1235 | return info_parsed["gdrive_link"]
1236 |
1237 |
1238 | #################################################################
1239 | # gdtot
1240 |
1241 | def gdtot(url):
1242 | cget = create_scraper().request
1243 | try:
1244 | res = cget('GET', f'https://gdbot.xyz/file/{url.split("/")[-1]}')
1245 | except Exception as e:
1246 | return (f'ERROR: {e.__class__.__name__}')
1247 | token_url = etree.HTML(res.content).xpath(
1248 | "//a[contains(@class,'inline-flex items-center justify-center')]/@href")
1249 | if not token_url:
1250 | try:
1251 | url = cget('GET', url).url
1252 | p_url = urlparse(url)
1253 | res = cget(
1254 | "GET", f"{p_url.scheme}://{p_url.hostname}/ddl/{url.split('/')[-1]}")
1255 | except Exception as e:
1256 | return (f'ERROR: {e.__class__.__name__}')
1257 | if (drive_link := re.findall(r"myDl\('(.*?)'\)", res.text)) and "drive.google.com" in drive_link[0]:
1258 | return drive_link[0]
1259 | else:
1260 | return (
1261 | 'ERROR: Drive Link not found, try it in your browser')
1262 | token_url = token_url[0]
1263 | try:
1264 | token_page = cget('GET', token_url)
1265 | except Exception as e:
1266 | return (
1267 | f'ERROR: {e.__class__.__name__} with {token_url}')
1268 | path = re.findall('\("(.*?)"\)', token_page.text)
1269 | if not path:
1270 | return ('ERROR: Cannot bypass this')
1271 | path = path[0]
1272 | raw = urlparse(token_url)
1273 | final_url = f'{raw.scheme}://{raw.hostname}{path}'
1274 | return ddl.sharer_scraper(final_url)
1275 |
1276 |
1277 | ##################################################################
1278 | # adfly
1279 |
1280 | def decrypt_url(code):
1281 | a, b = '', ''
1282 | for i in range(0, len(code)):
1283 | if i % 2 == 0: a += code[i]
1284 | else: b = code[i] + b
1285 | key = list(a + b)
1286 | i = 0
1287 | while i < len(key):
1288 | if key[i].isdigit():
1289 | for j in range(i+1,len(key)):
1290 | if key[j].isdigit():
1291 | u = int(key[i]) ^ int(key[j])
1292 | if u < 10: key[i] = str(u)
1293 | i = j
1294 | break
1295 | i+=1
1296 | key = ''.join(key)
1297 | decrypted = base64.b64decode(key)[16:-16]
1298 | return decrypted.decode('utf-8')
1299 |
1300 |
1301 | def adfly(url):
1302 | client = cloudscraper.create_scraper(allow_brotli=False)
1303 | res = client.get(url).text
1304 | out = {'error': False, 'src_url': url}
1305 | try:
1306 | ysmm = re.findall("ysmm\s+=\s+['|\"](.*?)['|\"]", res)[0]
1307 | except:
1308 | out['error'] = True
1309 | return out
1310 | url = decrypt_url(ysmm)
1311 | if re.search(r'go\.php\?u\=', url):
1312 | url = base64.b64decode(re.sub(r'(.*?)u=', '', url)).decode()
1313 | elif '&dest=' in url:
1314 | url = unquote(re.sub(r'(.*?)dest=', '', url))
1315 | out['bypassed_url'] = url
1316 | return out
1317 |
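# Illustrative note on adfly() above: it decodes the page's 'ysmm' value with decrypt_url() and
# returns a dict containing 'error', 'src_url' and, on success, 'bypassed_url'.
# Hypothetical usage (made-up short link):
#   result = adfly("https://adf.ly/abc123")
#   dest = result["bypassed_url"] if not result["error"] else None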
1318 |
1319 | ##############################################################################################
1320 | # gplinks
1321 |
1322 | def gplinks(url: str):
1323 | client = cloudscraper.create_scraper(allow_brotli=False)
1324 | token = url.split("/")[-1]
1325 | domain ="https://gplinks.co/"
1326 | referer = "https://mynewsmedia.co/"
1327 | vid = client.get(url, allow_redirects=False).headers["Location"].split("=")[-1]
1328 | url = f"{url}/?{vid}"
1329 | response = client.get(url, allow_redirects=False)
1330 | soup = BeautifulSoup(response.content, "html.parser")
1331 | inputs = soup.find(id="go-link").find_all(name="input")
1332 | data = { input.get('name'): input.get('value') for input in inputs }
1333 | time.sleep(10)
1334 | headers={"x-requested-with": "XMLHttpRequest"}
1335 | bypassed_url = client.post(domain+"links/go", data=data, headers=headers).json()["url"]
1336 | try: return bypassed_url
1337 | except: return 'Something went wrong :('
1338 |
1339 |
1340 | ######################################################################################################
1341 | # droplink
1342 |
1343 | def droplink(url):
1344 | client = cloudscraper.create_scraper(allow_brotli=False)
1345 | res = client.get(url, timeout=5)
1346 |
1347 | ref = re.findall("action[ ]{0,}=[ ]{0,}['|\"](.*?)['|\"]", res.text)[0]
1348 | h = {"referer": ref}
1349 | res = client.get(url, headers=h)
1350 |
1351 | bs4 = BeautifulSoup(res.content, "html.parser")
1352 | inputs = bs4.find_all("input")
1353 | data = {input.get("name"): input.get("value") for input in inputs}
1354 | h = {
1355 | "content-type": "application/x-www-form-urlencoded",
1356 | "x-requested-with": "XMLHttpRequest",
1357 | }
1358 |
1359 | p = urlparse(url)
1360 | final_url = f"{p.scheme}://{p.netloc}/links/go"
1361 | time.sleep(3.1)
1362 | res = client.post(final_url, data=data, headers=h).json()
1363 |
1364 | if res["status"] == "success": return res["url"]
1365 | return 'Something went wrong :('
1366 |
1367 |
1368 | #####################################################################################################################
1369 | # link vertise
1370 |
1371 | def linkvertise(url):
1372 | params = {'url': url,}
1373 | response = requests.get('https://bypass.pm/bypass2', params=params).json()
1374 | if response["success"]: return response["destination"]
1375 | else: return response["msg"]
1376 |
1377 |
1378 | ###################################################################################################################
1379 | # others
1380 |
1381 | def others(url):
1382 | return "API Currently not Available"
1383 |
1384 |
1385 | #################################################################################################################
1386 | # ouo
1387 |
1388 | # RECAPTCHA v3 BYPASS
1389 | # code from https://github.com/xcscxr/Recaptcha-v3-bypass
1390 | def RecaptchaV3(ANCHOR_URL="https://www.google.com/recaptcha/api2/anchor?ar=1&k=6Lcr1ncUAAAAAH3cghg6cOTPGARa8adOf-y9zv2x&co=aHR0cHM6Ly9vdW8uaW86NDQz&hl=en&v=1B_yv3CBEV10KtI2HJ6eEXhJ&size=invisible&cb=4xnsug1vufyr"):
1391 | url_base = 'https://www.google.com/recaptcha/'
1392 | post_data = "v={}&reason=q&c={}&k={}&co={}"
1393 | client = requests.Session()
1394 | client.headers.update({
1395 | 'content-type': 'application/x-www-form-urlencoded'
1396 | })
1397 | matches = re.findall('([api2|enterprise]+)\/anchor\?(.*)', ANCHOR_URL)[0]
1398 | url_base += matches[0]+'/'
1399 | params = matches[1]
1400 | res = client.get(url_base+'anchor', params=params)
1401 | token = re.findall(r'"recaptcha-token" value="(.*?)"', res.text)[0]
1402 | params = dict(pair.split('=') for pair in params.split('&'))
1403 | post_data = post_data.format(params["v"], token, params["k"], params["co"])
1404 | res = client.post(url_base+'reload', params=f'k={params["k"]}', data=post_data)
1405 | answer = re.findall(r'"rresp","(.*?)"', res.text)[0]
1406 | return answer
1407 |
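# Illustrative note on RecaptchaV3() above: it fetches the anchor page for the given site key,
# extracts the 'recaptcha-token' value, replays it against the /reload endpoint and returns the
# 'rresp' token. Elsewhere in this file that token is sent as the 'g-recaptcha-response' form
# field (getfirst) or as 'x-token' (ouo).
# Hypothetical usage with the default ouo.io anchor URL:
#   token = RecaptchaV3()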
1408 |
1409 | # code from https://github.com/xcscxr/ouo-bypass/
1410 | def ouo(url):
1411 | client = requests.Session()
1412 | tempurl = url.replace("ouo.press", "ouo.io")
1413 | p = urlparse(tempurl)
1414 | id = tempurl.split('/')[-1]
1415 |
1416 | res = client.get(tempurl)
1417 | next_url = f"{p.scheme}://{p.hostname}/go/{id}"
1418 |
1419 | for _ in range(2):
1420 | if res.headers.get('Location'):
1421 | break
1422 | bs4 = BeautifulSoup(res.content, 'lxml')
1423 | inputs = bs4.form.findAll("input", {"name": re.compile(r"token$")})
1424 | data = { input.get('name'): input.get('value') for input in inputs }
1425 |
1426 | ans = RecaptchaV3()
1427 | data['x-token'] = ans
1428 | h = {
1429 | 'content-type': 'application/x-www-form-urlencoded'
1430 | }
1431 | res = client.post(next_url, data=data, headers=h, allow_redirects=False)
1432 | next_url = f"{p.scheme}://{p.hostname}/xreallcygo/{id}"
1433 |
1434 | return res.headers.get('Location')
1435 |
1436 |
1437 | ####################################################################################################################
1438 | # mdisk
1439 |
1440 | def mdisk(url):
1441 | header = {
1442 | 'Accept': '*/*',
1443 | 'Accept-Language': 'en-US,en;q=0.5',
1444 | 'Accept-Encoding': 'gzip, deflate, br',
1445 | 'Referer': 'https://mdisk.me/',
1446 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
1447 | }
1448 |
1449 | inp = url
1450 | fxl = inp.split("/")
1451 | cid = fxl[-1]
1452 |
1453 | URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={cid}'
1454 | res = requests.get(url=URL, headers=header).json()
1455 | return res['download'] + '\n\n' + res['source']
1456 |
1457 |
1458 | ##################################################################################################################
1459 | # AppDrive, DriveApp and other look-alike links (account details are required only for login-protected links)
1460 |
1461 | def unified(url):
1462 |
1463 | if ddl.is_share_link(url):
1464 | if 'https://gdtot' in url: return ddl.gdtot(url)
1465 | else: return ddl.sharer_scraper(url)
1466 |
1467 | try:
1468 | Email = "chzeesha4@gmail.com"
1469 | Password = "zeeshi#789"
1470 |
1471 | account = {"email": Email, "passwd": Password}
1472 | client = cloudscraper.create_scraper(allow_brotli=False)
1473 | client.headers.update(
1474 | {
1475 | "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36"
1476 | }
1477 | )
1478 | data = {"email": account["email"], "password": account["passwd"]}
1479 | client.post(f"https://{urlparse(url).netloc}/login", data=data)
1480 | res = client.get(url)
1481 | key = re.findall('"key",\s+"(.*?)"', res.text)[0]
1482 | ddl_btn = etree.HTML(res.content).xpath("//button[@id='drc']")
1483 | info = re.findall(">(.*?)<\/li>", res.text)
1484 | info_parsed = {}
1485 | for item in info:
1486 | kv = [s.strip() for s in item.split(": ", maxsplit=1)]
1487 | info_parsed[kv[0].lower()] = kv[1]
1488 | info_parsed = info_parsed
1489 | info_parsed["error"] = False
1490 | info_parsed["link_type"] = "login"
1491 | headers = {
1492 | "Content-Type": f"multipart/form-data; boundary={'-'*4}_",
1493 | }
1494 | data = {"type": 1, "key": key, "action": "original"}
1495 | if len(ddl_btn):
1496 | info_parsed["link_type"] = "direct"
1497 | data["action"] = "direct"
1498 | while data["type"] <= 3:
1499 | boundary = f'{"-"*6}_'
1500 | data_string = ""
1501 | for item in data:
1502 | data_string += f"{boundary}\r\n"
1503 | data_string += f'Content-Disposition: form-data; name="{item}"\r\n\r\n{data[item]}\r\n'
1504 | data_string += f"{boundary}--\r\n"
1505 | gen_payload = data_string
1506 | try:
1507 | response = client.post(url, data=gen_payload, headers=headers).json()
1508 | break
1509 | except BaseException:
1510 | data["type"] += 1
1511 | if "url" in response:
1512 | info_parsed["gdrive_link"] = response["url"]
1513 | elif "error" in response and response["error"]:
1514 | info_parsed["error"] = True
1515 | info_parsed["error_message"] = response["message"]
1516 | else:
1517 | info_parsed["error"] = True
1518 | info_parsed["error_message"] = "Something went wrong :("
1519 | if info_parsed["error"]:
1520 | return info_parsed
1521 | if "driveapp" in urlparse(url).netloc and not info_parsed["error"]:
1522 | res = client.get(info_parsed["gdrive_link"])
1523 | drive_link = etree.HTML(res.content).xpath(
1524 | "//a[contains(@class,'btn')]/@href"
1525 | )[0]
1526 | info_parsed["gdrive_link"] = drive_link
1527 | info_parsed["src_url"] = url
1528 | if "drivehub" in urlparse(url).netloc and not info_parsed["error"]:
1529 | res = client.get(info_parsed["gdrive_link"])
1530 | drive_link = etree.HTML(res.content).xpath(
1531 | "//a[contains(@class,'btn')]/@href"
1532 | )[0]
1533 | info_parsed["gdrive_link"] = drive_link
1534 | if "gdflix" in urlparse(url).netloc and not info_parsed["error"]:
1535 | res = client.get(info_parsed["gdrive_link"])
1536 | drive_link = etree.HTML(res.content).xpath(
1537 | "//a[contains(@class,'btn')]/@href"
1538 | )[0]
1539 | info_parsed["gdrive_link"] = drive_link
1540 |
1541 | if "drivesharer" in urlparse(url).netloc and not info_parsed["error"]:
1542 | res = client.get(info_parsed["gdrive_link"])
1543 | drive_link = etree.HTML(res.content).xpath(
1544 | "//a[contains(@class,'btn')]/@href"
1545 | )[0]
1546 | info_parsed["gdrive_link"] = drive_link
1547 | if "drivebit" in urlparse(url).netloc and not info_parsed["error"]:
1548 | res = client.get(info_parsed["gdrive_link"])
1549 | drive_link = etree.HTML(res.content).xpath(
1550 | "//a[contains(@class,'btn')]/@href"
1551 | )[0]
1552 | info_parsed["gdrive_link"] = drive_link
1553 | if "drivelinks" in urlparse(url).netloc and not info_parsed["error"]:
1554 | res = client.get(info_parsed["gdrive_link"])
1555 | drive_link = etree.HTML(res.content).xpath(
1556 | "//a[contains(@class,'btn')]/@href"
1557 | )[0]
1558 | info_parsed["gdrive_link"] = drive_link
1559 | if "driveace" in urlparse(url).netloc and not info_parsed["error"]:
1560 | res = client.get(info_parsed["gdrive_link"])
1561 | drive_link = etree.HTML(res.content).xpath(
1562 | "//a[contains(@class,'btn')]/@href"
1563 | )[0]
1564 | info_parsed["gdrive_link"] = drive_link
1565 | if "drivepro" in urlparse(url).netloc and not info_parsed["error"]:
1566 | res = client.get(info_parsed["gdrive_link"])
1567 | drive_link = etree.HTML(res.content).xpath(
1568 | "//a[contains(@class,'btn')]/@href"
1569 | )[0]
1570 | info_parsed["gdrive_link"] = drive_link
1571 | if info_parsed["error"]:
1572 | return "Faced an Unknown Error!"
1573 | return info_parsed["gdrive_link"]
1574 | except BaseException:
1575 | return "Unable to Extract GDrive Link"
1576 |
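# Illustrative summary of unified() above: share links are handed off to ddl.gdtot() /
# ddl.sharer_scraper(); otherwise it logs in with the hard-coded account, reads the page 'key',
# posts a hand-built multipart form (retrying with type 1..3, action original/direct), follows
# the extra redirect page on driveapp/drivehub/gdflix/drive* hosts, and returns the Google Drive
# link or an error string.
# Hypothetical usage (made-up file id):
#   gdrive_link = unified("https://appdrive.example/file/XYZ123")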
1577 |
1578 | #####################################################################################################
1579 | # urls open
1580 |
1581 | def urlsopen(url):
1582 | client = cloudscraper.create_scraper(allow_brotli=False)
1583 | DOMAIN = "https://blogpost.viewboonposts.com/e998933f1f665f5e75f2d1ae0009e0063ed66f889000"
1584 | url = url[:-1] if url[-1] == '/' else url
1585 | code = url.split("/")[-1]
1586 | final_url = f"{DOMAIN}/{code}"
1587 | ref = "https://blog.textpage.xyz/"
1588 | h = {"referer": ref}
1589 | resp = client.get(final_url,headers=h)
1590 | soup = BeautifulSoup(resp.content, "html.parser")
1591 | inputs = soup.find_all("input")
1592 | data = { input.get('name'): input.get('value') for input in inputs }
1593 | h = { "x-requested-with": "XMLHttpRequest" }
1594 | time.sleep(2)
1595 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1596 | try: return r.json()['url']
1597 | except: return "Something went wrong :("
1598 |
1599 |
1600 | ####################################################################################################
1601 | # URLShortX - xpshort
1602 |
1603 | def xpshort(url):
1604 | client = cloudscraper.create_scraper(allow_brotli=False)
1605 | DOMAIN = "https://xpshort.com"
1606 | url = url[:-1] if url[-1] == '/' else url
1607 | code = url.split("/")[-1]
1608 | final_url = f"{DOMAIN}/{code}"
1609 | ref = "https://m.awmnews.in/"
1610 | h = {"referer": ref}
1611 | resp = client.get(final_url,headers=h)
1612 | soup = BeautifulSoup(resp.content, "html.parser")
1613 | inputs = soup.find_all("input")
1614 | data = { input.get('name'): input.get('value') for input in inputs }
1615 | h = { "x-requested-with": "XMLHttpRequest" }
1616 | time.sleep(8)
1617 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1618 | try: return r.json()['url']
1619 | except: return "Something went wrong :("
1620 |
1621 |
1622 | ####################################################################################################
1623 | # vnshortener
1624 |
1625 | def vnshortener(url):
1626 | client = cloudscraper.create_scraper(allow_brotli=False)
1627 | DOMAIN = "https://vnshortener.com/"
1628 | url = url[:-1] if url[-1] == '/' else url
1629 | code = url.split("/")[-1]
1630 | final_url = f"{DOMAIN}/{code}"
1631 | ref = "https://nishankhatri.com.np/"
1632 | h = {"referer": ref}
1633 | resp = client.get(final_url,headers=h)
1634 | soup = BeautifulSoup(resp.content, "html.parser")
1635 | inputs = soup.find_all("input")
1636 | data = { input.get('name'): input.get('value') for input in inputs }
1637 | h = { "x-requested-with": "XMLHttpRequest" }
1638 | time.sleep(8)
1639 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1640 | try: return r.json()['url']
1641 | except: return "Something went wrong :("
1642 |
1643 |
1644 | #####################################################################################################
1645 | # onepagelink
1646 |
1647 | def onepagelink(url):
1648 | client = cloudscraper.create_scraper(allow_brotli=False)
1649 | DOMAIN = "go.onepagelink.in"
1650 | url = url[:-1] if url[-1] == "/" else url
1651 | code = url.split("/")[-1]
1652 | final_url = f"https://{DOMAIN}/{code}"
1653 | ref = "gorating.in"
1654 | h = {"referer": ref}
1655 | response = client.get(final_url, headers=h)
1656 | soup = BeautifulSoup(response.text, "html.parser")
1657 | inputs = soup.find_all("input")
1658 | data = {input.get("name"): input.get("value") for input in inputs}
1659 | h = {"x-requested-with": "XMLHttpRequest"}
1660 | time.sleep(9)
1661 | r = client.post(f"https://{DOMAIN}/links/go", data=data, headers=h)
1662 | try:
1663 | return r.json()["url"]
1664 | except BaseException:
1665 | return "Something went wrong :("
1666 |
1667 |
1668 | #####################################################################################################
1669 | # dulink
1670 |
1671 | def dulink(url):
1672 | client = cloudscraper.create_scraper(allow_brotli=False)
1673 | DOMAIN = "https://du-link.in"
1674 | url = url[:-1] if url[-1] == '/' else url
1675 | ref = "https://profitshort.com/"
1676 | h = {"referer": ref}
1677 | resp = client.get(url, headers=h)
1678 | soup = BeautifulSoup(resp.content, "html.parser")
1679 | inputs = soup.find_all("input")
1680 | data = { input.get('name'): input.get('value') for input in inputs }
1681 | h = { "x-requested-with": "XMLHttpRequest" }
1682 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1683 | try: return r.json()['url']
1684 | except: return "Something went wrong :("
1685 |
1686 |
1687 | #####################################################################################################
1688 | # krownlinks
1689 |
1690 | def krownlinks(url):
1691 | client = requests.session()
1692 | DOMAIN = "https://tech.bloggertheme.xyz"
1693 | url = url[:-1] if url[-1] == '/' else url
1694 | code = url.split("/")[-1]
1695 | final_url = f"{DOMAIN}/{code}"
1696 | resp = client.get(final_url)
1697 | soup = BeautifulSoup(resp.content, "html.parser")
1698 | try: inputs = soup.find(id="go-link").find_all(name="input")
1699 | except: return "Incorrect Link"
1700 | data = { input.get('name'): input.get('value') for input in inputs }
1701 | h = { "x-requested-with": "XMLHttpRequest" }
1702 | time.sleep(10)
1703 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1704 | try: return r.json()['url']
1705 | except: return "Something went wrong :("
1706 |
1707 |
1708 | ####################################################################################################
1709 | # adrinolink
1710 |
1711 | def adrinolink(url):
1712 | if "https://adrinolinks.in/" not in url: url = "https://adrinolinks.in/" + url.split("/")[-1]
1713 | client = cloudscraper.create_scraper(allow_brotli=False)
1714 | DOMAIN = "https://adrinolinks.in"
1715 | ref = "https://wikitraveltips.com/"
1716 | h = {"referer": ref}
1717 | resp = client.get(url,headers=h)
1718 | soup = BeautifulSoup(resp.content, "html.parser")
1719 | inputs = soup.find_all("input")
1720 | data = { input.get('name'): input.get('value') for input in inputs }
1721 | h = { "x-requested-with": "XMLHttpRequest" }
1722 | time.sleep(8)
1723 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1724 | try: return r.json()['url']
1725 | except: return "Something went wrong :("
1726 |
1727 |
1728 | #####################################################################################################
1729 | # mdiskshortners
1730 |
1731 | def mdiskshortners(url):
1732 | client = cloudscraper.create_scraper(allow_brotli=False)
1733 | DOMAIN = "https://mdiskshortners.in/"
1734 | url = url[:-1] if url[-1] == '/' else url
1735 | code = url.split("/")[-1]
1736 | final_url = f"{DOMAIN}/{code}"
1737 | ref = "https://www.adzz.in/"
1738 | h = {"referer": ref}
1739 | resp = client.get(final_url,headers=h)
1740 | soup = BeautifulSoup(resp.content, "html.parser")
1741 | inputs = soup.find_all("input")
1742 | data = { input.get('name'): input.get('value') for input in inputs }
1743 | h = { "x-requested-with": "XMLHttpRequest" }
1744 | time.sleep(2)
1745 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1746 | try: return r.json()['url']
1747 | except: return "Something went wrong :("
1748 |
1749 |
1750 | #####################################################################################################
1751 | # tinyfy
1752 |
1753 | def tiny(url):
1754 | client = requests.session()
1755 | DOMAIN = "https://tinyfy.in"
1756 | url = url[:-1] if url[-1] == '/' else url
1757 | code = url.split("/")[-1]
1758 | final_url = f"{DOMAIN}/{code}"
1759 | ref = "https://www.yotrickslog.tech/"
1760 | h = {"referer": ref}
1761 | resp = client.get(final_url,headers=h)
1762 | soup = BeautifulSoup(resp.content, "html.parser")
1763 | inputs = soup.find_all("input")
1764 | data = { input.get('name'): input.get('value') for input in inputs }
1765 | h = { "x-requested-with": "XMLHttpRequest" }
1766 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1767 | try: return r.json()['url']
1768 | except: return "Something went wrong :("
1769 |
1770 |
1771 | #####################################################################################################
1772 | # earnl
1773 |
1774 | def earnl(url):
1775 | client = requests.session()
1776 | DOMAIN = "https://v.earnl.xyz"
1777 | url = url[:-1] if url[-1] == '/' else url
1778 | code = url.split("/")[-1]
1779 | final_url = f"{DOMAIN}/{code}"
1780 | ref = "https://link.modmakers.xyz/"
1781 | h = {"referer": ref}
1782 | resp = client.get(final_url,headers=h)
1783 | soup = BeautifulSoup(resp.content, "html.parser")
1784 | inputs = soup.find_all("input")
1785 | data = { input.get('name'): input.get('value') for input in inputs }
1786 | h = { "x-requested-with": "XMLHttpRequest" }
1787 | time.sleep(5)
1788 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1789 | try:
1790 | return r.json()['url']
1791 | except: return "Something went wrong :("
1792 |
1793 |
1794 | #####################################################################################################
1795 | # moneykamalo
1796 |
1797 | def moneykamalo(url):
1798 | client = requests.session()
1799 | DOMAIN = "https://go.moneykamalo.com"
1800 | url = url[:-1] if url[-1] == '/' else url
1801 | code = url.split("/")[-1]
1802 | final_url = f"{DOMAIN}/{code}"
1803 | ref = "https://techkeshri.com/"
1804 | h = {"referer": ref}
1805 | resp = client.get(final_url,headers=h)
1806 | soup = BeautifulSoup(resp.content, "html.parser")
1807 | inputs = soup.find_all("input")
1808 | data = { input.get('name'): input.get('value') for input in inputs }
1809 | h = { "x-requested-with": "XMLHttpRequest" }
1810 | time.sleep(5)
1811 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1812 | try:
1813 | return r.json()['url']
1814 | except: return "Something went wrong :("
1815 |
1816 |
1817 | #####################################################################################################
1818 | # easysky
1819 |
1820 | def easysky(url):
1821 | client = cloudscraper.create_scraper(allow_brotli=False)
1822 | DOMAIN = "https://techy.veganab.co/"
1823 | url = url[:-1] if url[-1] == '/' else url
1824 | code = url.split("/")[-1]
1825 | final_url = f"{DOMAIN}/{code}"
1826 | ref = "https://veganab.co/"
1827 | h = {"referer": ref}
1828 | resp = client.get(final_url,headers=h)
1829 | soup = BeautifulSoup(resp.content, "html.parser")
1830 | inputs = soup.find_all("input")
1831 | data = { input.get('name'): input.get('value') for input in inputs }
1832 | h = { "x-requested-with": "XMLHttpRequest" }
1833 | time.sleep(8)
1834 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1835 | try: return r.json()['url']
1836 | except: return "Something went wrong :("
1837 |
1838 |
1839 | #####################################################################################################
1840 | # indiurl
1841 |
1842 | def indi(url):
1843 | client = requests.session()
1844 | DOMAIN = "https://file.earnash.com/"
1845 | url = url[:-1] if url[-1] == '/' else url
1846 | code = url.split("/")[-1]
1847 | final_url = f"{DOMAIN}/{code}"
1848 | ref = "https://indiurl.cordtpoint.co.in/"
1849 | h = {"referer": ref}
1850 | resp = client.get(final_url,headers=h)
1851 | soup = BeautifulSoup(resp.content, "html.parser")
1852 | inputs = soup.find_all("input")
1853 | data = { input.get('name'): input.get('value') for input in inputs }
1854 | h = { "x-requested-with": "XMLHttpRequest" }
1855 | time.sleep(10)
1856 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1857 | try:
1858 | return r.json()['url']
1859 | except: return "Something went wrong :("
1860 |
1861 |
1862 | #####################################################################################################
1863 | # linkbnao
1864 |
1865 | def linkbnao(url):
1866 | client = cloudscraper.create_scraper(allow_brotli=False)
1867 | DOMAIN = "https://vip.linkbnao.com"
1868 | url = url[:-1] if url[-1] == '/' else url
1869 | code = url.split("/")[-1]
1870 | final_url = f"{DOMAIN}/{code}"
1871 | ref = "https://ffworld.xyz/"
1872 | h = {"referer": ref}
1873 | resp = client.get(final_url,headers=h)
1874 | soup = BeautifulSoup(resp.content, "html.parser")
1875 | inputs = soup.find_all("input")
1876 | data = { input.get('name'): input.get('value') for input in inputs }
1877 | h = { "x-requested-with": "XMLHttpRequest" }
1878 | time.sleep(2)
1879 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1880 | try:
1881 | return r.json()['url']
1882 | except: return "Something went wrong :("
1883 |
1884 |
1885 | #####################################################################################################
1886 | # omegalinks
1887 |
1888 | def mdiskpro(url):
1889 | client = cloudscraper.create_scraper(allow_brotli=False)
1890 | DOMAIN = "https://mdisk.pro"
1891 | ref = "https://m.meclipstudy.in/"
1892 | h = {"referer": ref}
1893 | resp = client.get(url,headers=h)
1894 | soup = BeautifulSoup(resp.content, "html.parser")
1895 | inputs = soup.find_all("input")
1896 | data = { input.get('name'): input.get('value') for input in inputs }
1897 | h = { "x-requested-with": "XMLHttpRequest" }
1898 | time.sleep(8)
1899 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1900 | try:
1901 | return r.json()['url']
1902 | except: return "Something went wrong :("
1903 |
1904 |
1905 | #####################################################################################################
1906 | # tnshort
1907 |
1908 | def tnshort(url):
1909 | client = cloudscraper.create_scraper(allow_brotli=False)
1910 | DOMAIN = "https://page.tnlink.in/"
1911 | url = url[:-1] if url[-1] == '/' else url
1912 | code = url.split("/")[-1]
1913 | final_url = f"{DOMAIN}/{code}"
1914 | ref = "https://business.usanewstoday.club/"
1915 | h = {"referer": ref}
1916 | resp = client.get(final_url,headers=h)
1917 | soup = BeautifulSoup(resp.content, "html.parser")
1918 | inputs = soup.find_all("input")
1919 | data = { input.get('name'): input.get('value') for input in inputs }
1920 | h = { "x-requested-with": "XMLHttpRequest" }
1921 | time.sleep(8)
1922 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1923 | try: return r.json()['url']
1924 | except: return "Something went wrong :("
1925 |
1926 |
1927 | #####################################################################################################
1928 | # indianshortner
1929 |
1930 | def indshort(url):
1931 | client = cloudscraper.create_scraper(allow_brotli=False)
1932 | DOMAIN = "https://indianshortner.com/"
1933 | url = url[:-1] if url[-1] == '/' else url
1934 | code = url.split("/")[-1]
1935 | final_url = f"{DOMAIN}/{code}"
1936 | ref = "https://moddingzone.in/"
1937 | h = {"referer": ref}
1938 | resp = client.get(final_url,headers=h)
1939 | soup = BeautifulSoup(resp.content, "html.parser")
1940 | inputs = soup.find_all("input")
1941 | data = { input.get('name'): input.get('value') for input in inputs }
1942 | h = { "x-requested-with": "XMLHttpRequest" }
1943 | time.sleep(5)
1944 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1945 | try: return r.json()['url']
1946 | except: return "Something went wrong :("
1947 |
1948 |
1949 | #####################################################################################################
1950 | # mdisklink
1951 |
1952 | def mdisklink(url):
1953 | client = cloudscraper.create_scraper(allow_brotli=False)
1954 | DOMAIN = "https://mdisklink.link/"
1955 | url = url[:-1] if url[-1] == '/' else url
1956 | code = url.split("/")[-1]
1957 | final_url = f"{DOMAIN}/{code}"
1958 | ref = "https://m.proappapk.com/"
1959 | h = {"referer": ref}
1960 | resp = client.get(final_url,headers=h)
1961 | soup = BeautifulSoup(resp.content, "html.parser")
1962 | inputs = soup.find_all("input")
1963 | data = { input.get('name'): input.get('value') for input in inputs }
1964 | h = { "x-requested-with": "XMLHttpRequest" }
1965 | time.sleep(2)
1966 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h)
1967 | try: return r.json()['url']
1968 | except: return "Something went wrong :("
1969 |
1970 |
1971 | #####################################################################################################
1972 | # rslinks
1973 |
1974 | def rslinks(url):
1975 |     try:
1976 |         # grab the redirect target without downloading the body; the short code sits after 'ms9' in the Location header
1977 |         download = requests.get(url, stream=True, allow_redirects=False)
1978 |         code = download.headers["location"].split('ms9')[-1]
1979 |         return f"http://techyproio.blogspot.com/p/short.html?{code}=="
1980 |     except:
1981 |         return "Something went wrong :("
1982 |
1983 |
1984 | #####################################################################################################
1985 | # bitly + tinyurl
1986 |
1987 | def bitly_tinyurl(url: str) -> str:
1988 |     # requests follows the redirect chain; the final resolved address is in .url
1989 |     try: return requests.get(url).url
1990 |     except: return "Something went wrong :("
1991 |
1992 | #####################################################################################################
1993 | # thinfi
1994 |
1995 | def thinfi(url: str) -> str:
1996 |     try:
1997 |         # the destination link is the first anchor inside the first paragraph of the page
1998 |         return BeautifulSoup(requests.get(url).content, "html.parser").p.a.get("href")
1999 |     except: return "Something went wrong :("
2000 |
2001 | #####################################################################################################
2002 | # helpers
2003 |
2004 | # check if any entry of inlist occurs in the url
2005 | def ispresent(inlist,url):
2006 | for ele in inlist:
2007 | if ele in url:
2008 | return True
2009 | return False
2010 |
2011 |
2012 | # shortners
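     | # shortners() is the single entry point: it matches the incoming link against known
     | # domains and routes it to the matching bypass / scraper function above. The last two
     | # branches use ispresent() against gdlist and otherslist (assumed to be defined earlier
     | # in this module) to catch gdrive look-alikes and the remaining generic hosts.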
2013 | def shortners(url):
2014 |
2015 | # igg games
2016 | if "https://igg-games.com/" in url:
2017 | print("entered igg: ",url)
2018 | return igggames(url)
2019 |
2020 | # ola movies
2021 | elif "https://olamovies." in url:
2022 | print("entered ola movies: ",url)
2023 | return olamovies(url)
2024 |
2025 | # katdrive
2026 | elif "https://katdrive." in url:
2027 | if KATCRYPT == "":
2028 | return "🚫 __You can't use this because__ **KATDRIVE_CRYPT** __ENV is not set__"
2029 |
2030 | print("entered katdrive: ",url)
2031 | return katdrive_dl(url, KATCRYPT)
2032 |
2033 | # kolop
2034 | elif "https://kolop." in url:
2035 | if KCRYPT == "":
2036 | return "🚫 __You can't use this because__ **KOLOP_CRYPT** __ENV is not set__"
2037 |
2038 | print("entered kolop: ",url)
2039 | return kolop_dl(url, KCRYPT)
2040 |
2041 | # hubdrive
2042 | elif "https://hubdrive." in url:
2043 | if HCRYPT == "":
2044 | return "🚫 __You can't use this because__ **HUBDRIVE_CRYPT** __ENV is not set__"
2045 |
2046 | print("entered hubdrive: ",url)
2047 | return hubdrive_dl(url, HCRYPT)
2048 |
2049 | # drivefire
2050 | elif "https://drivefire." in url:
2051 | if DCRYPT == "":
2052 | return "🚫 __You can't use this because__ **DRIVEFIRE_CRYPT** __ENV is not set__"
2053 |
2054 | print("entered drivefire: ",url)
2055 | return drivefire_dl(url, DCRYPT)
2056 |
2057 | # filecrypt
2058 |     elif "https://filecrypt.co/" in url or "https://filecrypt.cc/" in url:
2059 | print("entered filecrypt: ",url)
2060 | return filecrypt(url)
2061 |
2062 | # shareus
2063 | elif "shareus.io" in url or "shareus.in" in url:
2064 | print("entered shareus: ",url)
2065 | return shareus(url)
2066 |
2067 | # shortingly
2068 | elif "https://shortingly.in/" in url:
2069 | print("entered shortingly: ",url)
2070 | return shortingly(url)
2071 |
2072 |     # vnshortener
2073 | elif "https://vnshortener.com/" in url:
2074 | print("entered vnshortener: ",url)
2075 | return vnshortener(url)
2076 |
2077 | # onepagelink
2078 | elif "https://onepagelink.in/" in url:
2079 | print("entered onepagelink: ",url)
2080 | return onepagelink(url)
2081 |
2082 | # gyanilinks
2083 | elif "https://gyanilinks.com/" in url or "https://gtlinks.me/" in url:
2084 | print("entered gyanilinks: ",url)
2085 | return gyanilinks(url)
2086 |
2087 | # flashlink
2088 | elif "https://go.flashlink.in" in url:
2089 | print("entered flashlink: ",url)
2090 | return flashl(url)
2091 |
2092 | # short2url
2093 | elif "https://short2url.in/" in url:
2094 | print("entered short2url: ",url)
2095 | return short2url(url)
2096 |
2097 | # shorte
2098 | elif "https://shorte.st/" in url:
2099 | print("entered shorte: ",url)
2100 | return sh_st_bypass(url)
2101 |
2102 | # psa
2103 | elif "https://psa.wf/" in url:
2104 | print("entered psa: ",url)
2105 | return psa_bypasser(url)
2106 |
2107 | # sharer pw
2108 | elif "https://sharer.pw/" in url:
2109 | if XSRF_TOKEN == "" or Laravel_Session == "":
2110 |             return "🚫 __You can't use this because__ **XSRF_TOKEN** __and__ **Laravel_Session** __ENVs are not set__"
2111 |
2112 | print("entered sharer: ",url)
2113 | return sharer_pw(url, Laravel_Session, XSRF_TOKEN)
2114 |
2115 | # gdtot url
2116 | elif "gdtot.cfd" in url:
2117 | print("entered gdtot: ",url)
2118 | return gdtot(url)
2119 |
2120 | # adfly
2121 | elif "https://adf.ly/" in url:
2122 | print("entered adfly: ",url)
2123 | out = adfly(url)
2124 | return out['bypassed_url']
2125 |
2126 | # gplinks
2127 | elif "https://gplinks.co/" in url:
2128 | print("entered gplink: ",url)
2129 | return gplinks(url)
2130 |
2131 | # droplink
2132 | elif "https://droplink.co/" in url:
2133 | print("entered droplink: ",url)
2134 | return droplink(url)
2135 |
2136 | # linkvertise
2137 | elif "https://linkvertise.com/" in url:
2138 | print("entered linkvertise: ",url)
2139 | return linkvertise(url)
2140 |
2141 | # rocklinks
2142 | elif "https://rocklinks.net/" in url:
2143 | print("entered rocklinks: ",url)
2144 | return rocklinks(url)
2145 |
2146 | # ouo
2147 | elif "https://ouo.press/" in url:
2148 | print("entered ouo: ",url)
2149 | return ouo(url)
2150 |
2151 | # try2link
2152 | elif "https://try2link.com/" in url:
2153 | print("entered try2links: ",url)
2154 | return try2link_bypass(url)
2155 |
2156 | # urlsopen
2157 | elif "https://urlsopen." in url:
2158 | print("entered urlsopen: ",url)
2159 | return urlsopen(url)
2160 |
2161 | # xpshort
2162 | elif "https://xpshort.com/" in url or "https://push.bdnewsx.com/" in url or "https://techymozo.com/" in url:
2163 | print("entered xpshort: ",url)
2164 | return xpshort(url)
2165 |
2166 | # dulink
2167 | elif "https://du-link.in/" in url:
2168 | print("entered dulink: ",url)
2169 | return dulink(url)
2170 |
2171 | # ez4short
2172 | elif "https://ez4short.com/" in url:
2173 | print("entered ez4short: ",url)
2174 | return ez4(url)
2175 |
2176 | # krownlinks
2177 | elif "https://krownlinks.me/" in url:
2178 | print("entered krownlinks: ",url)
2179 | return krownlinks(url)
2180 |
2181 | # adrinolink
2182 | elif "https://adrinolinks." in url:
2183 | print("entered adrinolink: ",url)
2184 | return adrinolink(url)
2185 |
2186 | # tnlink
2187 | elif "https://link.tnlink.in/" in url:
2188 | print("entered tnlink: ",url)
2189 | return tnlink(url)
2190 |
2191 | # mdiskshortners
2192 | elif "https://mdiskshortners.in/" in url:
2193 | print("entered mdiskshortners: ",url)
2194 | return mdiskshortners(url)
2195 |
2196 | # tinyfy
2197 | elif "tinyfy.in" in url:
2198 | print("entered tinyfy: ",url)
2199 | return tiny(url)
2200 |
2201 | # earnl
2202 | elif "go.earnl.xyz" in url:
2203 | print("entered earnl: ",url)
2204 | return earnl(url)
2205 |
2206 | # moneykamalo
2207 | elif "earn.moneykamalo.com" in url:
2208 | print("entered moneykamalo: ",url)
2209 | return moneykamalo(url)
2210 |
2211 | # easysky
2212 | elif "m.easysky.in" in url:
2213 | print("entered easysky: ",url)
2214 | return easysky(url)
2215 |
2216 | # indiurl
2217 | elif "go.indiurl.in.net" in url:
2218 | print("entered indiurl: ",url)
2219 | return indi(url)
2220 |
2221 | # linkbnao
2222 | elif "linkbnao.com" in url:
2223 | print("entered linkbnao: ",url)
2224 | return linkbnao(url)
2225 |
2226 | # omegalinks
2227 | elif "mdisk.pro" in url:
2228 | print("entered mdiskpro: ",url)
2229 | return mdiskpro(url)
2230 |
2231 | # tnshort
2232 | elif "tnshort.in" in url:
2233 | print("entered tnshort: ",url)
2234 | return tnshort(url)
2235 |
2236 | # indianshortner
2237 | elif "indianshortner.in" in url:
2238 | print("entered indianshortner: ",url)
2239 | return indshort(url)
2240 |
2241 | # mdisklink
2242 | elif "mdisklink.link" in url:
2243 | print("entered mdisklink: ",url)
2244 | return mdisklink(url)
2245 |
2246 | # rslinks
2247 | elif "rslinks.net" in url:
2248 | print("entered rslinks: ",url)
2249 | return rslinks(url)
2250 |
2251 | # bitly + tinyurl
2252 | elif "bit.ly" in url or "tinyurl.com" in url:
2253 | print("entered bitly_tinyurl: ",url)
2254 | return bitly_tinyurl(url)
2255 |
2256 | # pdisk
2257 | elif "pdisk.pro" in url:
2258 | print("entered pdisk: ",url)
2259 | return pdisk(url)
2260 |
2261 | # thinfi
2262 | elif "thinfi.com" in url:
2263 | print("entered thinfi: ",url)
2264 | return thinfi(url)
2265 |
2266 | # htpmovies sharespark cinevood
2267 | elif "https://htpmovies." in url or 'https://sharespark.me/' in url or "https://cinevood." in url or "https://atishmkv." in url \
2268 | or "https://teluguflix" in url or 'https://taemovies' in url or "https://toonworld4all" in url or "https://animeremux" in url:
2269 | print("entered htpmovies sharespark cinevood atishmkv: ",url)
2270 | return scrappers(url)
2271 |
2272 | # gdrive look alike
2273 | elif ispresent(gdlist,url):
2274 | print("entered gdrive look alike: ",url)
2275 | return unified(url)
2276 |
2277 | # others
2278 | elif ispresent(otherslist,url):
2279 | print("entered others: ",url)
2280 | return others(url)
2281 |
2282 | # else
2283 | else: return "Not in Supported Sites"
2284 |
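     | # Usage sketch (assumption: the worker in main.py imports this module and calls
     | # shortners() on user-supplied links from the bot handler):
     | #     result = shortners("https://bit.ly/3abcxyz")   # hypothetical short link
     | #     -> returns the bypassed URL, an error string, or "Not in Supported Sites"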
2285 |
2286 | ################################################################################################################################
2287 |
--------------------------------------------------------------------------------