├── README.md
└── test.py

/README.md:
--------------------------------------------------------------------------------
# Pornpics images downloader

With this script you can download all images of any pornstar from [pornpics](https://www.pornpics.com). Just enter a pornstar's name, sit back, and it will download all of their images.

## Prerequisites

1. bs4
2. requests

## How to use?

### Step 1:

- Clone this repository and start working by editing the code:

`git clone https://github.com/whitehatjrchintu/pornpics.git`

`cd pornpics`

- Or download this [repository](https://github.com/whitehatjrchintu/pornpics/archive/main.zip) as a zip.

### Step 2:

- Run `python3 test.py` and enter the name of your pornstar.

## Example

![example](https://i.ibb.co/pfCfrTL/Capture.png "example")
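## How it works

The script runs in three stages: it queries the site's JSON search endpoint and saves each result's `g_url` (the post URL), then fetches every post page and collects the `href` of each `rel-link` anchor (the full-size images), and finally downloads those images in parallel with a multiprocessing pool. Here is a minimal sketch of the first stage; the endpoint, query parameters, and `g_url` field come from `test.py` below, while the `search_posts` name is only illustrative and the endpoint may also require the full browser-like headers and cookies the script sends:

```python
import requests

def search_posts(query, limit=100):
    """Sketch: fetch post URLs for a search term from the JSON search endpoint."""
    resp = requests.get(
        "https://www.pornpics.com/search/srch.php",
        params={"q": query, "limit": limit, "offset": 0},
        headers={"User-Agent": "Mozilla/5.0"},  # simplified; test.py sends full browser headers
    )
    resp.raise_for_status()
    # Each entry in the returned JSON array carries the post URL under "g_url".
    return [entry["g_url"] for entry in resp.json()]
```

`test.py` sidesteps pagination by passing a very large `limit` and an empty `offset`; paging with smaller `limit`/`offset` values until the endpoint returns an empty list should also work, though that is an assumption.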
--------------------------------------------------------------------------------

/test.py:
--------------------------------------------------------------------------------
from multiprocessing import Pool
from bs4 import BeautifulSoup
import requests
import shutil
import time

# Browser-like headers and session cookies; the site may refuse bare requests.
HEADERS = {"Connection": "close", "DNT": "1", "Upgrade-Insecure-Requests": "1", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", "Sec-Fetch-Site": "none", "Sec-Fetch-Mode": "navigate", "Sec-Fetch-User": "?1", "Sec-Fetch-Dest": "document", "Accept-Encoding": "gzip, deflate", "Accept-Language": "en-US,en;q=0.9,hi;q=0.8"}
COOKIES = {"cookie": "__ae_uid_sess_id=b162cbb3-9e86-4a55-ac81-f1b1cccdd6e0; PP_UVM=1; _stat=2598133695.1528214785.23479322.3739817806; _ga=GA1.2.1272764643.1603974465; _gid=GA1.2.1206331922.1605948774"}


def getting_post_urls(name):
    # The search endpoint returns a JSON array; each entry's 'g_url' is a post URL.
    url = 'https://www.pornpics.com/search/srch.php?q=' + name + '&limit=100000&offset='
    req = requests.get(url, headers=HEADERS, cookies=COOKIES)
    with open(name + "'s post_links.txt", "a") as file:
        for entry in req.json():
            file.write(entry['g_url'] + "\n")
    print("Got all post URLs.")


def getting_image_urls(name):
    with open(name + "'s post_links.txt", 'r') as f:
        for line in f:
            time.sleep(2)  # pause between requests to go easy on the server
            payload = requests.get(line.strip(), headers=HEADERS, cookies=COOKIES)
            soup = BeautifulSoup(payload.content, 'html.parser')
            # Full-size images are linked from anchors with class 'rel-link'.
            with open(name + "'s image_links.txt", "a") as file:
                for image in soup.find_all("a", attrs={'class': 'rel-link'}):
                    href = image.get("href")
                    if href:
                        file.write(href + "\n")
    print("Got all image URLs.")


def download(url):
    try:
        response = requests.get(url, stream=True)
        response.raw.decode_content = True  # decode gzip/deflate before writing to disk
        filename = url.split("/")[-1]
        with open(filename, 'wb') as out_file:
            shutil.copyfileobj(response.raw, out_file)
    except Exception:
        pass  # skip images that fail to download


def main(name):
    with open(name + "'s image_links.txt", 'r') as f:
        urls = f.read().splitlines()
    print("Downloading", len(urls), "images. Please wait.")
    number_of_workers = 4
    with Pool(number_of_workers) as pool:
        pool.map(download, urls)
    print("Done.")


if __name__ == "__main__":
    # Prompt inside the main guard so multiprocessing workers don't re-run input().
    name = input("Enter name :: ")
    getting_post_urls(name)
    getting_image_urls(name)
    main(name)
--------------------------------------------------------------------------------