├── .github
│   └── workflows
│       └── black.yml
├── DevFest_India_2020_Schedule
│   ├── README.md
│   ├── devfest_schedule.py
│   ├── requirements.txt
│   ├── schedule.png
│   └── working.png
├── Digital_clock
│   ├── Digital Clock.PNG
│   ├── README.md
│   └── digital_clock.py
├── Dns_record
│   ├── README.md
│   ├── dns_record.py
│   └── requirements.txt
├── Extract_zip_files
│   ├── README.md
│   └── extract_zip_files.py
├── Flipkart_Scrapper
│   ├── Flipkart_smartphones.xlsx
│   └── main.py
├── Geocoding
│   ├── README.md
│   ├── geocoding.py
│   └── requirements.txt
├── Hacktoberfest_Events
│   ├── README.md
│   ├── hacktoberfest_events.py
│   └── requirements.txt
├── IMDB_scrapper
│   ├── README.md
│   ├── imdb_scrapper.py
│   ├── requirements.txt
│   └── sample.PNG
├── Json_to_yaml
│   ├── README.md
│   ├── example.json
│   ├── example.yaml
│   ├── json_to_yaml.py
│   └── requirements.txt
├── LICENSE
├── Pycon_Proposals
│   ├── README.md
│   ├── pycon_proposals.py
│   └── requirements.txt
├── README.md
├── Scrape_Hospital_Website
│   └── scrape.py
├── Time_to_load_website
│   ├── README.md
│   ├── sample.PNG
│   └── time_to_load_website.py
├── Wallpaper Setter
│   ├── README.md
│   └── wallpaper_setter.py
└── XKCD_downloader
    ├── README.md
    ├── requirements.txt
    └── xkcd_dowloader.py
/.github/workflows/black.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 |   lint:
7 |     runs-on: ubuntu-latest
8 |     steps:
9 |       - uses: actions/checkout@v2
10 |       - uses: actions/setup-python@v2
11 |       - uses: psf/black@stable
12 |         with:
13 |           args: ". --check"
14 |
--------------------------------------------------------------------------------
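
A note for local use: the same check this workflow runs in CI can be reproduced before pushing. A minimal sketch, assuming Black is installed from PyPI:

    pip install black
    black --check .

`--check` only reports the files Black would reformat; dropping the flag rewrites them in place.
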
/DevFest_India_2020_Schedule/README.md:
--------------------------------------------------------------------------------
1 | # DevFest India 2020 Schedule
2 |
3 | Scrapes all the event schedules of DevFest India 2020 and stores them in a CSV file.
4 |
5 | ## *Author Name*
6 | [Anshul Pandey](https://github.com/Anshul275)
7 |
8 | ## Pre-Requisites
9 |
10 | Run the command `pip install -r requirements.txt`
11 |
12 | ### To install the Chrome web driver:
13 | 1. Check the Chrome version you are currently using at `chrome://settings/help`
14 | 2. Download the matching Chrome web driver from `https://chromedriver.chromium.org/downloads` and extract the zip file
15 |
16 | **Important** - Set the `driver_path` variable in `devfest_schedule.py` to the full path of the `chromedriver` executable
17 |
18 | ## To Run the File
19 |
20 | For Windows - `python devfest_schedule.py`
21 |
22 | For Ubuntu/Linux - `python3 devfest_schedule.py`
23 |
24 | ## Screenshots
25 |
26 | ### Working Screenshot
27 |
28 | 
29 |
30 | ### Generated DevFest Schedule CSV file
31 |
32 | 
--------------------------------------------------------------------------------
/DevFest_India_2020_Schedule/devfest_schedule.py:
--------------------------------------------------------------------------------
1 | import csv
2 | from selenium import webdriver
3 | from bs4 import BeautifulSoup
4 |
5 | # Function to scrape the DevFest schedule and store it in a csv file
6 | def devfest_schedule():
7 |     url = "https://devfestindia.com/schedule"
8 |
9 |     # Running the driver in headless mode
10 |     options = webdriver.ChromeOptions()
11 |     options.add_argument("headless")
12 |
13 |     # Change the driver_path to where your chrome driver is installed
14 |     driver_path = "/Users/pc/Desktop/Rough/DevFest_India_2020_Schedule/chromedriver/chromedriver.exe"
15 |     driver = webdriver.Chrome(executable_path=driver_path, options=options)
16 |
17 |     # Requesting the desired webpage through the selenium Chrome driver
18 |     driver.get(url)
19 |     select_page_2 = "/html/body/div/div/div[3]/main/div/div[1]/div/div/header/div[2]/div/div/div[2]/div/a[2]"
20 |     select_page_3 = "/html/body/div/div/div[3]/main/div/div[1]/div/div/header/div[2]/div/div/div[2]/div/a[3]"
21 |     driver.find_element_by_xpath(select_page_2).click()
22 |     driver.find_element_by_xpath(select_page_3).click()
23 |
24 |     # Storing the entire devfest schedule webpage in the html variable
25 |     html = driver.page_source
26 |     driver.quit()
27 |
28 |     soup = BeautifulSoup(html, "lxml")
29 |
30 |     day_wise_schedule = soup.find_all("div", attrs={"class": "v-window-item"})
31 |
32 |     with open("devfest_schedule.csv", "w") as csv_file:
33 |         writer = csv.writer(csv_file)
34 |
35 |         # Initializing the first row with the column titles
36 |         writer.writerow(["Name of Event", "Date", "Timings", "Tag", "Author"])
37 |
38 |         starting_date = 16
39 |         for schedule in day_wise_schedule:
40 |             events = schedule.find_all(
41 |                 "div",
42 |                 attrs={
43 |                     "class": "row pa-0 my-0 align-center justify-center row-border-white"
44 |                 },
45 |             )
46 |             for event in events:
47 |                 event_details = event.find(
48 |                     "div", attrs={"class": "py-3 ma-1 fill-height"}
49 |                 )
50 |                 event_timings = event.find(
51 |                     "div", attrs={"class": "text-right my-0 py-0 col-md-2 col-3"}
52 |                 ).find_all("p")
53 |
54 |                 event_name = event_details.find("p").text
55 |                 event_date = "October " + str(starting_date)
56 |                 event_time = (
57 |                     event_timings[0].text.replace(" ", "")
58 |                     + "-"
59 |                     + event_timings[1].text
60 |                     + " "
61 |                     + event_timings[2].text.replace(" ", "")
62 |                 )
63 |                 event_tag = event_details.find(
64 |                     "span",
65 |                     attrs={
66 |                         "class": "mt-2 mr-2 v-chip v-chip--label v-chip--no-color theme--light v-size--small"
67 |                     },
68 |                 ).text
69 |                 authors = event_details.find_all(
70 |                     "span",
71 |                     attrs={
72 |                         "class": "mt-2 mr-2 v-chip v-chip--label v-chip--no-color v-chip--outlined theme--light v-size--small"
73 |                     },
74 |                 )
75 |                 event_authors = ""
76 |                 for author in authors:
77 |                     event_authors = event_authors + author.text.replace(" ", "") + " "
78 |
79 |                 # Adding each event to the csv file
80 |                 writer.writerow(
81 |                     [event_name, event_date, event_time, event_tag, event_authors]
82 |                 )
83 |
84 |             starting_date = starting_date + 1
85 |
86 |
87 | if __name__ == "__main__":
88 |     # Scraping the DevFest India 2020 Schedule and storing it in a csv file
89 |     devfest_schedule()
90 |     print("devfest_schedule.csv file has been generated")
91 |
--------------------------------------------------------------------------------
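
A portability note on the script above: `executable_path` and `find_element_by_xpath` were removed in Selenium 4, so on a current install the driver setup would look like the following sketch (the chromedriver path is a placeholder, exactly as in the script):

    from selenium import webdriver
    from selenium.webdriver.chrome.service import Service
    from selenium.webdriver.common.by import By

    options = webdriver.ChromeOptions()
    options.add_argument("--headless")  # run without opening a browser window

    # Point this at your local chromedriver binary, as the README describes
    service = Service("/path/to/chromedriver")
    driver = webdriver.Chrome(service=service, options=options)

    # Selenium 4 spelling of find_element_by_xpath(...)
    driver.find_element(By.XPATH, select_page_2).click()

`select_page_2` here refers to the same XPath variable defined in the script.
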
/DevFest_India_2020_Schedule/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/DevFest_India_2020_Schedule/requirements.txt
--------------------------------------------------------------------------------
/DevFest_India_2020_Schedule/schedule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/DevFest_India_2020_Schedule/schedule.png
--------------------------------------------------------------------------------
/DevFest_India_2020_Schedule/working.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/DevFest_India_2020_Schedule/working.png
--------------------------------------------------------------------------------
/Digital_clock/Digital Clock.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/Digital_clock/Digital Clock.PNG
--------------------------------------------------------------------------------
/Digital_clock/README.md:
--------------------------------------------------------------------------------
1 | # Digital Clock using Python and Tkinter
2 | This script creates a digital clock that shows the system's current time.
3 |
4 | ## Libraries Used
5 | * tkinter
6 | * time
7 |
8 | ### Required modules
9 | * Both `tkinter` and `time` ship with the Python standard library, so no `pip install` is needed. On some Linux distributions tkinter is packaged separately (for example, `sudo apt-get install python3-tk`).
10 |
11 | ### How to run the script
12 | - Execute `python3 digital_clock.py`
13 |
14 | ### Screenshot/GIF showing the sample use of the script
15 |
16 | 
17 |
--------------------------------------------------------------------------------
/Digital_clock/digital_clock.py:
--------------------------------------------------------------------------------
1 | import tkinter as tk
2 | from time import strftime
3 |
4 |
5 | def light_theme():
6 |     frame = tk.Frame(root, bg="white")
7 |     frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
8 |     lbl_1 = tk.Label(
9 |         frame, font=("calibri", 40, "bold"), background="white", foreground="black"
10 |     )
11 |     lbl_1.pack(anchor="s")
12 |
13 |     def time():
14 |         string = strftime("%I:%M:%S %p")
15 |         lbl_1.config(text=string)
16 |         lbl_1.after(1000, time)
17 |
18 |     time()
19 |
20 |
21 | def dark_theme():
22 |     frame = tk.Frame(root, bg="#22478a")
23 |     frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
24 |     lbl_2 = tk.Label(
25 |         frame, font=("calibri", 40, "bold"), background="#22478a", foreground="black"
26 |     )
27 |     lbl_2.pack(anchor="s")
28 |
29 |     def time():
30 |         string = strftime("%I:%M:%S %p")
31 |         lbl_2.config(text=string)
32 |         lbl_2.after(1000, time)
33 |
34 |     time()
35 |
36 |
37 | root = tk.Tk()
38 | root.title("Digital-Clock")
39 | canvas = tk.Canvas(root, height=140, width=400)
40 | canvas.pack()
41 |
42 | frame = tk.Frame(root, bg="#22478a")
43 | frame.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)
44 | lbl = tk.Label(
45 |     frame, font=("calibri", 40, "bold"), background="#22478a", foreground="black"
46 | )
47 | lbl.pack(anchor="s")
48 |
49 |
50 | def time():
51 |     string = strftime("%I:%M:%S %p")
52 |     lbl.config(text=string)
53 |     lbl.after(1000, time)
54 |
55 |
56 | time()
57 |
58 | menubar = tk.Menu(root)
59 | theme_menu = tk.Menu(menubar, tearoff=0)
60 | theme_menu.add_command(label="Light", command=light_theme)
61 | theme_menu.add_command(label="Dark", command=dark_theme)
62 | menubar.add_cascade(label="Theme", menu=theme_menu)
63 | root.config(menu=menubar)
64 | root.mainloop()
65 |
--------------------------------------------------------------------------------
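
A design note on the script above: every theme switch builds a new frame and label and starts another `after` loop, so the old callbacks keep ticking underneath the new widgets. A sketch of an alternative that reuses the one label created at startup and only swaps colours (`apply_theme` is a hypothetical helper; `frame`, `lbl`, and `theme_menu` are the widgets from the script):

    def apply_theme(bg, fg):
        # Reconfigure the existing widgets instead of stacking new ones
        frame.config(bg=bg)
        lbl.config(background=bg, foreground=fg)

    theme_menu.add_command(label="Light", command=lambda: apply_theme("white", "black"))
    theme_menu.add_command(label="Dark", command=lambda: apply_theme("#22478a", "white"))
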
/Dns_record/README.md:
--------------------------------------------------------------------------------
1 | ## DNS Record
2 |
3 | This script takes a website name as input and returns its DNS records.
4 |
5 | ### Requirements to run this file:
6 |
7 | An external library called dnspython is used here; it can be installed easily with the following command:
8 |
9 |     pip install -r requirements.txt
10 |
11 | ### How to use this script?
12 |
13 | 1. Install the requirements.
14 |
15 | 2. Type the following command:
16 |
17 |        python dns_record.py
18 |
19 | 3. It will ask for a website:
20 |
21 |    You can give any website name, for example: google.com
22 |
--------------------------------------------------------------------------------
/Dns_record/dns_record.py:
--------------------------------------------------------------------------------
1 | # Simple program to fetch the dns records of a given website
2 |
3 | import dns.resolver
4 |
5 | # Dictionary to store the dns records of a website
6 | dns_record = {}
7 |
8 | # User defined website
9 | website = input("Enter the name of the website: ")
10 |
11 | # Fetching the 'A' record of the website and storing it in the dictionary
12 | a_record = dns.resolver.resolve(website, "A")
13 | for ipval in a_record:
14 |     dns_record["A_Record_IP"] = ipval.to_text()
15 |
16 | # List to store the mx records of a website
17 | mx_record_list = []
18 |
19 | # Fetching the mx records and storing them in the dictionary
20 | mx_record = dns.resolver.resolve(website, "MX")
21 | for server in mx_record:
22 |     mx_record_list.append(server)
23 | for i, element in enumerate(mx_record_list):
24 |     # A string key keeps the printed output readable (a tuple key would print as ('MX_Record', 1))
25 |     dns_record[f"MX_Record_{i + 1}"] = element
26 |
27 | # Displaying the records on the screen
28 | for key, value in dns_record.items():
29 |     print(f"{key} = {value}")
30 |
--------------------------------------------------------------------------------
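
One caveat with the script above: `dns.resolver.resolve` raises if the domain does not exist or publishes no record of the requested type. A minimal sketch of the MX lookup with dnspython's specific exceptions handled (same `website` variable as in the script):

    try:
        for i, server in enumerate(dns.resolver.resolve(website, "MX")):
            print(f"MX_Record_{i + 1} = {server}")
    except dns.resolver.NXDOMAIN:
        print("That domain does not exist")
    except dns.resolver.NoAnswer:
        print("No MX records are published for that domain")
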
/Dns_record/requirements.txt:
--------------------------------------------------------------------------------
1 | dnspython==2.0.0
--------------------------------------------------------------------------------
/Extract_zip_files/README.md:
--------------------------------------------------------------------------------
1 | ## Extract zip files
2 |
3 | This script takes a zip file as input and extracts its contents into a separate folder.
4 | The folder is named the same as the input zip file and is saved in the current directory.
5 |
6 | ### How to use this?
7 | Just type the following in the command prompt:
8 |
9 |     python extract_zip_files.py -l "Your_zip_file"
10 |
11 | Example:
12 |
13 |     python extract_zip_files.py -l Amazing_Spiderman_subtitles.zip
14 |
--------------------------------------------------------------------------------
/Extract_zip_files/extract_zip_files.py:
--------------------------------------------------------------------------------
1 | import os
2 | import zipfile
3 | import sys
4 | import argparse
5 |
6 | # Code to add the cli
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("-l", "--zippedfile", required=True, help="Zipped file")
9 | args = vars(parser.parse_args())
10 |
11 | # Catching the user defined zip file
12 | zip_file = args["zippedfile"]
13 |
14 | file_name = zip_file
15 |
16 | # To check if the entered zip file is present in the directory
17 | if not os.path.exists(zip_file):
18 |     sys.exit("No such file present in the directory")
19 |
20 | # Function to extract the zip file
21 | def extract(zip_file):
22 |     file_name = zip_file.split(".zip")[0]
23 |     if zip_file.endswith(".zip"):
24 |
25 |         # Will use this to save the unzipped file in the current directory
26 |         current_working_directory = os.getcwd()
27 |         new_directory = os.path.join(current_working_directory, file_name)
28 |         # Logic to unzip the file
29 |         with zipfile.ZipFile(zip_file, "r") as zip_object:
30 |             zip_object.extractall(new_directory)
31 |         print("Extracted successfully!!!")
32 |     else:
33 |         print("Not a zip file")
34 |
35 |
36 | extract(zip_file)
37 |
--------------------------------------------------------------------------------
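
Before extracting an archive from an unknown source, it can help to look inside it first. A small sketch using the same `zipfile` module (the archive name reuses the README's example):

    import zipfile

    with zipfile.ZipFile("Amazing_Spiderman_subtitles.zip") as zip_object:
        for info in zip_object.infolist():
            print(f"{info.filename}  ({info.file_size} bytes)")
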
/Flipkart_Scrapper/Flipkart_smartphones.xlsx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/Flipkart_Scrapper/Flipkart_smartphones.xlsx
--------------------------------------------------------------------------------
/Flipkart_Scrapper/main.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup
3 | import pandas as pd
4 |
5 | base_url = "https://www.flipkart.com"
6 | product = input("Enter a product of your choice: ").lower()
7 | # product = "smartphones"
8 | product_links = list()
9 | product_offered_prices = list()
10 | product_original_prices = list()
11 | product_names = list()
12 |
13 | for i in range(1, 4):  # In order to have at least 50 records
14 |     response = requests.get(f"{base_url}/search?q={product}&page={i}")
15 |     soup = BeautifulSoup(response.text, "lxml")
16 |     product_link_list = soup.find_all("a", class_="_1fQZEK")
17 |     for product_link in product_link_list:
18 |         product_links.append(base_url + product_link["href"])
19 |     names = soup.find_all("div", class_="_4rR01T")
20 |     for name in names:
21 |         product_names.append(name.text.strip())
22 |     offer_prices = soup.find_all("div", class_="_30jeq3 _1_WHN1")
23 |     for offer_price in offer_prices:
24 |         product_offered_prices.append(offer_price.text)
25 |     original_prices = soup.find_all("div", class_="_3I9_wc _27UcVY")
26 |     for original_price in original_prices:
27 |         product_original_prices.append(original_price.text)
28 |
29 | df = pd.DataFrame(
30 |     {
31 |         "Product Name": product_names,
32 |         "Offered Price": product_offered_prices,
33 |         "Original Price": product_original_prices,
34 |         "Product Url": product_links,
35 |     }
36 | )
37 |
38 | df.to_excel(f"Flipkart_{product}.xlsx", index=False)
39 |
--------------------------------------------------------------------------------
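
Two caveats about the script above: class names like `_1fQZEK` are build-generated and change whenever Flipkart redeploys, and the site may serve an empty or blocked page to the default `requests` User-Agent. A sketch of the request with a browser-like header (the header string is illustrative only):

    # Inside the page loop of main.py:
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"  # illustrative value
    }
    response = requests.get(f"{base_url}/search?q={product}&page={i}", headers=headers)

If the class names stop matching, the `find_all` calls simply return empty lists, which is the first thing to check when the output file comes out blank.
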
/Geocoding/README.md:
--------------------------------------------------------------------------------
1 | ## Geocoding Script
2 |
3 | ### This script takes an address and returns its latitude and longitude. This process is called geocoding.
4 |
5 | #### I have used the LocationIQ geocoding API in order to solve this problem.
6 |
7 | #### To be able to use this script you have to create a *free account* at https://locationiq.com/ and obtain your *private token*.
8 |
9 | #### Remember, *don't share* your private token with anyone.
10 |
--------------------------------------------------------------------------------
/Geocoding/geocoding.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | # Base Url for geocoding
4 | url = "https://us1.locationiq.com/v1/search.php"
5 |
6 | address = input("Input the address: ")
7 |
8 | # Your unique private token should replace the value of the private_token variable.
9 | # To learn how to obtain a private token, please refer to the README file for this script.
10 | private_token = "Your_private_token"
11 |
12 | data = {"key": private_token, "q": address, "format": "json"}
13 |
14 | response = requests.get(url, params=data)
15 |
16 | latitude = response.json()[0]["lat"]
17 | longitude = response.json()[0]["lon"]
18 |
19 | print(f"The latitude of the given address is: {latitude}")
20 | print(f"The longitude of the given address is: {longitude}")
21 | print("Thanks for using this script")
22 |
--------------------------------------------------------------------------------
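
Since the README warns against sharing the private token, one option is to keep it out of the source file entirely and read it from the environment instead. A minimal sketch (the variable name `LOCATIONIQ_TOKEN` is an assumption, not part of the LocationIQ API):

    import os
    import sys

    private_token = os.getenv("LOCATIONIQ_TOKEN")  # hypothetical variable name
    if not private_token:
        sys.exit("Set the LOCATIONIQ_TOKEN environment variable first")
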
/Geocoding/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2020.6.20
2 | chardet==3.0.4
3 | idna==2.10
4 | requests==2.24.0
5 | urllib3==1.26.5
6 |
--------------------------------------------------------------------------------
/Hacktoberfest_Events/README.md:
--------------------------------------------------------------------------------
1 | ## Hacktoberfest Events
2 |
3 | ### This script scrapes all the events from the Hacktoberfest events website and stores them in a csv file.
4 |
5 | ### How to use this script?
6 |
7 | 1. Make sure all the requirements for the script are present in your system by running:
8 |
9 | pip install -r requirements.txt
10 |
11 | 2. Run the following command:
12 |
13 | python hacktoberfest_events.py
14 |
15 | ### Author
16 |
17 | [Aditya Jetely](https://github.com/AdityaJ7)
--------------------------------------------------------------------------------
/Hacktoberfest_Events/hacktoberfest_events.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup as bs
3 | import pandas as pd
4 |
5 |
6 | def scrape_tablerows():
7 |     """This function scrapes the tablerows related to our target elements.
8 |     Our target elements here are the events of Hacktoberfest.
9 |
10 |     Returns:
11 |         tablerows (list): A list of tablerows of our target elements.
12 |     """
13 |     hacktoberfest_events_url = "https://hacktoberfest.digitalocean.com/events"
14 |     response = requests.get(hacktoberfest_events_url)
15 |     soup = bs(response.content, "html.parser")
16 |     mydivs = soup.findAll("tbody", {"class": "list"})
17 |     tablerows = mydivs[0].findAll("tr")
18 |     return tablerows
19 |
20 |
21 | def hacktoberfest_events(tablerows):
22 |     """This function takes the list of tablerows as input, scrapes the
23 |     required elements, stores the scraped data in a dictionary and
24 |     returns that dictionary.
25 |
26 |     Args:
27 |         tablerows (list): List of tablerows of the target elements.
28 |     """
29 |     events = {}
30 |     for i, tablerow in enumerate(tablerows):
31 |         location = tablerow.find("td", {"class": "location"}).text
32 |         link = tablerow.find("a")["href"]
33 |         name = tablerow.find("td", {"class": "event_name"}).text.strip()
34 |         date = tablerow.find("td", {"class": "date is-hidden"}).text.strip()
35 |         events[i] = [name, date, location, link]
36 |     return events
37 |
38 |
39 | def make_csv(events):
40 |     """This function converts the dictionary input into
41 |     a csv file.
42 |
43 |     Args:
44 |         events (dict): Dictionary object containing the event information.
45 |     """
46 |     df = pd.DataFrame.from_dict(events, orient="index")
47 |     df.columns = ["Name", "Date", "Location", "Link"]
48 |     df.to_csv("hacktoberfest_events.csv")
49 |
50 |
51 | if __name__ == "__main__":
52 |     tablerows = scrape_tablerows()
53 |     events = hacktoberfest_events(tablerows)
54 |     make_csv(events)
55 |     print("The events have been stored successfully")
56 |
--------------------------------------------------------------------------------
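
A robustness note on the script above: `tablerow.find(...)` returns `None` when a cell is missing, and the chained `.text` then raises `AttributeError`. A sketch of a small fallback helper (the helper name is hypothetical):

    def cell_text(tablerow, class_name):
        """Return the stripped text of a table cell, or 'N/A' if the cell is absent."""
        cell = tablerow.find("td", {"class": class_name})
        return cell.text.strip() if cell else "N/A"

    # e.g. location = cell_text(tablerow, "location")
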
/Hacktoberfest_Events/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4==4.9.1
2 | certifi==2020.6.20
3 | chardet==3.0.4
4 | idna==2.10
5 | requests==2.24.0
6 | soupsieve==2.0.1
7 | urllib3==1.26.5
8 | pandas==1.1.2
--------------------------------------------------------------------------------
/IMDB_scrapper/README.md:
--------------------------------------------------------------------------------
1 | ## IMDB Scrapper
2 |
3 | This script takes a movie name as input and returns information about that movie.
4 |
5 | ## Requirements for this script:
6 |
7 | 1. BeautifulSoup4
8 | 2. requests
9 |
10 | Install these two by running the following command:
11 |
12 |     pip install -r requirements.txt
13 |
14 | ## How to use this script?
15 |
16 | Just type the following in your command prompt:
17 |
18 |     python imdb_scrapper.py -l "movie_name"
19 |
20 | ## Sample of the script in action:
21 |
22 | 
23 |
--------------------------------------------------------------------------------
/IMDB_scrapper/imdb_scrapper.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup as bs
3 | import sys
4 | import argparse
5 |
6 | # Code to add the command line interface
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("-l", "--movie", required=True, help="Movie Name")
9 | args = vars(parser.parse_args())
10 |
11 | # Base IMDB URL to search for movie titles
12 | IMDB_URL = "https://www.imdb.com/search/title/?title="
13 |
14 | # Movie the user wants to search for
15 | USER_DEFINED_MOVIE = args["movie"]
16 |
17 | # To handle connection errors
18 | try:
19 |     response = requests.get(IMDB_URL + USER_DEFINED_MOVIE)
20 |
21 | except requests.exceptions.ConnectionError as error:
22 |     sys.exit("Check your connection!")
23 |
24 | # Creating a soup object
25 | soup = bs(response.content, "html.parser")
26 |
27 | # Function to scrape the details of the movie, set N/A if a detail is missing, and store the details in a dictionary
28 | def scrape_and_store(soup):
29 |
30 |     # This dictionary stores the movie information
31 |     movie_info = {}
32 |
33 |     # Try and except blocks to ensure correct data retrieval
34 |     try:
35 |         movie = soup.select(".lister-item-content")[0]
36 |     except:
37 |         movie = "N/A"
38 |
39 |     if movie == "N/A":
40 |         sys.exit("Movie not found in IMDB")
41 |
42 |     try:
43 |         movie_info["Name"] = movie.find("a").contents[0]
44 |     except:
45 |         movie_info["Name"] = "N/A"
46 |     try:
47 |         movie_info["Rating"] = movie.select(".value")[0].contents[0]
48 |     except:
49 |         movie_info["Rating"] = "N/A"
50 |     try:
51 |         movie_info["Released"] = movie.find_all("span")[1].contents[0][1:-1]
52 |     except:
53 |         movie_info["Released"] = "N/A"
54 |     try:
55 |         movie_info["Certificate"] = movie.select(".certificate")[0].contents[0]
56 |     except:
57 |         movie_info["Certificate"] = "N/A"
58 |     try:
59 |         movie_info["Runtime"] = movie.select(".runtime")[0].contents[0]
60 |     except:
61 |         movie_info["Runtime"] = "N/A"
62 |     try:
63 |         movie_info["Genre"] = movie.select(".genre")[0].contents[0].strip()
64 |     except:
65 |         movie_info["Genre"] = "N/A"
66 |     try:
67 |         movie_info["Summary"] = movie.select(".text-muted")[2].contents[0].strip()
68 |     except:
69 |         movie_info["Summary"] = "N/A"
70 |     try:
71 |         movie_info["Director"] = movie.find_all("p")[2].find_all("a")[0].contents[0]
72 |     except:
73 |         movie_info["Director"] = "N/A"
74 |
75 |     try:
76 |         cast_members = movie.find_all("p")[2].find_all("a")[1:]
77 |     except:
78 |         cast_members = []
79 |
80 |     cast_name = ""
81 |     for member in cast_members:
82 |         cast_name += member.contents[0] + ", "
83 |
84 |     movie_info["Cast"] = cast_name[:-2]
85 |     return movie_info
86 |
87 |
88 | # This function fetches the movie information and prints it systematically
89 | def main():
90 |
91 |     info = scrape_and_store(soup)
92 |     for key, value in info.items():
93 |         print(f"{key} : {value}")
94 |
95 |
96 | if __name__ == "__main__":
97 |     main()
--------------------------------------------------------------------------------
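
The long run of near-identical try/except blocks in the script above can be collapsed into one helper that evaluates a scraping expression and falls back to "N/A" (the helper name is hypothetical):

    def get_or_na(extract):
        # Run a zero-argument scraping function, returning "N/A" on the usual lookup failures
        try:
            return extract()
        except (AttributeError, IndexError):
            return "N/A"

    # e.g. movie_info["Rating"] = get_or_na(lambda: movie.select(".value")[0].contents[0])
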
/IMDB_scrapper/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4==4.9.1
2 | certifi==2020.6.20
3 | chardet==3.0.4
4 | idna==2.10
5 | requests==2.24.0
6 | soupsieve==2.0.1
7 | urllib3==1.26.5
8 |
--------------------------------------------------------------------------------
/IMDB_scrapper/sample.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/IMDB_scrapper/sample.PNG
--------------------------------------------------------------------------------
/Json_to_yaml/README.md:
--------------------------------------------------------------------------------
1 | ### JSON_to_YAML
2 |
3 | This script converts a JSON file into a YAML file.
4 |
5 | ### Requirements
6 |
7 | This script uses an external library called PyYAML, which can be installed using either of the following two approaches:
8 |
9 | 1. `pip3 install pyyaml`
10 |
11 | 2. `pip3 install -r requirements.txt`
12 |
13 | ### How to run this script?
14 |
15 | Just type the following command:
16 |
17 |     python3 json_to_yaml.py -l "Your_json_file"
18 |
19 | Example:
20 |
21 |     python3 json_to_yaml.py -l example.json
22 |
--------------------------------------------------------------------------------
/Json_to_yaml/example.json:
--------------------------------------------------------------------------------
1 | {
2 | "fruit": "Apple",
3 | "size": "Large",
4 | "color": "Red"
5 | }
--------------------------------------------------------------------------------
/Json_to_yaml/example.yaml:
--------------------------------------------------------------------------------
1 | fruit: Apple
2 | size: Large
3 | color: Red
4 |
--------------------------------------------------------------------------------
/Json_to_yaml/json_to_yaml.py:
--------------------------------------------------------------------------------
1 | import json
2 | import sys
3 | import yaml
4 | import argparse
5 |
6 | # Code to add the cli
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("-l", "--jsonfile", required=True, help="JSON file")
9 | args = vars(parser.parse_args())
10 |
11 | # To capture the user defined value in a variable
12 | json_file = args["jsonfile"]
13 |
14 | # End the program if the file extension is incorrect
15 | if not json_file.endswith(".json"):
16 |     sys.exit("Please enter a json file only")
17 |
18 | # Reading the json file and ensuring the formatting
19 | with open(json_file, "r") as file_object:
20 |     try:
21 |         data = json.load(file_object)
22 |     except json.JSONDecodeError:
23 |         sys.exit("Please check the formatting of your json file")
24 |
25 | # Extracting the name of the file so that the yaml file has the same name
26 | file_name = json_file.split(".")[0]
27 | file_in_yaml_format = file_name + ".yaml"
28 |
29 | # Here the sort_keys parameter is kept False to maintain the order of keys as present in the input json file.
30 | with open(file_in_yaml_format, "w") as file_object_2:
31 |     yaml.dump(data, file_object_2, sort_keys=False)
32 |
33 |
34 | print("Your yaml file has been created successfully")
35 |
--------------------------------------------------------------------------------
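
The reverse conversion is nearly symmetric, which is worth seeing once. A sketch, assuming an input file shaped like `example.yaml` above:

    import json
    import yaml

    with open("example.yaml") as file_object:
        data = yaml.safe_load(file_object)  # safe_load refuses arbitrary YAML tags

    with open("example.json", "w") as file_object_2:
        json.dump(data, file_object_2, indent=4)
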
/Json_to_yaml/requirements.txt:
--------------------------------------------------------------------------------
1 | PyYAML==5.4
2 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Pycon_Proposals/README.md:
--------------------------------------------------------------------------------
1 | ## Pycon_Proposals
2 |
3 | ### This script scrapes the selected proposals and the total proposals from the PyCon website and stores these proposals in two separate csv files.
4 |
5 | ### How to use this script?
6 |
7 | 1. Make sure all the requirements for the script are present in your system by running:
8 |
9 | pip install -r requirements.txt
10 |
11 | 2. Run the following command:
12 |
13 | python pycon_proposals.py
14 |
15 | ### Author
16 |
17 | [Aditya Jetely](https://github.com/AdityaJ7)
--------------------------------------------------------------------------------
/Pycon_Proposals/pycon_proposals.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup as bs
3 | import pandas as pd
4 |
5 |
6 | def scrape_divs():
7 |     """This function scrapes all the proposal elements and stores them
8 |     in a list.
9 |     """
10 |     response = requests.get("https://in.pycon.org/cfp/2020/proposals/")
11 |     soup = bs(response.content, "html.parser")
12 |     mydivs = soup.findAll("div", {"class": "col-sm-11 col-xs-12"})
13 |     return mydivs
14 |
15 |
16 | def selected_proposals(mydivs, df_columns):
17 |     """This function takes the list of selected proposal elements from the
18 |     scrape_divs function as well as a list of columns and stores the values
19 |     of the elements in a csv file.
20 |     Args:
21 |         mydivs (list): List of proposal elements
22 |         df_columns (list): List of column names
23 |     """
24 |     final = {}
25 |     for i, div in enumerate(mydivs[:43]):
26 |         title = div.text
27 |         titlex = title.split("\n")
28 |         test_list = list(filter(lambda x: x != "", titlex))
29 |         no_of_votes = test_list[2]
30 |         no_of_messages = test_list[0]
31 |         title = test_list[4]
32 |         tag1 = test_list[5]
33 |         tag2 = test_list[7]
34 |         author = test_list[11].strip()
35 |         date = test_list[14].strip()
36 |         final[i] = [no_of_votes, no_of_messages, title, tag1, tag2, author, date]
37 |
38 |     df1 = pd.DataFrame.from_dict(final, orient="index")
39 |     df1.columns = df_columns
40 |     df1.to_csv("selected_proposals.csv")
41 |
42 |
43 | def total_proposals(mydivs, df_columns):
44 |     """This function takes the list of total proposal elements from the scrape_divs
45 |     function as well as a list of columns and stores the values of the
46 |     elements in a csv file.
47 |     Args:
48 |         mydivs (list): List of proposal elements
49 |         df_columns (list): List of column names
50 |     """
51 |     final_two = {}
52 |     for i, div in enumerate(mydivs[43:]):
53 |         title = div.text
54 |         titlex = title.split("\n")
55 |         test_list = list(filter(lambda x: x != "", titlex))
56 |         no_of_votes = test_list[2]
57 |         no_of_messages = test_list[0]
58 |         title = test_list[4]
59 |         tag1 = test_list[6]
60 |         tag2 = test_list[8]
61 |         author = test_list[12].strip()
62 |         date = test_list[15].strip()
63 |         final_two[i] = [no_of_votes, no_of_messages, title, tag1, tag2, author, date]
64 |     df2 = pd.DataFrame.from_dict(final_two, orient="index")
65 |     df2.columns = df_columns
66 |     df2.to_csv("total_proposals.csv")
67 |
68 |
69 | if __name__ == "__main__":
70 |     df_columns = ["Votes", "Messages", "Title", "Tag1", "Tag2", "Author", "Date"]
71 |     mydivs = scrape_divs()
72 |     selected_proposals(mydivs, df_columns)
73 |     total_proposals(mydivs, df_columns)
74 |     print("The proposals have been saved successfully!!!")
75 |
--------------------------------------------------------------------------------
/Pycon_Proposals/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4==4.9.1
2 | certifi==2020.6.20
3 | chardet==3.0.4
4 | idna==2.10
5 | requests==2.24.0
6 | soupsieve==2.0.1
7 | urllib3==1.26.5
8 | pandas==1.1.2
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # All_my_Python_work
2 | Collection of all the mini projects made by me so far.
3 |
--------------------------------------------------------------------------------
/Scrape_Hospital_Website/scrape.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup
3 |
4 | all_links = set()
5 | all_names = set()
6 |
7 | number_of_pages_to_scrape = 1
8 | # The total number of pages is 188, which can be put here
9 |
10 |
11 | def get_links():
12 |     i = 1
13 |     while i <= number_of_pages_to_scrape:
14 |         url = f"https://doctors.ololrmc.com/search?sort=networks&page={i}"
15 |         r = requests.get(url)
16 |         soup = BeautifulSoup(r.text, "html.parser")
17 |         links = soup.find_all("h2", class_="css-1yi8h8m-ProviderName e16v8r6n5")
18 |         for link in links:
19 |             all_links.add(link.a["href"])
20 |         i += 1
21 |
22 |
23 | def get_names():
24 |     for link in all_links:
25 |         r_2 = requests.get("https://doctors.ololrmc.com" + link)
26 |         soup_2 = BeautifulSoup(r_2.text, "html.parser")
27 |         name = soup_2.find("h1", class_="fw-6 fs-l").span.text
28 |         # The heading text begins with "About "; keep only what follows it
29 |         position = name.find("About")
30 |         all_names.add(name[position + 6 :])
31 |
32 |
33 | if __name__ == "__main__":
34 |     get_links()
35 |     get_names()
36 |
37 |     for name in all_names:
38 |         print(name)
39 |
--------------------------------------------------------------------------------
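
A courtesy note on the script above: with `number_of_pages_to_scrape` raised to the full 188, the loop fires requests back to back. A sketch of the same loop with a short pause between pages (the one-second figure is an arbitrary choice):

    import time

    i = 1
    while i <= number_of_pages_to_scrape:
        r = requests.get(f"https://doctors.ololrmc.com/search?sort=networks&page={i}")
        # ...parse the page as get_links() does above...
        time.sleep(1)  # pause between page requests
        i += 1
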
/Time_to_load_website/README.md:
--------------------------------------------------------------------------------
1 | ## Time to load website
2 |
3 | This script takes a URL from the user and returns the time taken to load that website.
4 |
5 | ## How to use this?
6 |
7 | 1. Just type the following in the command prompt:
8 |
9 |        python time_to_load_website.py
10 |
11 | 2. It will request a URL. Provide the URL and hit enter to see the script in action.
12 |
13 | ## Sample use:
14 |
15 | 
16 |
--------------------------------------------------------------------------------
/Time_to_load_website/sample.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AdityaJ7/All_my_Python_work/e9417d7ba1bbee58f6c75c2915fd21f2cc25c81e/Time_to_load_website/sample.PNG
--------------------------------------------------------------------------------
/Time_to_load_website/time_to_load_website.py:
--------------------------------------------------------------------------------
1 | from urllib.request import urlopen
2 | import time
3 |
4 |
5 | def get_load_time(url):
6 |     """This function takes a user defined url as input
7 |     and returns the time taken to load that url in seconds.
8 |
9 |     Args:
10 |         url (string): The user defined url.
11 |
12 |     Returns:
13 |         time_to_load (float): The time taken to load the website in seconds.
14 |     """
15 |
16 |     if url.startswith(("http://", "https://")):  # Checking for the presence of a protocol
17 |         open_this_url = urlopen(url)  # Open the url as entered by the user
18 |     else:
19 |         open_this_url = urlopen("https://" + url)  # Adding https to the url
20 |     start_time = time.time()  # Time stamp before the reading of the url starts
21 |     open_this_url.read()  # Reading the user defined url
22 |     end_time = time.time()  # Time stamp after the reading of the url
23 |     open_this_url.close()  # Closing the instance of the urlopen object
24 |     time_to_load = end_time - start_time
25 |
26 |     return time_to_load
27 |
28 |
29 | if __name__ == "__main__":
30 |     url = input("Enter the url whose loading time you want to check: ")
31 |     print(f"\nThe time taken to load {url} is {get_load_time(url):.2f} seconds.")
32 |
--------------------------------------------------------------------------------
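
A measurement note on the script above: `time.time()` follows the wall clock, which the OS can adjust mid-measurement; `time.perf_counter()` is the monotonic clock intended for timing intervals. A minimal sketch of the same idea:

    import time
    from urllib.request import urlopen

    start = time.perf_counter()
    with urlopen("https://example.com") as response:
        response.read()
    print(f"Loaded in {time.perf_counter() - start:.2f} seconds")
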
/Wallpaper Setter/README.md:
--------------------------------------------------------------------------------
1 | ### Wallpaper setter
2 |
3 |
4 | #### This script can be used to change the wallpaper on a **Windows PC**. It takes a jpg file as input and sets it as the desktop wallpaper.
5 |
6 | #### How to use this script?
7 |
8 | Go to the command prompt and type the following:
9 |
10 |     python wallpaper_setter.py -l "Your_jpg_file"
11 |
12 | Example:
13 |
14 |     python wallpaper_setter.py -l awesome.jpg
15 |
16 | #### Output you get:
17 |
18 | Your desktop wallpaper gets changed.
19 |
20 | #### PS: Provide an absolute path if the wallpaper is in a directory other than the current one; running this script from the wallpaper's folder is preferred.
--------------------------------------------------------------------------------
/Wallpaper Setter/wallpaper_setter.py:
--------------------------------------------------------------------------------
1 | import ctypes
2 | import os
3 | import argparse
4 | import sys
5 |
6 | # Code to add the cli
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("-l", "--jpgfile", required=True, help="JPG File")
9 | args = vars(parser.parse_args())
10 |
11 |
12 | # Storing the jpg file name provided by the user
13 | filename = args["jpgfile"]
14 |
15 | # To make sure a jpg file has been provided
16 | if filename.endswith(".jpg"):
17 |
18 |     # Code to get the absolute path of the jpg file
19 |     def getabsolutepath(filename):
20 |         path = os.path.abspath(filename)
21 |         return path
22 |
23 |     # The action code for setting the wallpaper via the SystemParametersInfoW function is 20
24 |     SPI_SETDESKWALLPAPER = 20
25 |
26 |     # Main code to change the wallpaper
27 |     output_from_wallpaper_change = ctypes.windll.user32.SystemParametersInfoW(
28 |         SPI_SETDESKWALLPAPER, 0, getabsolutepath(filename), 0
29 |     )
30 |
31 |     if output_from_wallpaper_change == 1:
32 |         print("Your wallpaper has been changed successfully!!!")
33 |     else:
34 |         print("Sorry, can't set this file as your wallpaper")
35 | else:
36 |     sys.exit("Enter a jpg file only please")
37 |
--------------------------------------------------------------------------------
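
A persistence note on the script above: passing `0` as the last argument to `SystemParametersInfoW` means the change is not written back to the user profile, so it may not survive a logoff. Windows defines two flags for that; a sketch (the image path is a placeholder):

    import ctypes

    SPI_SETDESKWALLPAPER = 20
    SPIF_UPDATEINIFILE = 0x01     # write the new setting to the user profile
    SPIF_SENDWININICHANGE = 0x02  # broadcast the change to running applications

    ctypes.windll.user32.SystemParametersInfoW(
        SPI_SETDESKWALLPAPER,
        0,
        r"C:\path\to\awesome.jpg",
        SPIF_UPDATEINIFILE | SPIF_SENDWININICHANGE,
    )
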
/XKCD_downloader/README.md:
--------------------------------------------------------------------------------
1 | ## XKCD Downloader
2 |
3 | ### This script can be used to download any issue of the xkcd comics with just a simple command.
4 |
5 | ### How to use this script?
6 |
7 | 1. Install the requirements with the following line:
8 |
9 |        pip install -r requirements.txt
10 |
11 | 2. Run the following command from your terminal (note the script file is named `xkcd_dowloader.py`):
12 |
13 |        python3 xkcd_dowloader.py -l 'issue-number'
14 |
15 | Example:
16 |
17 |        python3 xkcd_dowloader.py -l 956
18 |
--------------------------------------------------------------------------------
/XKCD_downloader/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4==4.9.1
2 | certifi==2020.6.20
3 | chardet==3.0.4
4 | idna==2.10
5 | requests==2.24.0
6 | soupsieve==2.0.1
7 | urllib3==1.26.5
8 |
--------------------------------------------------------------------------------
/XKCD_downloader/xkcd_dowloader.py:
--------------------------------------------------------------------------------
1 | import requests
2 | from bs4 import BeautifulSoup as bs
3 | import shutil
4 | import argparse
5 |
6 | # Code to add the cli
7 |
8 | parser = argparse.ArgumentParser()
9 | parser.add_argument("-l", "--issue", required=True, help="Comics Issue Number")
10 | args = vars(parser.parse_args())
11 |
12 |
13 | # Storing the comic issue number provided by the user
14 | issue_number = args["issue"]
15 |
16 | # Complete url for the issue
17 | url = "https://xkcd.com/" + issue_number
18 |
19 |
20 | response = requests.get(url)
21 |
22 | # Checking if we can fetch the url or not
23 | if response.status_code == 200:
24 |     soup = bs(response.content, "html.parser")
25 |     image_link = soup.find_all("img")[2]["src"]
26 |     image_name = image_link.split("/")[-1]
27 |     image_url = "https:" + image_link
28 |     r = requests.get(image_url, stream=True)
29 |     if r.status_code == 200:
30 |         # This ensures the image file is decoded correctly
31 |         r.raw.decode_content = True
32 |
33 |         # Creating the image file
34 |         with open(image_name, "wb") as f:
35 |             shutil.copyfileobj(r.raw, f)
36 |
37 |         print("Image successfully downloaded: ", image_name)
38 |     else:
39 |         print("Image couldn't be retrieved")
40 | else:
41 |     print("Issue number is invalid")
42 |
--------------------------------------------------------------------------------
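
An alternative worth knowing about: xkcd publishes per-issue metadata as JSON, which sidesteps HTML parsing entirely. A sketch using that endpoint (same issue number as the README example):

    import requests

    issue_number = 956
    meta = requests.get(f"https://xkcd.com/{issue_number}/info.0.json").json()
    print(meta["title"])  # comic title
    print(meta["img"])    # direct image URL, downloadable as in the script above
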