# prints <generator object <genexpr> at 0x7fda3c2329e0>
33 | # ===============
34 |
35 | # =============== UNCOMMENT THE PRINT STATEMENT TO RUN
36 | # query the length of a generator
37 | # print(len(even_gen))
38 | # Output: TypeError: object of type 'generator' has no len()
39 | # ===============
40 |
41 | # ===============
42 | gen = (i**2 for i in range(100))
43 | # computes the sum 0 + 1 + 4 + 9 + 16 + 25 + ... + 9801
44 | print(sum(gen))
45 | # prints 328350
46 | # ===============
47 |
48 | # ===============
49 | print(sum(gen))
50 | # prints 0: the generator was exhausted by the previous sum
51 | # ===============
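
# =============== added example
# a generator is single-use: once exhausted, build a new one to iterate again
gen = (i**2 for i in range(100))
print(sum(gen))
# prints 328350 again
# ===============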
52 |
53 | # =============== UNCOMMENT THE LAST PRINT STATEMENT TO RUN
54 | # Iterating over generators using next
55 | short_gen = (i**2 for i in range(3))
56 | print(next(short_gen)) # prints 0
57 | print(next(short_gen)) # prints 1
58 | print(next(short_gen)) # prints 4
59 | # print(next(short_gen)) # raises a StopIteration exception
60 | # ===============
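
# =============== added example
# next() accepts a default value, which avoids StopIteration on exhaustion
safe_gen = (i**2 for i in range(2))
print(next(safe_gen, -1))  # prints 0
print(next(safe_gen, -1))  # prints 1
print(next(safe_gen, -1))  # prints -1 instead of raising StopIteration
# ===============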
61 |
62 | # ===============
63 | # a simple list comprehension
64 | print([i**2 for i in range(10)])
65 | # prints [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
66 | # ===============
67 |
68 | # ===============
69 | # Finding indices of None in a list in one line
70 | example_list = [2, None, -10, None, 4, 8]
71 | print([idx for idx, item in enumerate(example_list) if item is None])
72 | # prints [1, 3]
73 | # ===============
74 |
75 | # ===============
76 | # zip function
77 | names = ["Angie", "Brian", "Cassie", "David"]
78 | exam_1_scores = [90, 82, 79, 87]
79 | exam_2_scores = [95, 84, 72, 91]
80 | print(list(zip(names, exam_1_scores, exam_2_scores)))
81 | # prints [('Angie', 90, 95), ('Brian', 82, 84), ('Cassie', 79, 72), ('David', 87, 91)]
82 | # ===============
83 |
84 | # ===============
85 | from itertools import combinations
86 | print(list(combinations([0, 1, 2, 3], 3)))
87 | # prints [(0, 1, 2), (0, 1, 3), (0, 2, 3), (1, 2, 3)]
88 | # ===============
--------------------------------------------------------------------------------
/03_Class/.project:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <projectDescription>
3 |     <name>03_Class</name>
4 |     <comment></comment>
5 |     <projects>
6 |     </projects>
7 |     <buildSpec>
8 |         <buildCommand>
9 |             <name>org.python.pydev.PyDevBuilder</name>
10 |             <arguments>
11 |             </arguments>
12 |         </buildCommand>
13 |     </buildSpec>
14 |     <natures>
15 |         <nature>org.python.pydev.pythonNature</nature>
16 |     </natures>
17 | </projectDescription>
18 |
--------------------------------------------------------------------------------
/03_Class/.pydevproject:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?>
2 | <?eclipse-pydev version="1.0"?><pydev_project>
3 | <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
4 | <path>/${PROJECT_DIR_NAME}</path>
5 | </pydev_pathproperty>
6 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python interpreter</pydev_property>
7 | <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
8 | </pydev_project>
9 |
--------------------------------------------------------------------------------
/03_Class/Door.py:
--------------------------------------------------------------------------------
1 | class Door:
2 | def __init__(self, number, status):
3 | self.number = number
4 | self.status = status
5 |
6 | def open(self):
7 | self.status = 'open'
8 |
9 | def close(self):
10 | self.status = 'closed'
11 |
12 | class SecurityDoor(Door):
13 | def __init__(self, number, status, locked=True):
14 | super().__init__(number, status)
15 | self.locked=locked
16 |
17 | def lockDoor(self):
18 | self.locked=True
19 |
20 | def unlockDoor(self):
21 | self.locked=False
22 |
23 | def open(self):
24 | if self.locked:
25 | return
26 | super().open()
27 |
28 | if __name__ == '__main__':
29 | door1 = Door(1, 'closed')
30 | print(door1.number)
31 | print(door1.status)
32 | print(type(door1))
33 |
34 | door1.open()
35 | print(door1.status)
36 |
37 | door2 = Door(1, 'closed')
38 | print("Address of door1 object: {}".format(hex(id(door1))))
39 | print("Address of door2 object: {}".format(hex(id(door2))))
40 |
41 | print("Address of class of door1 object: {}".format(hex(id(door1.__class__))))
42 | print("Address of class of door2 object: {}".format(hex(id(door2.__class__))))
43 |
44 | print(Door.__dict__)
45 | print(door1.__dict__)
46 | print(door1.__dict__['status'])
47 |
48 | sdoor1 = SecurityDoor(2,'closed')
49 | print(sdoor1.status) # prints 'closed'
50 | # Remember that the door is locked,
51 | # so open will not have any effect
52 | sdoor1.open()
53 | print(sdoor1.status) # prints 'closed'
54 | # Now unlock the door
55 | sdoor1.unlockDoor()
56 | sdoor1.open()
57 | print(sdoor1.status) # prints 'open'
58 |
59 | # sdoor2 = SecurityDoor(2,'closed',False)
60 | # print(sdoor2.status)
61 | # # Remember that the door is unlocked, so open will have effect
62 | # sdoor2.open()
63 | # print(sdoor2.status)
--------------------------------------------------------------------------------
/03_Class/Door_Standard.py:
--------------------------------------------------------------------------------
1 | class Door:
2 | def __init__(self, number, status):
3 | self._number = number
4 | self._status = status
5 |
6 | def get_number(self):
7 | return self._number
8 |
9 | def set_number(self, number):
10 | self._number = number
11 |
12 | def get_status(self):
13 | return self._status
14 |
15 | def set_status(self, status):
16 | self._status = status
17 |
18 | def open(self):
19 |         self._status = 'open'
20 |
21 | def close(self):
22 |         self._status = 'closed'
23 |
24 |
25 |
26 |
27 |
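# Note: in idiomatic Python, explicit get_/set_ pairs like the ones above are
# usually written with the property decorator instead. A minimal sketch
# (illustrative, not part of the lab files):
#
#     class Door:
#         def __init__(self, number, status):
#             self._number = number
#             self._status = status
#
#         @property
#         def status(self):
#             return self._status
#
#         @status.setter
#         def status(self, status):
#             self._status = status
#
#         def open(self):
#             self.status = 'open'    # routed through the setter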
--------------------------------------------------------------------------------
/03_Class/Points_1D.py:
--------------------------------------------------------------------------------
1 | class Points_1D:
2 | def __init__(self, points):
3 | self._points = points
4 |
5 | def __len__(self):
6 | max_pt = max(self._points)
7 | min_pt = min(self._points)
8 | return max_pt - min_pt
9 |
10 | if __name__ == '__main__':
11 | point_set = Points_1D((5, 8, 9, -5, -2, 18))
12 | print(len(point_set))
13 |
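    # __len__ must return a non-negative integer, which always holds here
    # because max(points) >= min(points). A degenerate added example:
    print(len(Points_1D((7, 7))))  # prints 0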
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_01/DobleLinkedBase.py:
--------------------------------------------------------------------------------
1 | class _DoubleLinkedBase:
2 | """ A base class providing a doubly linked list representation."""
3 |
4 | class _Node:
5 | """ Lightweight, nonpublic class for storing a doubly linked node"""
6 | __slots__ = '_element', '_prev', '_next' # streamline memory
7 |
8 | def __init__(self, element, prev, next): # initialize node's fields
9 | self._element = element
10 | self._prev = prev
11 | self._next = next
12 |
13 | def __init__(self):
14 | """Create an empty list"""
15 | self._header = self._Node(None, None, None)
16 | self._trailer = self._Node(None, None, None)
17 | self._header._next = self._trailer
18 | self._trailer._prev = self._header
19 | self._size = 0 # number of elements
20 |
21 | def __len__(self):
22 | """Return the number of elements in the list"""
23 | # ===== Start writing your code here =====
24 | pass # Remove this statement once you write your code
25 | # ===== End writing your code here =====
26 |
27 | def is_empty(self):
28 | """Return true if list is empty"""
29 | # ===== Start writing your code here =====
30 | pass # Remove this statement once you write your code
31 | # ===== End writing your code here =====
32 |
33 | def _insert_between(self, e, predecessor, successor):
34 | """Add element e between two existing nodes and return new node"""
35 | newest = self._Node(e, predecessor, successor)
36 | # ===== Start writing your code here =====
37 | pass # Remove this statement once you write your code
38 | # ===== End writing your code here =====
39 |
40 | def _delete_node(self, node):
41 |         """Delete nonsentinel node from the list and return its element"""
42 | # ===== Start writing your code here =====
43 | pass # Remove this statement once you write your code
44 | # ===== End writing your code here =====
45 |
46 |
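# For reference, these stubs follow the standard doubly linked list pattern
# from Goodrich et al.; one conventional completion looks like this
# (illustrative only -- write your own for the assignment):
#
#     def __len__(self):
#         return self._size
#
#     def is_empty(self):
#         return self._size == 0
#
#     def _insert_between(self, e, predecessor, successor):
#         newest = self._Node(e, predecessor, successor)
#         predecessor._next = newest
#         successor._prev = newest
#         self._size += 1
#         return newest
#
#     def _delete_node(self, node):
#         node._prev._next = node._next
#         node._next._prev = node._prev
#         self._size -= 1
#         element = node._element
#         node._prev = node._next = node._element = None  # aid garbage collection
#         return element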
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_01/Question.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_01/Question.pdf
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/.DS_Store
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/README.md:
--------------------------------------------------------------------------------
1 | # Software Lab
2 |
3 | ## Python Datascience Assignment
4 |
5 | In this assignment we will deal with **Image Captioning**. Image Captioning is the process of generating a textual description of an image. You have to create a Python package for transforming images and analysing the effect of those transformations on the captions produced by an image captioning model. We are providing you with a pretrained captioning model; all you need to do is call the model on the image and get the outputs.
6 |
7 | A Python package means that one can install the package in the Python environment and import its modules in any Python script, irrespective of the location of the script. Creating a Python package is fairly easy; just follow the steps [here](https://packaging.python.org/tutorials/packaging-projects/).
8 |
9 | ## Installation instructions
10 |
11 | Note: To install the dependencies you need to run the following commands:
12 | - `pip install -r requirements.txt`
13 | - `python3 -m spacy download en_core_web_sm`
14 | - Download the LAVIS zip into the project directory from https://github.com/salesforce/LAVIS, unzip LAVIS-main.zip, and install lavis using the following commands
15 | * `cd LAVIS-main/`
16 | * `pip install .`
17 |
18 | ## File descriptions
19 |
20 | 1. `main.py`: This is the main file to be called to execute the program; it calls the corresponding functions as needed during execution. It should call the appropriate function to prepare the dataset, transform the images read, obtain the captions by calling the captioner model, and then plot the obtained images by calling the appropriate functions from the package described below.
21 |
22 | 2. `./my_package/model.py`: This file contains the image captioning model definition. Consider it as a black-box model which takes an image and number of captions to be generated as input and provides the captions as output.
23 |
24 |
25 |
26 |
27 |
28 |
29 | Fig. 1. Sample Output of the Captioner.
30 |
31 |
32 |
33 | 3. `./my_package/data/dataset.py`: This file contains the class ```Dataset``` that reads the provided dataset from the annotation file and provides the transformed image object. The annotation format is provided in `data/README.md`
34 |
35 | 4. `./my_package/data/transforms`: This folder contains 5 files. Each of these files is responsible for performing the corresponding transformation, as follows:
36 |
37 | a) `crop.py`: This file takes an image as input and crops it based on the provided arguments. Declare a class `CropImage()` for performing the operation.
38 |
39 |
40 |
41 |
42 |
43 |
44 | Fig. (a). Crop Operation.
45 |
46 |
47 | b) `flip.py`: This file takes an image as input and flips it based on the provided arguments. Declare a class `FlipImage()` for performing the operation.
48 |
49 |
50 |
51 |
52 |
53 |
54 | Fig. (b). Flip Operation.
55 |
56 |
57 | c) `rotate.py`: This file takes an image as input and rotates it based on the provided arguments. Declare a class `RotateImage()` for performing the operation.
58 |
59 |
60 |
61 |
62 |
63 |
64 | Fig. (c). Rotate Operation.
65 |
66 |
67 | d) `rescale.py`: This file takes an image as input and rescales it based on the provided arguments. Declare a class `RescaleImage()` for performing the operation.
68 |
69 |
70 |
71 |
72 |
73 |
74 | Fig. (d). Rescale Operation.
75 |
76 |
77 | e) `blur.py`: This file takes an image as input and applies a gaussian blur to it based on the provided arguments. Declare a class `GaussBlurImage()` for performing the operation.
78 |
79 |
80 |
81 |
82 |
83 |
84 | Fig. (e). Blur Operation.
85 |
86 |
87 | 5. `./my_package/data/download.py` : This file takes in a url (to download the image from) and a path (to store the downloaded image at).
88 |
89 | 6. `setup.py`: Use this file for constructing the package `my_package`.
90 |
91 | ## Coding Task [30 marks]
92 |
93 | Note: For handling images (e.g. reading them), we recommend using PIL instead of OpenCV, as OpenCV uses the `BGR` channel format instead of `RGB`.
94 |
95 | 1. Write the various transformations in `./my_package/data/transforms`. There are five files, as already mentioned. These transformations are easily implementable using any image processing library such as PIL, skimage or opencv. [2x5=10 marks]
96 |
97 | 2. Complete the `Dataset` class in `./my_package/data/dataset.py`. This class will accept the path to the annotation file and the list of transformation classes. Ideally you should directly use the transformation classes, but you may also use strings to identify the transformations. [5 marks]
98 |
99 | 3. Complete the `Download` class in `./my_package/data/download.py` that will download the image from the supplied url and store it at the correct path; the filename should be the same as in the annotation file (`./data/annotations.jsonl`). [5 marks]
100 |
101 | 4. Create a python package ``` my_package```. For this you need to write ``` setup.py``` (a minimal sketch follows this list). It must be noted that files called ```__init__.py``` need to be added in the hierarchy; we leave it to you to find where they should be added. Note that the user will generally not know the exact files where the classes are written. That is, he/she does not know that there exists a file ```crop.py``` where the class ```CropImage()``` is defined; he/she simply knows that this class is defined in ```transforms```. So, good coding practice is to allow the import statement ```from my_package.data.transforms import CropImage```. [5 marks]
102 |
103 | 5. Write ```main.py``` where you will test the different transformations you have written on the image captioner. The outputs for each of the experiments should be organized properly in the outputs folder. [5 marks]
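
A minimal `setup.py` sketch for reference (the name and version here are illustrative; adapt as needed):

```python
from setuptools import setup, find_packages

setup(
    name="my_package",          # illustrative package name
    version="0.1.0",            # illustrative version
    packages=find_packages(),   # discovers my_package and its subpackages via their __init__.py files
)
```

With this in place, running `pip install -e .` from the project root makes `from my_package.data.transforms import CropImage` work from any script.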
104 |
105 | ## Analysis Task [10 marks]
106 |
107 | 1. Obtain and display the captions for all the images (which you download) in the `data/imgs` folder. [3 marks]
108 |
109 | 2. Consider the image whose name matches the last digit of your roll number, i.e. if your roll number is 20CS####7 then consider the image `7.jpg`. Save the transformed images while printing the captions in the console: [1x7=7 marks]
110 |
111 | a) The original image along with the 3 generated captions.
112 |
113 | b) Horizontally flipped original image along with the 3 generated captions.
114 |
115 | c) Blurred image (with some degree of blurring) along with the 3 generated captions.
116 |
117 | d) Twice Rescaled image (2X scaled) along with the 3 generated captions.
118 |
119 | e) Half Rescaled image (0.5X scaled) along with the 3 generated captions.
120 |
121 | f) 90 degree right rotated image along with the 3 generated captions.
122 |
123 | g) 45 degree left rotated image along with the 3 generated captions.
124 |
125 | **Please read the class definitions very carefully. In this assignment you do not need to code a lot, but you need to understand how to integrate several custom modules together in a clean way. More details on the arguments and the return types are provided in the corresponding files.**
126 |
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/data/README.md:
--------------------------------------------------------------------------------
1 | ## Dataset Format
2 |
3 | Images are to be downloaded in the ``` imgs ``` folder.
4 |
5 | ``` annotations.jsonl ``` contains the annotations for the images. The format of the annotations is:
6 |
7 | ```
8 | {
9 | "file_name":
10 | "url":
11 | "captions": [ {
12 | "caption":
13 | }
14 | .
15 | .
16 | ]
17 | }
18 | ```
19 |
20 | A sample annotation file for 10 images is given.
21 |
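A minimal sketch of reading the annotations with the `jsonlines` package from `requirements.txt` (the file path is illustrative):

```python
import jsonlines

# each line of annotations.jsonl is one JSON object with the fields above
with jsonlines.open("annotations.jsonl") as reader:
    for ann in reader:
        print(ann["file_name"], ann["url"], len(ann["captions"]))
```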
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/data/annotations.jsonl:
--------------------------------------------------------------------------------
1 | {"file_name": "0.jpg","url": "http://farm5.staticflickr.com/4127/5172389204_31214fdc50_z.jpg", "captions": [{"caption": "A large white airplane and a person on a lot."},{"caption": "View from gate of jet connected to jet way for passengers to board or deplane"},{"caption": "The plane is parked at the gate at the airport terminal."},{"caption": "Airline employees by an aircraft parked at the gate"},{"caption": "A large jetliner sitting on top of an airport runway."}]}
2 | {"file_name": "1.jpg","url": "http://farm8.staticflickr.com/7355/8825114508_b0fa4d7168_z.jpg","captions" : [{"caption": "a man falls off his skateboard in a skate park."},{"caption": "Athletes performing tricks on a BMX bicycle and a skateboard."},{"caption": "A skateboarder jumps into the air as he performs a skateboard trick."},{"caption": "A man on a skateboard performs a trick at the skate park"},{"caption": "A person on a skateboard and bike at a skate park."}]}
3 | {"file_name": "2.jpg","url": "http://farm8.staticflickr.com/7020/6478877255_242f741dd1_z.jpg","captions" : [{"caption": "a bike sits parked next to a street "},{"caption": "A bicycle is locked up to a post"},{"caption": "A blue bicycle sits on a sidewalk near a street."},{"caption": "A bicycle is chained to a fixture on a city street"},{"caption": "a blue bike parked on a side walk "}]}
4 | {"file_name": "3.jpg","url": "http://farm1.staticflickr.com/50/138352202_f4983aa717_z.jpg","captions": [{"caption": "A painting that has a gold frame on it."},{"caption": "A painting of a candlestick holder with a candle, several pieces of fruit and a vase, with a gold frame around the painting."},{"caption": "a painting of fruit and a candle with a vase"},{"caption": "Painting of oranges, a bowl, candle, and a pitcher"},{"caption": "A painting of a table with fruit on top of it."}]}
5 | {"file_name": "4.jpg","url": "http://farm4.staticflickr.com/3646/3426989867_e5b8439938_z.jpg", "captions": [{"caption": "A street with two busses and people walking."},{"caption": "Pedestrians crossing a street between buses and cars. "},{"caption": "A couple of people are crossing a busy street"},{"caption": "People crossing a busy city street full of vehicles."},{"caption": "Two buses parked in a parking lot next to cars."}]}
6 | {"file_name": "5.jpg","url": "http://farm3.staticflickr.com/2253/1755223462_fabbeb8dc3_z.jpg","captions": [{"caption": "A plaster external wall with multiple old paper images attached."},{"caption": "a yellow and brown wall a gray door and a sign "},{"caption": "a run down building with two planters outside the door"},{"caption": "a building with dirty walls and dirty doors"},{"caption": "A building wall and pair of doors that are open, along with vases of flowers on the outside of the building."}]}
7 | {"file_name": "6.jpg","url": "http://farm5.staticflickr.com/4088/4980393979_fb7325e0b6_z.jpg","captions" : [{"caption": "a woman is sitting in front of a desk"},{"caption": "The woman is working on her computer at the desk."},{"caption": "A woman sitting at a desk in her work station"},{"caption": "a woman at her desk sits intently and happy"},{"caption": "a person sitting at a desk with a keyboard and monitor "}]}
8 | {"file_name": "7.jpg","url": "http://farm4.staticflickr.com/3577/3491669985_d81e1050c6_z.jpg","captions" : [{"caption": "A view of motorcyclists riding their bikes through heavy city traffic."},{"caption": "people ride their motorcycles beside some cars, passing by an empty street with stores and apartment buildings"},{"caption": "A bunch of bikers are gathered on a city street. "},{"caption": "A group of motorists pass very large buildings in asia. "},{"caption": "People are walking and riding motorcycles on the street"}]}
9 | {"file_name": "8.jpg","url": "http://farm3.staticflickr.com/2336/1634911562_703ff01cff_z.jpg","captions" : [{"caption": "The front end of a red motorcycle that is on display."},{"caption": "A brand new motorcycle on display at a show."},{"caption": "A motorcycle is parked inside of a building."},{"caption": "The new motorcycle on display is very shiny."},{"caption": "The shiny motorcycle has been put on display."}]}
10 | {"file_name": "9.jpg","url": "http://farm4.staticflickr.com/3446/3232237447_13d84bd0a1_z.jpg","captions": [{"caption": "People are walking on the street by a homeless person."},{"caption": "a homeless man holding a cup and standing next to a shopping cart on a street"},{"caption": "People walking past a homeless man begging on a city street"},{"caption": "City dwellers walk by as a homeless man begs for cash."},{"caption": "a person with a shopping cart on a city street "}]}
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/data/imgs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/data/imgs/.gitkeep
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/main.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from my_package.model import ImageCaptioningModel
3 | from my_package.data import Dataset, Download
4 | from my_package.data.transforms import FlipImage, RescaleImage, BlurImage, CropImage, RotateImage
5 | import numpy as np
6 | from PIL import Image
7 |
8 |
9 | def experiment(annotation_file, captioner, transforms, outputs):
10 | '''
11 | Function to perform the desired experiments
12 |
13 | Arguments:
14 | annotation_file: Path to annotation file
15 | captioner: The image captioner
16 | transforms: List of transformation classes
17 | outputs: Path of the output folder to store the images
18 | '''
19 |
20 | #Create the instances of the dataset, download
21 |
22 |
23 | #Print image names and their captions from annotation file using dataset object
24 |
25 |
26 | #Download images to ./data/imgs/ folder using download object
27 |
28 |
29 | #Transform the required image (roll number mod 10) and save it separately
30 |
31 |
32 | #Get the predictions from the captioner for the above saved transformed image
33 |
34 |
35 | def main():
36 | captioner = ImageCaptioningModel()
37 | experiment('./data/annotations.jsonl', captioner, [FlipImage(), BlurImage(1)], None) # Sample arguments to call experiment()
38 |
39 |
40 | if __name__ == '__main__':
41 | main()
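
# A minimal sketch of how experiment() might be wired together, assuming the
# Dataset/Download interfaces in my_package (illustrative, not the graded solution):
#
#     dataset = Dataset(annotation_file, transforms)
#     download = Download()
#     for idx in range(len(dataset)):
#         ann = dataset.__getann__(idx)
#         print(ann['file_name'], [c['caption'] for c in ann['captions']])
#         download('./data/imgs/' + ann['file_name'], ann['url'])
#     # then transform the chosen image, save it under `outputs`,
#     # and print captioner(saved_path, 3)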
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/dataset.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | import jsonlines
3 | from PIL import Image
4 | import os
5 | import numpy as np
6 |
7 | class Dataset(object):
8 | '''
9 | A class for the dataset that will return data items as per the given index
10 | '''
11 |
12 | def __init__(self, annotation_file, transforms=None):
13 | '''
14 | Arguments:
15 | annotation_file: path to the annotation file
16 | transforms: list of transforms (class instances)
17 |             For instance, a list like [CropImage(...), FlipImage(...)]
18 | '''
19 |
20 |
21 | def __len__(self):
22 | '''
23 | return the number of data points in the dataset
24 | '''
25 |
26 |
27 | def __getann__(self, idx):
28 | '''
29 | return the data items for the index idx as an object
30 | '''
31 |
32 |
33 | def __transformitem__(self, path):
34 | '''
35 | return transformed PIL Image object for the image in the given path
36 | '''
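
# A minimal sketch of one possible completion (illustrative, not the graded
# solution), assuming the annotation format described in data/README.md:
#
#     def __init__(self, annotation_file, transforms=None):
#         self.transforms = transforms or []
#         with jsonlines.open(annotation_file) as reader:
#             self.annotations = list(reader)
#
#     def __len__(self):
#         return len(self.annotations)
#
#     def __getann__(self, idx):
#         return self.annotations[idx]
#
#     def __transformitem__(self, path):
#         image = Image.open(path).convert("RGB")
#         for transform in self.transforms:
#             image = transform(image)
#         return image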
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/download.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | import requests
3 | from io import BytesIO
4 |
5 | class Download(object):
6 | '''
7 |     A class for helping in downloading the required images from the given url to the specified path
8 | '''
9 |
10 | def __call__(self, path, url):
11 | '''
12 | Arguments:
13 | path: download path with the file name
14 | url: required image URL
15 | '''
16 |
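# A minimal sketch of one possible body (illustrative, not the graded
# solution), fetching the bytes with requests and saving via PIL:
#
#     response = requests.get(url)
#     image = Image.open(BytesIO(response.content)).convert("RGB")
#     image.save(path)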
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/transforms/blur.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from PIL import Image, ImageFilter
3 |
4 | class BlurImage(object):
5 | '''
6 | Applies Gaussian Blur on the image.
7 | '''
8 |
9 | def __init__(self, radius):
10 | '''
11 | Arguments:
12 | radius (int): radius to blur
13 | '''
14 |
15 |
16 | def __call__(self, image):
17 | '''
18 | Arguments:
19 | image (numpy array or PIL Image)
20 |
21 | Returns:
22 | image (numpy array or PIL Image)
23 | '''
24 |
25 |
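# A minimal sketch of one possible completion, assuming PIL's
# ImageFilter.GaussianBlur (illustrative, not the graded solution):
#
#     def __init__(self, radius):
#         self.radius = radius
#
#     def __call__(self, image):
#         if not isinstance(image, Image.Image):   # numpy array input
#             image = Image.fromarray(image)
#         return image.filter(ImageFilter.GaussianBlur(self.radius))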
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/transforms/crop.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from PIL import Image
3 | import random
4 |
5 | class CropImage(object):
6 | '''
7 | Performs either random cropping or center cropping.
8 | '''
9 |
10 | def __init__(self, shape, crop_type='center'):
11 | '''
12 | Arguments:
13 | shape: output shape of the crop (h, w)
14 | crop_type: center crop or random crop. Default: center
15 | '''
16 |
17 | def __call__(self, image):
18 | '''
19 | Arguments:
20 | image (numpy array or PIL image)
21 |
22 | Returns:
23 | image (numpy array or PIL image)
24 | '''
25 |
26 |
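# A minimal sketch of one possible completion (illustrative, not the graded
# solution); assumes the requested (h, w) crop fits inside the image:
#
#     def __init__(self, shape, crop_type='center'):
#         self.shape, self.crop_type = shape, crop_type
#
#     def __call__(self, image):
#         if not isinstance(image, Image.Image):   # numpy array input
#             image = Image.fromarray(image)
#         w, h = image.size
#         th, tw = self.shape
#         if self.crop_type == 'random':
#             left, top = random.randint(0, w - tw), random.randint(0, h - th)
#         else:  # center crop
#             left, top = (w - tw) // 2, (h - th) // 2
#         return image.crop((left, top, left + tw, top + th))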
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/transforms/flip.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from PIL import Image
3 |
4 | class FlipImage(object):
5 | '''
6 | Flips the image.
7 | '''
8 |
9 | def __init__(self, flip_type='horizontal'):
10 | '''
11 | Arguments:
12 | flip_type: 'horizontal' or 'vertical' Default: 'horizontal'
13 | '''
14 |
15 | def __call__(self, image):
16 | '''
17 | Arguments:
18 | image (numpy array or PIL image)
19 |
20 | Returns:
21 | image (numpy array or PIL image)
22 | '''
23 |
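# A minimal sketch of one possible completion using PIL's transpose
# (illustrative, not the graded solution):
#
#     def __init__(self, flip_type='horizontal'):
#         self.flip_type = flip_type
#
#     def __call__(self, image):
#         if not isinstance(image, Image.Image):   # numpy array input
#             image = Image.fromarray(image)
#         if self.flip_type == 'vertical':
#             return image.transpose(Image.FLIP_TOP_BOTTOM)
#         return image.transpose(Image.FLIP_LEFT_RIGHT)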
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/transforms/rescale.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from PIL import Image
3 |
4 | class RescaleImage(object):
5 | '''
6 | Rescales the image to a given size.
7 | '''
8 |
9 | def __init__(self, output_size):
10 | '''
11 | Arguments:
12 | output_size (tuple or int): Desired output size. If tuple, output is
13 | matched to output_size. If int, smaller of image edges is matched
14 | to output_size keeping aspect ratio the same.
15 | '''
16 |
17 |
18 | def __call__(self, image):
19 | '''
20 | Arguments:
21 | image (numpy array or PIL image)
22 |
23 | Returns:
24 | image (numpy array or PIL image)
25 |
26 | Note: You do not need to resize the bounding boxes. ONLY RESIZE THE IMAGE.
27 | '''
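
# A minimal sketch of one possible completion (illustrative, not the graded
# solution): an int matches the smaller edge while keeping aspect ratio, and
# a tuple is interpreted here as an exact (h, w) output size:
#
#     def __init__(self, output_size):
#         self.output_size = output_size
#
#     def __call__(self, image):
#         if not isinstance(image, Image.Image):   # numpy array input
#             image = Image.fromarray(image)
#         w, h = image.size
#         if isinstance(self.output_size, int):
#             if w < h:
#                 new_w, new_h = self.output_size, round(h * self.output_size / w)
#             else:
#                 new_w, new_h = round(w * self.output_size / h), self.output_size
#         else:
#             new_h, new_w = self.output_size
#         return image.resize((new_w, new_h))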
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/data/transforms/rotate.py:
--------------------------------------------------------------------------------
1 | #Imports
2 | from PIL import Image
3 |
4 | class RotateImage(object):
5 | '''
6 | Rotates the image about the centre of the image.
7 | '''
8 |
9 | def __init__(self, degrees):
10 | '''
11 | Arguments:
12 | degrees: rotation degree.
13 | '''
14 |
15 |     def __call__(self, image):
16 | '''
17 | Arguments:
18 | image (numpy array or PIL image)
19 |
20 | Returns:
21 | image (numpy array or PIL image)
22 | '''
23 |
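# A minimal sketch of one possible completion (illustrative, not the graded
# solution); positive degrees rotate counter-clockwise in PIL, and
# expand=True grows the canvas so corners are not clipped:
#
#     def __init__(self, degrees):
#         self.degrees = degrees
#
#     def __call__(self, image):
#         if not isinstance(image, Image.Image):   # numpy array input
#             image = Image.fromarray(image)
#         return image.rotate(self.degrees, expand=True)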
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/my_package/model.py:
--------------------------------------------------------------------------------
1 | import en_core_web_sm
2 | nlp = en_core_web_sm.load()
3 |
4 | import torch
5 | from lavis.models import load_model_and_preprocess
6 | from PIL import Image
7 |
8 | # setup device to use
9 | device = torch.device("cpu")
10 |
11 | # Class definition for the model
12 | class ImageCaptioningModel(object):
13 | '''
14 | The blackbox image captioning model (LAVIS).
15 | Given an image path, it generates the required number of captions.
16 | '''
17 |
18 | # __init__ function
19 | def __init__(self):
20 | self.model, self.vis_processors, _ = load_model_and_preprocess(
21 | name="blip_caption", model_type="large_coco", is_eval=True, device=device
22 | )
23 |         self.vis_processors.keys()  # no-op call; harmless leftover from the LAVIS example
24 |
25 | # function for calling the caption model
26 | def __call__(self, input_path, num_captions):
27 | raw_image = Image.open(input_path).convert("RGB")
28 | image = self.vis_processors["eval"](raw_image).unsqueeze(0).to(device)
29 | return self.model.generate({"image": image}, use_nucleus_sampling=True, num_captions=num_captions)
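
# Example usage (path illustrative):
#
#     captioner = ImageCaptioningModel()
#     print(captioner('./data/imgs/0.jpg', 3))  # prints a list of 3 caption strings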
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy==1.21.5
2 | opencv-python-headless
3 | Pillow==9.4.0
4 | spacy
5 | torchvision
6 | jsonlines==3.1.0
7 |
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/blur.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/blur.png
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/crop.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/crop.png
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/flip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/flip.png
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/picandcaptions.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/picandcaptions.png
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/rescale.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/rescale.png
--------------------------------------------------------------------------------
/Python_DS_Assignment_Question_02/sample_imgs/rotate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_DS_Assignment_Question_02/sample_imgs/rotate.png
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/.DS_Store
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/ImageViewerGUI.py:
--------------------------------------------------------------------------------
1 | from my_package.model import ImageCaptioningModel
2 | from my_package.model import ImageClassificationModel
3 | from tkinter import *
4 | from functools import partial
5 | from PIL import ImageTk, Image
6 | from tkinter import filedialog
7 |
8 |
9 | def fileClick(clicked):
10 | # Define the function you want to call when the filebrowser button (Open) is clicked.
11 | # This function should pop-up a dialog for the user to select an input image file.
12 |     # For better clarity, please check out the sample video.
13 |     pass  # placeholder so the skeleton parses; replace with your code
14 |
15 | def process(clicked, captioner, classifier):
16 | # This function will produce the required output when 'Process' button is clicked.
17 | # Note: This should handle the case if the user clicks on the `Process` button without selecting any image file.
18 |     pass  # placeholder so the skeleton parses; replace with your code
19 |
20 | if __name__ == '__main__':
21 | # Complete the main function preferably in this order:
22 | # Instantiate the root window.
23 | # Provide a title to the root window.
24 | # Instantiate the captioner, classifier models.
25 | # Declare the file browsing button.
26 | # Declare the drop-down button.
27 | # Declare the process button.
28 | # Declare the output label.
29 |     pass  # placeholder; build the GUI here following the steps above
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/README.md:
--------------------------------------------------------------------------------
1 | # Software Lab
2 |
3 | ## Python GUI Assignment (tkinter)
4 |
5 | This is a follow-up assignment to the Python Datascience assignment which we have already completed. In this assignment we will design a GUI using `tkinter` with the following overall functionality:
6 |
7 | * The GUI lets the user select a file from the computer.
8 | * It will have a dropdown menu to toggle between two output options: `Image Captioning` and `Image Classification`
9 | * If `Image Captioning` is selected then it should show the caption for the selected image file along with the original image file side-by-side.
10 | * For `Image Classification` it should display the classification class instead of the captions.
11 | * We will obtain the captions by reusing the captioner from the previous assignment (which you have already done).
12 | * For classification, we use the Image Classification Model (updated in `model.py`).
13 |
14 | Note: Please follow the installation instructions from the previous Python assignment, then ensure `tkinter` is available (`functools` is part of the Python standard library).
15 |
16 | ## Coding Task:
17 |
18 | For this assignment you are expected to modify a single file, which is `ImageViewerGUI.py`.
19 |
20 | 1. Define the function `fileClick`: This function should pop up a dialog for the user to select an input image file. Once the image is selected by the user, it should automatically get the corresponding outputs from the captioner (call the captioner from here). Once the output is computed, it should be shown automatically based on the choice the dropdown button is currently set to.
21 | 2. Define the function `process`: Should show the corresponding captions or classification classes side-by-side with the input image, according to the choice provided (see the sketch below). This function just shows the output, which should already have been computed in the `fileClick` function above. Also, you should handle the case where the user clicks the `Process` button without selecting any image file.
22 | 3. Complete the `main` function and add the required `imports` at the top.
23 |
24 | All the details are mentioned as comments in the code file as well.
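
For orientation, here is a minimal sketch of the callbacks and their shared state. All names are illustrative, and the real functions in `ImageViewerGUI.py` take extra arguments; the models are assumed to come from `my_package.model`:

```python
from tkinter import Tk, StringVar, Label, Button, filedialog

root = Tk()
root.title("Image Viewer")
choice = StringVar(value="Image Captioning")  # backed by the dropdown menu
output_label = Label(root, text="")
output_label.pack()
selected_file = None  # set by fileClick, read by process

def fileClick():
    global selected_file
    selected_file = filedialog.askopenfilename(title="Select an image")

def process(captioner, classifier):
    if not selected_file:  # Process clicked before any file was chosen
        output_label.config(text="Please select an image first.")
        return
    model = captioner if choice.get() == "Image Captioning" else classifier
    output_label.config(text=str(model(selected_file)))

Button(root, text="Open", command=fileClick).pack()
root.mainloop()
```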
25 |
26 | In order to be super clear on how the final GUI should function, here is a sample video showing it. We would expect something similar to this, but individual creativity and additional functionalities are most welcome and encouraged!
27 |
28 |
29 |
30 | [Sample Video Link](https://youtu.be/fcHV8_7QJUc)
34 |
35 |
36 |
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/.gitkeep
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/0.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/1.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/2.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/3.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/4.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/5.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/6.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/7.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/8.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/8.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/data/imgs/9.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/data/imgs/9.jpg
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/my_package/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/dasabir/CS29006_SW_Lab_Spr2023/0b7d51bef2cfe3c2ea6c8fbbbde424c6ba624426/Python_Tkinter_Assignment/my_package/__init__.py
--------------------------------------------------------------------------------
/Python_Tkinter_Assignment/my_package/model.py:
--------------------------------------------------------------------------------
1 | from PIL import Image
2 | from lavis.models import load_model_and_preprocess
3 | from lavis.processors.blip_processors import BlipCaptionProcessor
4 | import torch
5 | import en_core_web_sm
6 | nlp = en_core_web_sm.load()
7 |
8 | class_names = [
9 | "person",
10 | "bicycle",
11 | "car",
12 | "motorcycle",
13 | "airplane",
14 | "bus",
15 | "train",
16 | "truck",
17 | "boat",
18 | "trafficlight",
19 | "firehydrant",
20 | "stopsign",
21 | "parkingmeter",
22 | "bench",
23 | "bird",
24 | "cat",
25 | "dog",
26 | "horse",
27 | "sheep",
28 | "cow",
29 | "elephant",
30 | "bear",
31 | "zebra",
32 | "giraffe",
33 | "backpack",
34 | "umbrella",
35 | "handbag",
36 | "tie",
37 | "suitcase",
38 | "frisbee",
39 | "skis",
40 | "snowboard",
41 | "sportsball",
42 | "kite",
43 | "baseballbat",
44 | "baseballglove",
45 | "skateboard",
46 | "surfboard",
47 | "tennisracket",
48 | "bottle",
49 | "wineglass",
50 | "cup",
51 | "fork",
52 | "knife",
53 | "spoon",
54 | "bowl",
55 | "banana",
56 | "apple",
57 | "sandwich",
58 | "orange",
59 | "broccoli",
60 | "carrot",
61 | "hotdog",
62 | "pizza",
63 | "donut",
64 | "cake",
65 | "chair",
66 | "couch",
67 | "pottedplant",
68 | "bed",
69 | "diningtable",
70 | "toilet",
71 | "tv",
72 | "laptop",
73 | "mouse",
74 | "remote",
75 | "keyboard",
76 | "cellphone",
77 | "microwave",
78 | "oven",
79 | "toaster",
80 | "sink",
81 | "refrigerator",
82 | "book",
83 | "clock",
84 | "vase",
85 | "scissors",
86 | "teddybear",
87 | "hairdrier",
88 | "toothbrush"
89 | ]
90 |
91 |
92 | # setup device to use
93 | device = torch.device("cpu")
94 |
95 | # Class definitions for the models
96 |
97 |
98 | class ImageCaptioningModel(object):
99 | '''
100 | The blackbox image captioning model (LAVIS).
101 | Given an image path, it generates the required number of captions.
102 | '''
103 |
104 | def __init__(self):
105 | self.model, self.vis_processors, _ = load_model_and_preprocess(
106 | name="blip_caption", model_type="large_coco", is_eval=True, device=device
107 | )
108 |         self.vis_processors.keys()  # no-op call; harmless leftover from the LAVIS example
109 |
110 | def __call__(self, input_path, num_captions=3):
111 | raw_image = Image.open(input_path).convert("RGB")
112 | image = self.vis_processors["eval"](raw_image).unsqueeze(0).to(device)
113 | return self.model.generate({"image": image}, use_nucleus_sampling=True, num_captions=num_captions)
114 |
115 |
116 | class ImageClassificationModel(object):
117 | '''
118 | The blackbox image classification model (LAVIS).
119 | Given an image path, it generates the required number of top classes.
120 | '''
121 |
122 | def __init__(self):
123 | self.model, self.vis_processors, _ = load_model_and_preprocess(
124 | "blip_feature_extractor", model_type="base", is_eval=True, device=device)
125 | self.cls_names = class_names
126 | self.text_processor = BlipCaptionProcessor(prompt="A picture of ")
127 | self.cls_prompt = [self.text_processor(
128 | cls_nm) for cls_nm in self.cls_names]
129 |
130 | def __call__(self, input_path, num_classes=3):
131 | raw_image = Image.open(input_path).convert("RGB")
132 | image = self.vis_processors["eval"](raw_image).unsqueeze(0).to(device)
133 | sample = {"image": image, "text_input": self.cls_prompt}
134 | image_features = self.model.extract_features(
135 | sample, mode="image").image_embeds_proj[:, 0]
136 | text_features = self.model.extract_features(
137 | sample, mode="text").text_embeds_proj[:, 0]
138 | sims = (image_features @ text_features.t())[0] / self.model.temp
139 | probs = torch.nn.Softmax(dim=0)(sims).tolist()
140 | res = []
141 |         for i in range(len(self.cls_names)):  # avoid hardcoding the 80 COCO classes
142 |             res.append((probs[i], self.cls_names[i]))
143 | res = sorted(res, reverse=True)
144 | return res[0:num_classes]
145 |
--------------------------------------------------------------------------------