├── .DS_Store ├── Centroid ├── .DS_Store ├── BottomLeft.png ├── BottomRight.png ├── Centroid.py ├── Signature.png ├── TopLeft.png └── TopRight.png ├── Connected Component Labelling ├── .DS_Store ├── 1.png ├── 2.png ├── 3.png ├── 4.png ├── ccl.png ├── ccl4.py └── input.png ├── Gradient ├── .DS_Store ├── gradient.jpg ├── gradient.py └── lena.jpg ├── Histogram Equalization ├── .DS_Store ├── high_contrast.png ├── hist2.jpg ├── hist2.tif └── hist_eq.py ├── Image Negative ├── .DS_Store ├── binary.jpg ├── binary_inverted.png ├── grayscale.png ├── grayscale_inverted.png ├── negative.py ├── rgb.jpg └── rgb_inverted.png ├── Image Segmentation ├── .DS_Store ├── Capture.PNG ├── Capture2.PNG ├── Capture3.PNG ├── Segmentation.py └── image.png ├── Local Histogram Analysis ├── .DS_Store ├── Original.png ├── SlidingWindowHistogram.png ├── Slidingwindow.py ├── high_contrast.png ├── high_contrast_global.png ├── high_contrast_local.png ├── high_contrast_local_img.png ├── mountains.jpg └── tilingGlobal.py ├── Morphology ├── .DS_Store ├── closing.png ├── dilation.png ├── erosion.png ├── inp.jpg ├── opening.png ├── segment.png ├── segment.py ├── signature.png └── simple.py ├── README.md ├── Sharpening ├── .DS_Store ├── inp1.jpg ├── sharpen.jpg └── sharpen.py ├── Skeletonization ├── .DS_Store ├── Skeletonization.py ├── Thumb.png └── output.png ├── Smoothing ├── .DS_Store ├── AvergingFilter.py ├── Filter.py ├── averaging.jpg ├── gaussian.jpg ├── gaussian.py ├── inp1.jpeg ├── inp1.tif ├── inp2.jpeg ├── inp2.tif ├── inp3.jpeg ├── inp3.tif ├── median.jpg ├── median.py ├── unsharp_masking.jpg ├── unsharp_masking.py ├── weightedavg.jpg └── weightedavg.py ├── Template Matching ├── TemplateMatching.py ├── image.png ├── matchedTemplate.png ├── matchedTemplateCutOut.png └── template.png └── XY_Cuts ├── .DS_Store ├── XY-cuts.png ├── XY_Cuts.py └── xycut.png /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/.DS_Store -------------------------------------------------------------------------------- /Centroid/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/.DS_Store -------------------------------------------------------------------------------- /Centroid/BottomLeft.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/BottomLeft.png -------------------------------------------------------------------------------- /Centroid/BottomRight.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/BottomRight.png -------------------------------------------------------------------------------- /Centroid/Centroid.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | img = cv2.imread("signature.png", 0) 4 | 5 | ret,binary = cv2.threshold(img,10,255,cv2.THRESH_BINARY) 6 | 7 | def findBB(im): 8 | h, w = im.shape[0], im.shape[1] 9 | left, top = w, h 10 | right, bottom = 0, 0 11 | 12 | for x in xrange(h): 13 | for y in xrange(w): 14 | if (im[x,y] == 0): 15 | right = x if x 
> right else right 16 | left = x if x < left else left 17 | bottom = y if y > bottom else bottom 18 | top = y if y < top else top 19 | 20 | return (left, right, top, bottom) 21 | 22 | def findCentroid(im): 23 | h, w = im.shape[0], im.shape[1] 24 | cx, cy, n = 0, 0, 0 25 | for x in xrange(h): 26 | for y in xrange(w): 27 | if (im[x,y] == 0): 28 | cx += x 29 | cy += y 30 | n += 1 31 | cx /= n 32 | cy /= n 33 | return (cx, cy) 34 | 35 | def divideImgIntoFour(im, cent): 36 | h, w = im.shape[0], im.shape[1] 37 | cx, cy = cent 38 | img1 = im[0:cx, 0:cy] 39 | img2 = im[0:cx, cy:w] 40 | img3 = im[cx:h, 0:cy] 41 | img4 = im[cx:h, cy:w] 42 | return [img1, img2, img3, img4] 43 | 44 | def calculateTransitions(im): 45 | h, w = im.shape[0], im.shape[1] 46 | prev = im[0,0] 47 | n = 0 48 | for x in range(1, h): 49 | for y in range(1, w): 50 | curr = im[x,y] 51 | # check if the is black to white transition 52 | n = n+1 if curr == 255 and prev == 0 else n 53 | prev = curr 54 | return n 55 | 56 | boundingBox = findBB(binary) 57 | cropImg = binary[boundingBox[0]:boundingBox[1], boundingBox[2]:boundingBox[3]] 58 | centroid = findCentroid(cropImg) 59 | segments = divideImgIntoFour(cropImg, centroid) 60 | transitions = [calculateTransitions(seg) for seg in segments] 61 | 62 | print "Bounding Box:", boundingBox 63 | print "Coordinates of centroid:", centroid 64 | print "Black to white transitions (4 segments):", transitions 65 | 66 | cv2.imshow("TopLeft", segments[0]) 67 | cv2.imwrite("TopLeft.png", segments[0]) 68 | cv2.imshow("TopRight", segments[1]) 69 | cv2.imwrite("TopRight.png", segments[1]) 70 | cv2.imshow("BottomLeft", segments[2]) 71 | cv2.imwrite("BottomLeft.png", segments[2]) 72 | cv2.imshow("BottomRight", segments[3]) 73 | cv2.imwrite("BottomRight.png", segments[3]) 74 | cv2.waitKey(0) 75 | 76 | -------------------------------------------------------------------------------- /Centroid/Signature.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/Signature.png -------------------------------------------------------------------------------- /Centroid/TopLeft.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/TopLeft.png -------------------------------------------------------------------------------- /Centroid/TopRight.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Centroid/TopRight.png -------------------------------------------------------------------------------- /Connected Component Labelling/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/.DS_Store -------------------------------------------------------------------------------- /Connected Component Labelling/1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/1.png 
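For comparison with the per-pixel loops in Centroid/Centroid.py above, here is a vectorized Python 3 sketch of the same bounding-box / centroid / quadrant computation. The threshold value comes from the script; this is an illustrative rewrite, not the repository's code. Note that the script opens "signature.png" while the committed file is "Signature.png", so the path may need adjusting on case-sensitive filesystems.

```python
import cv2
import numpy as np

img = cv2.imread("Signature.png", 0)               # grayscale read, filename as committed
_, binary = cv2.threshold(img, 10, 255, cv2.THRESH_BINARY)

rows, cols = np.nonzero(binary == 0)               # coordinates of black (ink) pixels
top, bottom = rows.min(), rows.max() + 1           # bounding box in the row direction
left, right = cols.min(), cols.max() + 1           # bounding box in the column direction
crop = binary[top:bottom, left:right]

rows, cols = np.nonzero(crop == 0)
cy, cx = int(rows.mean()), int(cols.mean())        # centroid (row, column) of the ink

quadrants = [crop[:cy, :cx], crop[:cy, cx:],       # top-left, top-right
             crop[cy:, :cx], crop[cy:, cx:]]       # bottom-left, bottom-right

print("Centroid (row, col):", (cy, cx))
```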
-------------------------------------------------------------------------------- /Connected Component Labelling/2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/2.png -------------------------------------------------------------------------------- /Connected Component Labelling/3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/3.png -------------------------------------------------------------------------------- /Connected Component Labelling/4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/4.png -------------------------------------------------------------------------------- /Connected Component Labelling/ccl.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/ccl.png -------------------------------------------------------------------------------- /Connected Component Labelling/ccl4.py: -------------------------------------------------------------------------------- 1 | import Image, ImageOps 2 | import sys 3 | import random 4 | import numpy 5 | 6 | 7 | def colourize(img): 8 | height, width = img.shape 9 | 10 | colors = [] 11 | colors.append([]) 12 | colors.append([]) 13 | color = 1 14 | # Displaying distinct components with distinct colors 15 | coloured_img = Image.new("RGB", (width, height)) 16 | coloured_data = coloured_img.load() 17 | 18 | for i in range(len(img)): 19 | for j in range(len(img[0])): 20 | if img[i][j] > 0: 21 | if img[i][j] not in colors[0]: 22 | colors[0].append(img[i][j]) 23 | colors[1].append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))) 24 | 25 | ind = colors[0].index(img[i][j]) 26 | coloured_data[j, i] = colors[1][ind] 27 | 28 | return coloured_img 29 | 30 | 31 | def binarize(img_array, threshold=130): 32 | for i in range(len(img_array)): 33 | for j in range(len(img_array[0])): 34 | if img_array[i][j] > threshold: 35 | img_array[i][j] = 0 36 | else: 37 | img_array[i][j] = 1 38 | return img_array 39 | 40 | 41 | def ccl4(img_array): 42 | ##### first pass ##### 43 | print "starting first pass" 44 | curr_label = 1; 45 | img_array = numpy.array(img_array) 46 | labels = numpy.array(img_array) 47 | 48 | # storing label conversions 49 | label_conv = [] 50 | label_conv.append([]) 51 | label_conv.append([]) 52 | 53 | count = 0 54 | for i in range(1, len(img_array)): 55 | for j in range(1, len(img_array[0])): 56 | 57 | if img_array[i][j] > 0: 58 | label_x = labels[i][j - 1] 59 | label_y = labels[i - 1][j] 60 | 61 | if label_x > 0: 62 | # both x and y have a label 63 | if label_y > 0: 64 | 65 | if not label_x == label_y: 66 | labels[i][j] = min(label_x, label_y) 67 | if max(label_x, label_y) not in label_conv[0]: 68 | label_conv[0].append(max(label_x, label_y)) 69 | label_conv[1].append(min(label_x, label_y)) 70 | elif max(label_x, label_y) in label_conv[0]: 71 | ind = label_conv[0].index(max(label_x, label_y)) 72 | if label_conv[1][ind] > 
min(label_x, label_y): 73 | l = label_conv[1][ind] 74 | label_conv[1][ind] = min(label_x, label_y) 75 | while l in label_conv[0] and count < 100: 76 | count += 1 77 | ind = label_conv[0].index(l) 78 | l = label_conv[1][ind] 79 | label_conv[1][ind] = min(label_x, label_y) 80 | 81 | label_conv[0].append(l) 82 | label_conv[1].append(min(label_x, label_y)) 83 | 84 | else: 85 | labels[i][j] = label_y 86 | # only x has a label 87 | else: 88 | labels[i][j] = label_x 89 | 90 | # only y has a label 91 | elif label_y > 0: 92 | labels[i][j] = label_y 93 | 94 | # neither x nor y has a label 95 | else: 96 | labels[i][j] = curr_label 97 | curr_label += 1 98 | 99 | ##### second pass ##### 100 | print "starting second pass" 101 | count = 1 102 | for idx, val in enumerate(label_conv[0]): 103 | 104 | if label_conv[1][idx] in label_conv[0] and count < 100: 105 | count += 1 106 | ind = label_conv[0].index(label_conv[1][idx]) 107 | label_conv[1][idx] = label_conv[1][ind] 108 | 109 | for i in range(1, len(labels)): 110 | for j in range(1, len(labels[0])): 111 | 112 | if labels[i][j] in label_conv[0]: 113 | ind = label_conv[0].index(labels[i][j]) 114 | labels[i][j] = label_conv[1][ind] 115 | 116 | return labels 117 | 118 | 119 | def main(): 120 | numpy.set_printoptions(threshold=numpy.nan) 121 | # Open the image 122 | img = Image.open(sys.argv[1]) 123 | 124 | # Threshold the image 125 | img = img.convert('L') 126 | img = ImageOps.expand(img, border=1, fill='white') 127 | img = numpy.array(img) 128 | img = binarize(img) 129 | 130 | """ 131 | img = [ [0,0,0,0,0,0,0,0,0,0], 132 | [0,1,1,0,1,1,1,0,1,0], 133 | [0,1,1,0,1,0,1,0,1,0], 134 | [0,1,1,1,1,0,0,0,1,0], 135 | [0,0,0,0,0,0,0,0,1,0], 136 | [0,1,1,1,1,0,1,0,1,0], 137 | [0,0,0,0,1,0,1,0,1,0], 138 | [0,1,1,1,1,0,0,0,1,0], 139 | [0,1,1,1,1,0,1,1,1,0], 140 | [0,0,0,0,0,0,0,0,0,0]] 141 | """ 142 | 143 | img = ccl4(img) 144 | 145 | # Colour the image using labels 146 | coloured_img = colourize(img) 147 | 148 | # Show the coloured image 149 | coloured_img.show() 150 | 151 | 152 | if __name__ == "__main__": main() 153 | -------------------------------------------------------------------------------- /Connected Component Labelling/input.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Connected Component Labelling/input.png -------------------------------------------------------------------------------- /Gradient/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Gradient/.DS_Store -------------------------------------------------------------------------------- /Gradient/gradient.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Gradient/gradient.jpg -------------------------------------------------------------------------------- /Gradient/gradient.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import cv2 3 | import numpy as np 4 | 5 | #Read Image in GrayScale 6 | img_gray = cv2.imread('lena.jpg',0) 7 | h,w = img_gray.shape[:2] 8 | 9 | grad_img = np.asarray(img_gray) 10 | 11 | for i in range(0,h): 12 | for j in range(0,w-1): 13 | 14 | #applying gradient 15 | a 
= min(img_gray[i][j+1],img_gray[i][j]) 16 | if a == img_gray[i][j+1] : 17 | temp_arr = img_gray[i][j] - img_gray[i][j+1] 18 | else : 19 | temp_arr = img_gray[i][j+1] - img_gray[i][j] 20 | 21 | 22 | grad_img[i,j] = temp_arr 23 | 24 | img = Image.fromarray(grad_img) 25 | img.save("gradient.jpg") 26 | 27 | -------------------------------------------------------------------------------- /Gradient/lena.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Gradient/lena.jpg -------------------------------------------------------------------------------- /Histogram Equalization/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Histogram Equalization/.DS_Store -------------------------------------------------------------------------------- /Histogram Equalization/high_contrast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Histogram Equalization/high_contrast.png -------------------------------------------------------------------------------- /Histogram Equalization/hist2.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Histogram Equalization/hist2.jpg -------------------------------------------------------------------------------- /Histogram Equalization/hist2.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Histogram Equalization/hist2.tif -------------------------------------------------------------------------------- /Histogram Equalization/hist_eq.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import matplotlib.pyplot as plt 3 | import cv2 4 | import numpy as np 5 | 6 | img = cv2.imread('hist2.tif',0) 7 | 8 | #Initialize intensity values with 256 zeroes 9 | intensity_count = [0] * 256 10 | 11 | height,width = img.shape[:2] 12 | N = height * width 13 | 14 | #Array for new_image 15 | high_contrast = np.zeros(img.shape) 16 | 17 | for i in range(0,height): 18 | for j in range(0,width): 19 | intensity_count[img[i][j]] += 1 #Find pixels count for each intensity 20 | 21 | L = 256 22 | 23 | intensity_count,total_values_used = np.histogram(img.flatten(),L,[0,L]) 24 | pdf_list = np.ceil(intensity_count*(L-1)/img.size) #Calculate PDF 25 | cdf_list = pdf_list.cumsum() #Calculate CDF 26 | 27 | 28 | for y in range(0, height): 29 | for x in range(0, width): 30 | #Apply the new intensities in our new image 31 | high_contrast[y,x] = cdf_list[img[y,x]] 32 | 33 | 34 | #PLOT THE HISTOGRAMS 35 | cv2.imwrite('high_contrast.png', high_contrast) 36 | 37 | plt.hist(img.ravel(),256,[0,256]) 38 | plt.xlabel('Intensity Values') 39 | plt.ylabel('Pixel Count') 40 | plt.show() 41 | 42 | plt.hist(high_contrast.ravel(),256,[0,256]) 43 | plt.xlabel('Intensity Values') 44 | plt.ylabel('Pixel Count') 45 | plt.show() 46 | -------------------------------------------------------------------------------- /Image 
Negative/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/.DS_Store -------------------------------------------------------------------------------- /Image Negative/binary.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/binary.jpg -------------------------------------------------------------------------------- /Image Negative/binary_inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/binary_inverted.png -------------------------------------------------------------------------------- /Image Negative/grayscale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/grayscale.png -------------------------------------------------------------------------------- /Image Negative/grayscale_inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/grayscale_inverted.png -------------------------------------------------------------------------------- /Image Negative/negative.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import cv2 3 | import sys 4 | import numpy as np 5 | 6 | S = 255 7 | 8 | # if in rgb 9 | if(sys.argv[2]=="rgb"): 10 | # open in rgb 11 | img = cv2.imread(sys.argv[1],cv2.IMREAD_COLOR) 12 | B,G,R = cv2.split(img) 13 | B[:] = [S-x for x in B] #inverting blue 14 | G[:] = [S-x for x in G] #inverting green 15 | R[:] = [S-x for x in R] #inverting red 16 | 17 | #saving image 18 | my_img = cv2.merge((B, G, R)) 19 | cv2.imwrite(sys.argv[1]+'_inverted.png', my_img) 20 | cv2.imshow(sys.argv[1]+'_inverted.png', my_img) 21 | 22 | #if in grayscale or binary 23 | else: 24 | # open in grayscale 25 | img = cv2.imread(sys.argv[1],cv2.IMREAD_GRAYSCALE) 26 | my_img = np.array([S-x for x in img]) 27 | cv2.imwrite(sys.argv[1]+'_inverted.png', my_img) 28 | cv2.imshow(sys.argv[1]+'_inverted.png', my_img) 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /Image Negative/rgb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/rgb.jpg -------------------------------------------------------------------------------- /Image Negative/rgb_inverted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Negative/rgb_inverted.png -------------------------------------------------------------------------------- /Image Segmentation/.DS_Store: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Segmentation/.DS_Store -------------------------------------------------------------------------------- /Image Segmentation/Capture.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Segmentation/Capture.PNG -------------------------------------------------------------------------------- /Image Segmentation/Capture2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Segmentation/Capture2.PNG -------------------------------------------------------------------------------- /Image Segmentation/Capture3.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Segmentation/Capture3.PNG -------------------------------------------------------------------------------- /Image Segmentation/Segmentation.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | # Loading the image in RGB 5 | img = cv2.imread("image.png", 0) 6 | 7 | # Applying Gaussian blur with kernel size 7 to remove unwanted noise 8 | blurred_image = cv2.GaussianBlur(img,(7,7),0) 9 | 10 | # Applying Otsu's thresholding to binarize the image 11 | retval ,binarized_image = cv2.threshold(blurred_image,40,255,cv2.THRESH_BINARY) 12 | 13 | # Applying Closing to fill in the holes 14 | filter = np.ones((3,3),np.uint8) 15 | closed_image = cv2.morphologyEx(binarized_image, cv2.MORPH_CLOSE, filter) 16 | 17 | # Using connected components to label the image 18 | retval, markers = cv2.connectedComponents(closed_image) 19 | 20 | # Mapping the component labels to hue val 21 | label_hue = np.uint8(120*markers/np.max(markers)) 22 | blank_ch = 255*np.ones_like(label_hue) 23 | labeled_image = cv2.merge([label_hue, blank_ch, blank_ch]) 24 | 25 | # changing from HSV to RGB again to show 26 | labeled_image = cv2.cvtColor(labeled_image, cv2.COLOR_HSV2BGR) 27 | 28 | # background label set to black 29 | labeled_image[label_hue==0] = 0 30 | 31 | # getting the unique colors in the image 32 | unique_colors = np.unique(labeled_image.reshape(-1, labeled_image.shape[2]), axis=0) 33 | 34 | print "Colors available in labeled image:" 35 | for x in xrange(unique_colors.shape[0]): 36 | print str(x+1)+"=> B:"+str(unique_colors[x,0])+" G:"+str(unique_colors[x,1])+" R:"+str(unique_colors[x,2])+" " 37 | 38 | print "\nSelect one of the colors and give its RGB values " 39 | 40 | r = raw_input("B : ") 41 | g = raw_input("G : ") 42 | b = raw_input("R : ") 43 | 44 | # making an output image 45 | output_image = np.zeros_like(labeled_image) 46 | 47 | # getting the object of user input color 48 | for x in xrange(labeled_image.shape[0]): 49 | for y in xrange(labeled_image.shape[1]): 50 | if (labeled_image[x,y,0] == int(r) and labeled_image[x,y,1] == int(g) and labeled_image[x,y,2] == int(b)): 51 | output_image[x,y,0:3] = labeled_image[x,y,0:3] 52 | 53 | # show the output image 54 | cv2.imshow("Selected", labeled_image) 55 | cv2.waitKey(0) 56 | -------------------------------------------------------------------------------- /Image 
Segmentation/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Image Segmentation/image.png -------------------------------------------------------------------------------- /Local Histogram Analysis/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/.DS_Store -------------------------------------------------------------------------------- /Local Histogram Analysis/Original.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/Original.png -------------------------------------------------------------------------------- /Local Histogram Analysis/SlidingWindowHistogram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/SlidingWindowHistogram.png -------------------------------------------------------------------------------- /Local Histogram Analysis/Slidingwindow.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | plt.figure(figsize=(12,12)) 5 | 6 | #reading image as grayscale 7 | img = cv2.imread("mountains.jpg",0) 8 | 9 | def slidingWindowEqualization(im, winSize): 10 | newImg = np.zeros((im.shape[0], im.shape[1])) 11 | for row in xrange(im.shape[0]-winSize+1): 12 | for col in xrange(im.shape[1]-winSize+1): 13 | newImg[row:row+winSize,col:col+winSize] = cv2.equalizeHist(im[row:row+winSize,col:col+winSize]) 14 | return newImg 15 | 16 | windowSize = 256 17 | output_img = slidingWindowEqualization(img, windowSize) 18 | plt.subplot(211) 19 | plt.axis('off') 20 | plt.title("Image after transformation") 21 | plt.imshow(output_img, cmap="gray") 22 | 23 | #writing output image 24 | cv2.imwrite("SlidingWindow.jpg", output_img) 25 | 26 | plt.subplot(212) 27 | plt.hist(output_img.ravel(),256,[0,256]) 28 | plt.title("Histogram") 29 | 30 | plt.show() 31 | 32 | -------------------------------------------------------------------------------- /Local Histogram Analysis/high_contrast.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/high_contrast.png -------------------------------------------------------------------------------- /Local Histogram Analysis/high_contrast_global.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/high_contrast_global.png -------------------------------------------------------------------------------- /Local Histogram Analysis/high_contrast_local.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram 
Analysis/high_contrast_local.png -------------------------------------------------------------------------------- /Local Histogram Analysis/high_contrast_local_img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/high_contrast_local_img.png -------------------------------------------------------------------------------- /Local Histogram Analysis/mountains.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Local Histogram Analysis/mountains.jpg -------------------------------------------------------------------------------- /Local Histogram Analysis/tilingGlobal.py: -------------------------------------------------------------------------------- 1 | from PIL import Image 2 | import matplotlib.pyplot as plt 3 | import cv2 4 | import numpy as np 5 | 6 | img = cv2.imread('mountains.jpg',0) 7 | intensity_count = [0] * 256 #Initialize intensity values with 256 zeroes 8 | L = 255.0 9 | 10 | in1 = [0] * 256 11 | in2 = [0] * 256 12 | in3 = [0] * 256 13 | in4 = [0] * 256 14 | 15 | height, width = img.shape[:2] #Get width and height 16 | N = height * width * 1.0 #Get total Pixels 17 | half_height, half_width = height/2, width/2 18 | half_N = (N / 2) * 1.0 19 | 20 | im1 = np.zeros((256,256,0)) 21 | im2 = np.zeros((256,256,0)) 22 | im3 = np.zeros((256,256,0)) 23 | im4 = np.zeros((256,256,0)) 24 | high_contrast = np.zeros(img.shape) #Array for global_equalized_new_image 25 | high_contrast_local = np.zeros(img.shape) #Array for local_equalized_new_image 26 | 27 | for y in range(0, height): 28 | for x in range(0, width): 29 | intensity_count[img[y,x]] += 1 #Increment each gray_level pixel value according to tile (tile 1, 2, 3 or 4) 30 | if y <= height/2 and x < width/2: 31 | in1[img[y,x]] += 1 32 | elif y <= height/2 and x >= width/2: 33 | in2[img[y,x]] += 1 34 | elif y > height/2 and x < width/2: 35 | in3[img[y,x]] += 1 36 | elif y > height/2 and x >= width/2: 37 | in4[img[y,x]] += 1 38 | 39 | newList1 = [x / half_N for x in in1] 40 | newList2 = [x / half_N for x in in2] 41 | newList3 = [x / half_N for x in in3] 42 | newList4 = [x / half_N for x in in4] 43 | 44 | pdf1_array = np.asarray(newList1); cdf1_array = np.cumsum(pdf1_array) 45 | pdf2_array = np.asarray(newList1); cdf2_array = np.cumsum(pdf2_array) 46 | pdf3_array = np.asarray(newList1); cdf3_array = np.cumsum(pdf3_array) 47 | pdf4_array = np.asarray(newList1); cdf4_array = np.cumsum(pdf4_array) 48 | 49 | approx1_List = [round(x * L) for x in cdf1_array] 50 | approx2_List = [round(x * L) for x in cdf2_array] 51 | approx3_List = [round(x * L) for x in cdf3_array] 52 | approx4_List = [round(x * L) for x in cdf4_array] 53 | 54 | for y in range(0, height): 55 | for x in range(0, width): 56 | if y <= height/2 and x < width/2: 57 | high_contrast_local[y,x] = approx1_List[img[y,x]] 58 | elif y <= height/2 and x >= width/2: 59 | high_contrast_local[y,x] = approx2_List[img[y,x]] 60 | elif y > height/2 and x < width/2: 61 | high_contrast_local[y,x] = approx3_List[img[y,x]] 62 | elif y > height/2 and x >= width/2: 63 | high_contrast_local[y,x] = approx4_List[img[y,x]] 64 | 65 | cv2.imwrite('high_contrast_local.png', high_contrast_local) 66 | 67 | newList = [x / N for x in intensity_count] 68 | 69 | pdf_array = np.asarray(newList) 70 | 
cdf_array = np.cumsum(pdf_array) 71 | 72 | approx_List = [round(x * L) for x in cdf_array] 73 | 74 | for y in range(0,height): 75 | for x in range(0,width): 76 | high_contrast[y,x] = approx_List[img[y,x]] 77 | 78 | plt.hist(high_contrast_local.ravel(),256,[0,256]) 79 | plt.xlabel('Intensity Values') 80 | plt.ylabel('Pixel Count') 81 | plt.savefig('high_contrast_local.png') 82 | plt.clf() 83 | #plt.show() 84 | 85 | 86 | cv2.imwrite('high_contrast_global.png', high_contrast) #PLOT THE HISTOGRAMS 87 | plt.hist(img.ravel(),256,[0,256]) 88 | plt.xlabel('Intensity Values') 89 | plt.ylabel('Pixel Count') 90 | plt.savefig('Original.png') 91 | plt.clf() 92 | #plt.show() 93 | plt.hist(high_contrast.ravel(),256,[0,256]) 94 | plt.xlabel('Intensity Values') 95 | plt.ylabel('Pixel Count') 96 | plt.savefig('high_contrast_global.png') 97 | plt.clf() 98 | #plt.show() 99 | 100 | 101 | -------------------------------------------------------------------------------- /Morphology/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/.DS_Store -------------------------------------------------------------------------------- /Morphology/closing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/closing.png -------------------------------------------------------------------------------- /Morphology/dilation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/dilation.png -------------------------------------------------------------------------------- /Morphology/erosion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/erosion.png -------------------------------------------------------------------------------- /Morphology/inp.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/inp.jpg -------------------------------------------------------------------------------- /Morphology/opening.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/opening.png -------------------------------------------------------------------------------- /Morphology/segment.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/segment.png -------------------------------------------------------------------------------- /Morphology/segment.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | img = cv2.imread('inp.jpg',0) 5 | 6 | ret,bin = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV) 7 | 8 | kernel = np.ones((3,3),np.uint8) 9 | opened = cv2.morphologyEx(bin, cv2.MORPH_OPEN, 
kernel) 10 | cv2.imshow("opened", opened) 11 | 12 | kernel = np.ones((5,5),np.uint8) 13 | closed = cv2.morphologyEx(opened, cv2.MORPH_CLOSE, kernel) 14 | ret, output = cv2.threshold(closed,127,255,cv2.THRESH_BINARY_INV) 15 | cv2.imshow("segment", output) 16 | cv2.imwrite("segment.png", output) 17 | cv2.waitKey(0) 18 | -------------------------------------------------------------------------------- /Morphology/signature.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Morphology/signature.png -------------------------------------------------------------------------------- /Morphology/simple.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | img = cv2.imread('signature.png', 0) 5 | r, img = cv2.threshold(img, 130, 255, cv2.THRESH_BINARY_INV) 6 | cv2.imshow("Original", img) 7 | cv2.waitKey(0) 8 | 9 | kernel = np.ones((5,5),np.uint8) 10 | 11 | dilated = cv2.dilate(img,kernel,iterations = 1) 12 | cv2.imshow("Dilation", dilated) 13 | cv2.imwrite("dilation.png", dilated) 14 | cv2.waitKey(0) 15 | 16 | eroded = cv2.erode(img,kernel,iterations = 1) 17 | cv2.imshow("Erosion", eroded) 18 | cv2.imwrite("erosion.png", eroded) 19 | cv2.waitKey(0) 20 | 21 | opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) 22 | cv2.imshow("Opening", opened) 23 | cv2.imwrite("opening.png", opened) 24 | cv2.waitKey(0) 25 | 26 | closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel) 27 | cv2.imshow("Closing", closed) 28 | cv2.imwrite("closing.png", closed) 29 | cv2.waitKey(0) 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Basic Digital Image Processing Tasks 2 | > This repository contains basic implementations of image processing algorithms in python. 
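Each algorithm folder is self-contained: its script reads the sample image by a relative filename and writes the output next to it, so the commands listed below are meant to be run from inside the corresponding folder. For example, using the Gradient folder (any folder works the same way):

```shell
$ cd Gradient
$ python gradient.py    # reads lena.jpg from the current folder, writes gradient.jpg
```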
3 | 4 | ## Required Libraries 5 | * PIL 6 | ```shell 7 | $ pip install pillow 8 | ``` 9 | * opencv-python 10 | ```shell 11 | $ pip install opencv-python 12 | ``` 13 | 14 | ## Algorithms 15 | 16 | ### Gradient 17 | 18 | ```shell 19 | $ python gradient.py 20 | ``` 21 | |Original|Gradient| 22 | |---|---| 23 | |![Gradient-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Gradient/lena.jpg)|![Gradient-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Gradient/gradient.jpg)| 24 | 25 | ### Image Negative 26 | 27 | ```shell 28 | $ python negative.py binary.jpeg binary 29 | ``` 30 | |Original|Binary Negative| 31 | |---|---| 32 | |![Binary-Negative-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/binary.jpg)|![Binary-Negative-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/binary_inverted.png)| 33 | 34 | ```shell 35 | $ python negative.py lena.jpg gray 36 | ``` 37 | |Original|Grayscale Negative| 38 | |---|---| 39 | |![Gray-Negative-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/grayscale.png)|![Gray-Negative-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/grayscale_inverted.png)| 40 | 41 | 42 | ```shell 43 | $ python negative.py lena.jpg rgb 44 | ``` 45 | |Original|RGB Negative| 46 | |---|---| 47 | |![Rgb-Negative-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/rgb.jpg)|![Rgb-Negative-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Negative/rgb_inverted.png)| 48 | 49 | ### Image Segmentation 50 | 51 | ```shell 52 | $ python Segmentation.py 53 | ``` 54 | |Original|Segmented| 55 | |---|---| 56 | |![Segmented-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Segmentation/image.png)|![Segmented-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Image%20Segmentation/Capture3.PNG)| 57 | 58 | ### Centroid 59 | 60 | ```shell 61 | $ python Centroid.py 62 | ``` 63 | |Original|Centroid| 64 | |---|---| 65 | |![Centroid-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Centroid/Signature.png)|
<table><tr><td>Top Left</td><td>Top Right</td></tr>
<tr><td>![Centroid-TopLeft](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Centroid/TopLeft.png)</td><td>![Centroid-TopRight](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Centroid/TopRight.png)</td></tr>
<tr><td>Bottom Left</td><td>Bottom Right</td></tr>
<tr><td>![Centroid-BottomLeft](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Centroid/BottomLeft.png)</td><td>![Centroid-BottomRight](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Centroid/BottomRight.png)</td></tr></table>
| 66 | 67 | ### Connected Component Labelling 68 | 69 | ```shell 70 | $ python ccl4.py 71 | ``` 72 | |Original|CCL4 Labelled| 73 | |---|---| 74 | |![CCL4-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Connected%20Component%20Labelling/input.png)|![CCL4-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Connected%20Component%20Labelling/ccl.png)| 75 | 76 | ### Histogram Equalization 77 | 78 | ```shell 79 | $ python hist_eq.py 80 | ``` 81 | |Original|Histogram Equalized| 82 | |---|---| 83 | |![Hist-eq-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Histogram%20Equalization/hist2.jpg)|![Hist-eq-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Histogram%20Equalization/high_contrast.png)| 84 | 85 | ### Local Histogram Analysis 86 | 87 | |Original|Local Histogram| 88 | |---|---| 89 | |![Local-Hist-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Local%20Histogram%20Analysis/mountains.jpg)|![Local-Hist-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Local%20Histogram%20Analysis/high_contrast_local_img.png)| 90 | 91 | ### Morphology 92 | 93 | ```shell 94 | $ python Simple.py 95 | ``` 96 | |Original|Morphology| 97 | |---|---| 98 | |![Morphology-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Morphology/signature.png)|
<table><tr><td>Erosion</td><td>Dilation</td></tr>
<tr><td>![Erosion](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Morphology/erosion.png)</td><td>![Dilation](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Morphology/dilation.png)</td></tr>
<tr><td>Opening</td><td>Closing</td></tr>
<tr><td>![Opening](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Morphology/opening.png)</td><td>![Closing](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Morphology/closing.png)</td></tr></table>
| 99 | 100 | ### Sharpening 101 | 102 | ```shell 103 | $ python sharpen.py 104 | ``` 105 | |Original|Sharpened| 106 | |---|---| 107 | |![Sharpened-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Sharpening/inp1.jpg)|![Sharpened-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Sharpening/sharpen.jpg)| 108 | 109 | ### Skeletonization 110 | 111 | ```shell 112 | $ python Skeletonization.py 113 | ``` 114 | ![Skeletionization](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Skeletonization/output.png) 115 | 116 | ### Smoothing 117 | 118 | ```shell 119 | $ python AvergingFilter.py 120 | ``` 121 | |Original|Averaging Filter| 122 | |---|---| 123 | |![Averaging-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/inp1.jpeg)|![Averaging-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/averaging.jpg)| 124 | 125 | ```shell 126 | $ python gaussian.py 127 | ``` 128 | |Original|Gaussian| 129 | |---|---| 130 | |![gaussian-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/inp1.jpeg)|![gaussian-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/gaussian.jpg)| 131 | 132 | ```shell 133 | $ python unsharp_masking.py 134 | ``` 135 | |Original|Unsharp Masking| 136 | |---|---| 137 | |![Unsharp-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/inp2.jpeg)|![Unsharp-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/unsharp_masking.jpg)| 138 | 139 | ```shell 140 | $ python median.py 141 | ``` 142 | |Original|Median| 143 | |---|---| 144 | |![Unsharp-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/inp3.jpeg)|![Unsharp-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Smoothing/median.jpg)| 145 | 146 | 147 | ### XY Cuts 148 | 149 | ```shell 150 | $ python XY_Cuts.py 151 | ``` 152 | |Original|XY Cuts| 153 | |---|---| 154 | |![XY-Original](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/XY_Cuts/XY-cuts.png)|![XY-Result](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/XY_Cuts/xycut.png)| 155 | 156 | ### Template Matching 157 | 158 | ```shell 159 | $ python TemplateMatching.py 160 | ``` 161 | |Template|Matched in Image| 162 | |---|---| 163 | |![Template](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Template%20Matching/template.png)|![MatchedTemplate](https://github.com/mohammaduzair9/Basic-Digital-Image-Processing/blob/master/Template%20Matching/matchedTemplate.png)| 164 | 165 | -------------------------------------------------------------------------------- /Sharpening/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Sharpening/.DS_Store -------------------------------------------------------------------------------- /Sharpening/inp1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Sharpening/inp1.jpg -------------------------------------------------------------------------------- 
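The Sharpening example (sharpen.py, listed below) convolves two hand-written 3x3 Sobel kernels over the grayscale image and outputs the gradient magnitude. For reference, a short Python 3 sketch that produces the equivalent result with OpenCV's built-in Sobel operator — an illustrative substitution, not the repository's code; the output filename is made up:

```python
import cv2
import numpy as np

img = cv2.imread("inp1.jpg", 0).astype(np.float64)   # grayscale input, as in sharpen.py

dx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)       # horizontal derivative (3x3 Sobel)
dy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)       # vertical derivative (3x3 Sobel)
magnitude = np.sqrt(dx * dx + dy * dy)               # gradient magnitude, as in sharpen.py
magnitude *= 255.0 / magnitude.max()                 # normalize to the 0-255 range

cv2.imwrite("sharpen_sobel.jpg", magnitude)
```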
/Sharpening/sharpen.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Sharpening/sharpen.jpg -------------------------------------------------------------------------------- /Sharpening/sharpen.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | import copy 5 | 6 | plt.figure(figsize=(12,12)) 7 | #reading image file 8 | im = cv2.imread("inp1.jpg", 1) 9 | #function for sharpening filter 10 | def sharpenFiltering(img): 11 | inputImg = copy.deepcopy(img.astype(np.float)) 12 | #converting color scale from BGR to GRAY 13 | inputImg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 14 | #initialize black image of size equal to given image 15 | outputImg = np.zeros(inputImg.shape) 16 | #padding the image with zeros 17 | inputImg = np.pad(inputImg, (1, 1), 'constant', constant_values=(0)) 18 | #creating two filters for horizontal and vertical edge detection 19 | fh = np.array([[-1.0,-2.0,-1.0],[0.0,0.0,0.0],[1.0,2.0,1.0]]) 20 | fy = np.array([[-1.0,0.0,1.0],[-2.0,0.0,2.0],[-1.0,0.0,1.0]]) 21 | #looping through image pixels 22 | for row in range(1, inputImg.shape[0]-1): 23 | for col in range(1, inputImg.shape[1]-1): 24 | dx, dy = 0.0, 0.0 25 | #convolving both filters 26 | for x_filter in xrange(3): 27 | for y_filter in xrange(3): 28 | dx += inputImg[row+x_filter-1][col+y_filter-1]*fh[x_filter][y_filter] 29 | dy += inputImg[row+x_filter-1][col+y_filter-1]*fy[x_filter][y_filter] 30 | 31 | #magnitude of gradient (instead of just adding dx and dy. we calculate magnitude) 32 | pixel = np.sqrt(dx * dx + dy * dy) 33 | outputImg[row-1][col-1] = pixel 34 | #normalizing pixels 35 | outputImg *= 255.0/np.max(outputImg) 36 | return outputImg 37 | 38 | #applying sharpen filters 39 | output = sharpenFiltering(im) 40 | #writing image to image file 41 | cv2.imwrite("sharpen.jpg",output) 42 | #converting color scale from BGR to RGB 43 | im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) 44 | #plotting original image 45 | plt.subplot(211) 46 | plt.axis('off') 47 | plt.title("Original Image") 48 | plt.imshow(im) 49 | #plotting transformed image 50 | plt.subplot(212) 51 | plt.axis('off') 52 | plt.title("Transformed Image") 53 | plt.imshow(output, cmap="gray") 54 | 55 | plt.show() -------------------------------------------------------------------------------- /Skeletonization/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Skeletonization/.DS_Store -------------------------------------------------------------------------------- /Skeletonization/Skeletonization.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | img = cv2.imread('Thumb.png',0) 5 | ret,img = cv2.threshold(img,127,255,cv2.THRESH_BINARY) 6 | 7 | # Function for skeletonizing the image 8 | def findSkeleton(im): 9 | element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3)) 10 | out = np.zeros(im.shape,np.uint8) 11 | 12 | flag = 0 13 | while(not flag): 14 | eroded = cv2.erode(im, element) 15 | opened = cv2.dilate(eroded, element) 16 | opened = cv2.subtract(im,opened) 17 | out = cv2.bitwise_or(out,opened) 18 | im = eroded.copy() 19 | zeros = img.size - cv2.countNonZero(im) 20 | flag = 1 
if (zeros == img.size) else 0 21 | return out 22 | 23 | output = findSkeleton(img) 24 | 25 | kernel = np.ones((3,3),np.uint8) 26 | output = cv2.dilate(output,kernel) 27 | output = cv2.medianBlur(output, 5) 28 | ret,thresh = cv2.threshold(output,127,255,cv2.THRESH_BINARY_INV) 29 | 30 | res = np.hstack((img, thresh)) 31 | cv2.imshow("output", cv2.resize(res, dsize=None,fx=0.5, fy=0.5)) 32 | cv2.imwrite("task1_output.png", res) 33 | cv2.waitKey(0) 34 | 35 | -------------------------------------------------------------------------------- /Skeletonization/Thumb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Skeletonization/Thumb.png -------------------------------------------------------------------------------- /Skeletonization/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Skeletonization/output.png -------------------------------------------------------------------------------- /Smoothing/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/.DS_Store -------------------------------------------------------------------------------- /Smoothing/AvergingFilter.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | from Filter import applyFilter 5 | 6 | plt.figure(figsize=(12,12)) 7 | 8 | #reading image from file 9 | im = cv2.imread("inp1.tif", 0).astype(np.float) 10 | 11 | size = int(raw_input("> Enter the size of averaging filter: ")) 12 | #applying filter on image 13 | output = applyFilter(im, filterSize=size) 14 | 15 | #writing image to image file 16 | cv2.imwrite("averaging.jpg",output) 17 | 18 | #plotting original image 19 | plt.subplot(211) 20 | plt.axis('off') 21 | plt.title("Original Image") 22 | plt.imshow(im, cmap="gray") 23 | 24 | #plotting smoothed image 25 | plt.subplot(212) 26 | plt.axis('off') 27 | plt.title("Smoothed Image (avg. 
filter"+str(size)+"x"+str(size)+")") 28 | plt.imshow(output, cmap="gray") 29 | 30 | plt.show() -------------------------------------------------------------------------------- /Smoothing/Filter.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import copy 3 | 4 | #function to apply filter on image 5 | #call it with just filter size (averaging filter) 6 | #call it with given filter in imFilter 7 | def applyFilter(img, filterSize=None, imFilter=None): 8 | filteredImg = copy.deepcopy(img) 9 | #if filter is provided 10 | if (imFilter is not None): 11 | imgFilter = imFilter 12 | filterSize = len(imFilter) 13 | #if filter is not provided, create an averging filter 14 | else: 15 | imgFilter = (1.0/(filterSize*filterSize))*np.ones((filterSize,filterSize)) #creating filter 16 | 17 | paddingSize = filterSize/2 18 | #padding the image with zeros 19 | filteredImg = np.pad(filteredImg, (paddingSize, paddingSize), 'constant', constant_values=(0)) 20 | 21 | for row in range(paddingSize, filteredImg.shape[0]-paddingSize): 22 | for col in range(paddingSize, filteredImg.shape[1]-paddingSize): 23 | pixel = 0.0 24 | #convolving the filter 25 | for x_filter in xrange(filterSize): 26 | for y_filter in xrange(filterSize): 27 | pixel += filteredImg[row+x_filter-paddingSize][col+y_filter-paddingSize]*imgFilter[x_filter][y_filter] 28 | filteredImg[row,col] = pixel 29 | #removing padded pixels 30 | filteredImg = filteredImg[paddingSize:filteredImg.shape[0]-paddingSize, paddingSize:filteredImg.shape[1]-paddingSize] 31 | return filteredImg -------------------------------------------------------------------------------- /Smoothing/averaging.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/averaging.jpg -------------------------------------------------------------------------------- /Smoothing/gaussian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/gaussian.jpg -------------------------------------------------------------------------------- /Smoothing/gaussian.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | from Filter import applyFilter 5 | 6 | plt.figure(figsize=(12,12)) 7 | 8 | im = cv2.imread("inp1.tif", 0).astype(np.float) 9 | 10 | #creating gaussian filter 11 | gaussianFilter = np.array([[1,1,2,2,2,1,1], 12 | [1,2,2,4,2,2,1], 13 | [2,2,4,8,4,2,2], 14 | [2,4,8,16,8,4,2], 15 | [2,2,4,8,4,2,2], 16 | [1,2,2,4,2,2,1], 17 | [1,1,2,2,2,1,1]], np.float) 18 | gaussianFilter /= np.sum(gaussianFilter*1.0) 19 | 20 | #applying gaussian filter 21 | output = applyFilter(im, imFilter=gaussianFilter) 22 | #writing image to image file 23 | cv2.imwrite("gaussian.jpg",output) 24 | #plotting original image 25 | plt.subplot(211) 26 | plt.axis('off') 27 | plt.title("Original Image") 28 | plt.imshow(im, cmap="gray") 29 | 30 | #plotting smoothed image 31 | plt.subplot(212) 32 | plt.axis('off') 33 | plt.title("Smoothed Image (gaussian filter7x7 (sigma 1.4))") 34 | plt.imshow(output, cmap="gray") 35 | plt.show() -------------------------------------------------------------------------------- /Smoothing/inp1.jpeg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp1.jpeg -------------------------------------------------------------------------------- /Smoothing/inp1.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp1.tif -------------------------------------------------------------------------------- /Smoothing/inp2.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp2.jpeg -------------------------------------------------------------------------------- /Smoothing/inp2.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp2.tif -------------------------------------------------------------------------------- /Smoothing/inp3.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp3.jpeg -------------------------------------------------------------------------------- /Smoothing/inp3.tif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/inp3.tif -------------------------------------------------------------------------------- /Smoothing/median.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/median.jpg -------------------------------------------------------------------------------- /Smoothing/median.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | import copy 5 | 6 | plt.figure(figsize=(12,12)) 7 | #reading image from file 8 | im = cv2.imread("inp3.tif", 0).astype(np.float) 9 | #function for median filtering 10 | def medianFiltering(img, filterSize): 11 | #making deep copy of image 12 | filteredImg = copy.deepcopy(img) 13 | #calculating padding size 14 | paddingSize = filterSize/2 15 | #padding the image 16 | filteredImg = np.pad(filteredImg, (paddingSize, paddingSize), 'constant', constant_values=(0)) 17 | #loop through image pixels 18 | for row in range(paddingSize, filteredImg.shape[0]-paddingSize): 19 | for col in range(paddingSize, filteredImg.shape[1]-paddingSize): 20 | kernal = [] 21 | for x_filter in xrange(filterSize): 22 | for y_filter in xrange(filterSize): 23 | kernal.append(filteredImg[row+x_filter-paddingSize][col+y_filter-paddingSize]) 24 | #calculating median of list 25 | filteredImg[row,col] = np.median(kernal) 26 | #removing zero padding 27 | filteredImg = filteredImg[paddingSize:filteredImg.shape[0]-paddingSize, paddingSize:filteredImg.shape[1]-paddingSize] 28 | return filteredImg 29 | 30 | size = int(raw_input("> Enter the size of median filter: ")) 31 | #applying meadian 
filtering on image 32 | output = medianFiltering(im, filterSize=size) 33 | #writing file to image file 34 | cv2.imwrite("median.jpg",output) 35 | #plotting original image 36 | plt.subplot(211) 37 | plt.axis('off') 38 | plt.title("Original Image") 39 | plt.imshow(im, cmap="gray") 40 | #plotting transformed image 41 | plt.subplot(212) 42 | plt.axis('off') 43 | plt.title("Transformed Image") 44 | plt.imshow(output, cmap="gray") 45 | 46 | plt.show() -------------------------------------------------------------------------------- /Smoothing/unsharp_masking.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/unsharp_masking.jpg -------------------------------------------------------------------------------- /Smoothing/unsharp_masking.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | import copy 5 | from Filter import applyFilter 6 | 7 | plt.figure(figsize=(12,12)) 8 | #reading image from file 9 | im = cv2.imread("inp2.tif", 0).astype(np.float) 10 | 11 | #function for unsharp masking 12 | def unsharpMasking(img): 13 | inputImg = copy.deepcopy(img) 14 | blurredImg = applyFilter(inputImg, filterSize=5) 15 | mask = inputImg - blurredImg 16 | result = inputImg + mask 17 | return result 18 | #unsharp masking the image 19 | output = unsharpMasking(im) 20 | #writing image to image file 21 | cv2.imwrite("unsharp_masking.jpg",output) 22 | #plotting original image 23 | plt.subplot(211) 24 | plt.axis('off') 25 | plt.title("Original Image") 26 | plt.imshow(im, cmap="gray") 27 | #plotting transformed image 28 | plt.subplot(212) 29 | plt.axis('off') 30 | plt.title("Transformed Image") 31 | plt.imshow(output, cmap="gray") 32 | 33 | plt.show() -------------------------------------------------------------------------------- /Smoothing/weightedavg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Smoothing/weightedavg.jpg -------------------------------------------------------------------------------- /Smoothing/weightedavg.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | from Filter import applyFilter 5 | 6 | plt.figure(figsize=(12,12)) 7 | 8 | #reading image from file 9 | im = cv2.imread("inp1.tif", 0).astype(np.float) 10 | 11 | #creating weighted filter: 12 | #[1 2 1], 13 | #[2 4 2], 14 | #[1 2 1] 15 | weightedFilter = (1.0/16)*np.array([[1,2,1],[2,4,2],[1,2,1]], np.int32) 16 | 17 | #applying filter on image 18 | output = applyFilter(im, imFilter=weightedFilter) 19 | 20 | #writing image to image file 21 | cv2.imwrite("weightedavg.jpg",output) 22 | 23 | #plotting original image 24 | plt.subplot(211) 25 | plt.axis('off') 26 | plt.title("Original Image") 27 | plt.imshow(im, cmap="gray") 28 | 29 | #plotting smoothed image 30 | plt.subplot(212) 31 | plt.axis('off') 32 | plt.title("Smoothed Image (weighted avg. 
filter3x3)") 33 | plt.imshow(output, cmap="gray") 34 | plt.show() -------------------------------------------------------------------------------- /Template Matching/TemplateMatching.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | 4 | image = cv2.imread('image.png') 5 | template = cv2.imread('template.png') 6 | (templateHeight, templateWidth) = template.shape[:2] 7 | 8 | matchResult = cv2.matchTemplate(image, template, cv2.TM_CCOEFF) 9 | (_, _, minLoc, maxLoc) = cv2.minMaxLoc(matchResult) 10 | 11 | topLeft = maxLoc 12 | botRight = (topLeft[0] + templateWidth, topLeft[1] + templateHeight) 13 | roi = image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] 14 | 15 | mask = np.zeros(image.shape, dtype = "uint8") 16 | image = cv2.addWeighted(image, 0.25, mask, 0.75, 0) 17 | 18 | image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi 19 | 20 | cv2.imwrite("matchedTemplate.png", image) 21 | -------------------------------------------------------------------------------- /Template Matching/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Template Matching/image.png -------------------------------------------------------------------------------- /Template Matching/matchedTemplate.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Template Matching/matchedTemplate.png -------------------------------------------------------------------------------- /Template Matching/matchedTemplateCutOut.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Template Matching/matchedTemplateCutOut.png -------------------------------------------------------------------------------- /Template Matching/template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/Template Matching/template.png -------------------------------------------------------------------------------- /XY_Cuts/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/XY_Cuts/.DS_Store -------------------------------------------------------------------------------- /XY_Cuts/XY-cuts.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/XY_Cuts/XY-cuts.png -------------------------------------------------------------------------------- /XY_Cuts/XY_Cuts.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from matplotlib import pyplot as plt 3 | 4 | def xycut(image, image_path): 5 | # reading image 6 | img = plt.imread(image_path) 7 | fig, ax = plt.subplots() 8 | ax.imshow(img) 9 | 10 | black_pix = [] 11 | white_lines = [] 12 | # detecting the white lines 13 | for i in range(0, bin_img.shape[0]): 14 | # if number of white phixels in row is greater than 
750 15 | if (bin_img[i].sum() / 255 > 750): 16 | 17 | # draw horizontal lines 18 | ax.axhline(y=i, color='green') 19 | else: 20 | # black pixels on x-axis 21 | for j in range(0, bin_img.shape[1]): 22 | if (bin_img[i][j] == 0): 23 | black_pix.append(j) 24 | 25 | if len(black_pix) != 0: 26 | # draw first & last vertical line only 27 | ax.axvline(x=min(black_pix), linewidth=3, color='green') 28 | ax.axvline(x=max(black_pix), linewidth=3, color='green') 29 | 30 | # remove the axis 31 | ax.set_axis_off() 32 | # saving new figure 33 | plt.savefig('xycut.png', bbox_inches='tight') 34 | # show the figure 35 | plt.show() 36 | 37 | 38 | # load image as greyscale 39 | image = cv2.imread("XY-cuts.png", 0) 40 | # binarize image 41 | (_, bin_img) = cv2.threshold(image, 120, 255, cv2.THRESH_BINARY) 42 | # call xycut function 43 | xycut(bin_img, "XY-cuts.png") 44 | -------------------------------------------------------------------------------- /XY_Cuts/xycut.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mohammaduzair9/Basic-Image-Processing/1db823d47bfbab18cd27c86f1f21239e824baab0/XY_Cuts/xycut.png --------------------------------------------------------------------------------
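A quick way to sanity-check the hand-rolled smoothing code above is to compare applyFilter from Smoothing/Filter.py against OpenCV's built-in cv2.filter2D on the same kernel. The short sketch below is not part of the repository: it assumes it is run from inside the Smoothing folder (so Filter.py and inp1.tif are available) and, like the rest of the repo, under Python 2 (Filter.py uses xrange). Only interior pixels are compared, because Filter.py zero-pads the border while cv2.filter2D reflects it by default; for a symmetric kernel, the correlation computed by filter2D equals the convolution, so the two outputs should agree to floating-point precision away from the border.

import cv2
import numpy as np
from Filter import applyFilter

# same test image and 3x3 weighted-average kernel as weightedavg.py
im = cv2.imread("inp1.tif", 0).astype(float)
kernel = (1.0/16)*np.array([[1,2,1],[2,4,2],[1,2,1]], dtype=float)

ours = applyFilter(im, imFilter=kernel)   # hand-rolled filtering from Filter.py
ref = cv2.filter2D(im, -1, kernel)        # OpenCV reference

# ignore a 1-pixel border where the two padding strategies differ
diff = np.abs(ours[1:-1, 1:-1] - ref[1:-1, 1:-1])
print("max interior difference: %.6f" % diff.max())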