├── .gitattributes
├── ColorCount.java
├── LICENSE
├── README.md
├── TestVideo.mp4
├── main.py
├── opencv_python-3.2.0+contrib-cp36-cp36m-win_amd64.whl
├── output.txt
├── output_GIF.gif
├── part-r-00000
└── start.sh

/.gitattributes:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:ccb6e05d39983581a9db7912eea9be8f11c229072fb61a75a17e42f410e2bddd
3 | size 179
4 | 
--------------------------------------------------------------------------------
/ColorCount.java:
--------------------------------------------------------------------------------
1 | import java.io.IOException;
2 | import java.util.*;
3 | 
4 | import org.apache.hadoop.io.IntWritable;
5 | import org.apache.hadoop.io.LongWritable;
6 | import org.apache.hadoop.io.Text;
7 | 
8 | import org.apache.hadoop.mapreduce.Reducer;
9 | 
10 | import org.apache.hadoop.mapreduce.Mapper;
11 | 
12 | 
13 | import org.apache.hadoop.fs.Path;
14 | import org.apache.hadoop.mapreduce.Job;
15 | import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
16 | import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
17 | 
18 | public class ColorCount{
19 | 
20 | 
21 |     //mapper: emits (color, 1) for every comma-separated color token in a line
22 |     public static class color_mapper extends Mapper<LongWritable, Text, Text, IntWritable>
23 |     {
24 |         @Override
25 |         public void map(LongWritable key, Text value, Context context)
26 |                 throws IOException, InterruptedException{
27 | 
28 |             StringTokenizer iterator = new StringTokenizer(value.toString(), ",");
29 |             while(iterator.hasMoreTokens())
30 |             {
31 |                 value.set(iterator.nextToken());
32 |                 context.write(value, new IntWritable(1));
33 |             }
34 | 
35 |         }
36 |     }
37 | 
38 |     //reducer: sums the 1s for each color and emits (total, color)
39 |     public static class color_reducer
40 |             extends Reducer<Text, IntWritable, IntWritable, Text>{
41 |         @Override
42 |         public void reduce(Text key, Iterable<IntWritable> values, Context context)
43 |                 throws IOException, InterruptedException{
44 |             int sum = 0;
45 |             for(IntWritable value : values)
46 |             {
47 |                 sum = sum + value.get();
48 |             }
49 |             Text x = new Text();
50 |             x.set(key);
51 |             context.write(new IntWritable(sum), x);
52 |         }
53 |     }
54 | 
55 | 
56 | 
57 |     //main: configures and submits the job
58 | 
59 |     public static void main(String args[])
60 |             throws Exception{
61 |         if(args.length != 2)
62 |         {
63 |             System.err.println("usage: ColorCount <input path> <output path>");
64 |             System.exit(-1);
65 |         }
66 |         Job job = new Job();
67 |         job.setJarByClass(ColorCount.class);
68 |         job.setJobName("COLOR COUNT");
69 | 
70 |         FileInputFormat.addInputPath(job, new Path(args[0]));
71 |         FileOutputFormat.setOutputPath(job, new Path(args[1]));
72 | 
73 |         job.setMapperClass(color_mapper.class);
74 |         job.setReducerClass(color_reducer.class);
75 | 
76 |         job.setMapOutputKeyClass(Text.class);
77 |         job.setMapOutputValueClass(IntWritable.class);
78 | 
79 |         job.setOutputKeyClass(IntWritable.class);
80 |         job.setOutputValueClass(Text.class);
81 | 
82 |         System.exit(job.waitForCompletion(true) ? 0 : 1);
83 | 
84 |     }
85 | 
86 | 
87 | }
88 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 SKsaqlain
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Vehicle-Detection-And-Color-Classification
2 | This is a simple project that detects moving objects (in this case vehicles) in a video, classifies each vehicle's color using a k-means algorithm, and then finds the frequency of every detected color with a Hadoop MapReduce job.
3 | ## Working
4 | Moving objects are detected by subtracting the current frame from a background model built from previous frames, so static objects are eliminated and only the moving vehicles remain in the foreground mask. Morphological operations then remove noise, and dilation merges broken blobs back into single objects. Contours are extracted to locate the exact position of each object; the resulting bounding box is used both to crop the vehicle region out of the original frame (to obtain its RGB values) and to discard vehicles that are still far away. Finally, the RGB values inside the cropped region are normalized so that each object is represented by a single RGB value.
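A minimal sketch of this detection stage, closely mirroring the main steps in main.py (MOG2 background subtraction, morphological clean-up, contour filtering), is given below. It assumes OpenCV 3.x and the bundled TestVideo.mp4; thresholds such as the 35-pixel minimum box size are taken from main.py.
```
import cv2

# Background subtractor: pixels that differ from the learned background
# (i.e. moving objects) come out bright in the foreground mask.
subtractor = cv2.createBackgroundSubtractorMOG2()
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))

cap = cv2.VideoCapture("TestVideo.mp4")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    frame = cv2.resize(frame, (640, 480))

    mask = subtractor.apply(frame)
    # Morphology: erode/open to remove noise, dilate to merge broken blobs.
    mask = cv2.erode(mask, kernel, iterations=1)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    mask = cv2.dilate(mask, kernel, iterations=2)
    mask[mask < 240] = 0  # drop shadows / low-confidence pixels

    # OpenCV 3.x returns (image, contours, hierarchy); [-2] also works on 4.x.
    contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w >= 35 and h >= 35:  # ignore tiny / far-away blobs
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("vehicles", frame)
    if cv2.waitKey(30) & 0xFF == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
```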
5 | Once the RGB values of a detected region are obtained, k-means clustering is applied to find the dominant color (the centroid of the densest cluster), and that dominant color is matched to the nearest of a set of predefined color centroids. This simple model works well, but its accuracy could be improved further with other ML models.
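The sketch below condenses that classification step: k-means picks the dominant color of a cropped vehicle region, and the result is matched to the nearest reference color in L*a*b* space. It is illustrative only; the helper names dominant_color and closest_color_name are not from main.py, although the reference palette is the one main.py hard-codes.
```
import numpy as np
import cv2

# Reference colors in RGB, converted once to L*a*b* (same palette as main.py).
REFERENCE = {"red": (255, 0, 0), "green": (0, 255, 0), "blue": (0, 0, 255),
             "white": (255, 255, 255), "black": (100, 100, 100)}
NAMES = list(REFERENCE)
LAB_REFS = cv2.cvtColor(np.array([[REFERENCE[n]] for n in NAMES], dtype="uint8"),
                        cv2.COLOR_RGB2LAB).reshape(-1, 3).astype(np.float32)

def dominant_color(patch_bgr, k=2):
    """Return the centroid (B, G, R) of the largest k-means cluster in a patch."""
    pixels = np.float32(patch_bgr).reshape(-1, 3)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, 0.1)
    _, labels, centroids = cv2.kmeans(pixels, k, None, criteria, 10,
                                      cv2.KMEANS_RANDOM_CENTERS)
    return np.uint8(centroids[np.argmax(np.bincount(labels.flatten()))])

def closest_color_name(bgr):
    """Name of the reference color nearest to a (B, G, R) triple in L*a*b* space."""
    lab = cv2.cvtColor(np.uint8([[bgr]]), cv2.COLOR_BGR2LAB)[0, 0].astype(np.float32)
    return NAMES[int(np.argmin(np.linalg.norm(LAB_REFS - lab, axis=1)))]

# Example for a detected bounding box (x1, y1, x2, y2) in a BGR frame:
# color_name = closest_color_name(dominant_color(frame[y1:y2, x1:x2]))
```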
6 | The detected colors are written to output.txt. That file is then fed to a Hadoop MapReduce job (ColorCount.java), which counts the frequency of each color seen in the traffic and so indicates which vehicle color is most common (and, by extension, most frequently purchased).
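As a quick local sanity check, the same frequencies can be reproduced without Hadoop in a few lines of Python (this snippet is not part of the repository; it simply mirrors what the MapReduce job computes):
```
from collections import Counter

# Each line of output.txt is a comma-separated list of colors seen in one frame.
counts = Counter()
with open("output.txt") as fh:
    for line in fh:
        counts.update(color for color in line.strip().split(",") if color)

for color, freq in counts.most_common():
    print(freq, color)
```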
7 | The sample output is shown below. 8 | ![](output_GIF.gif) 9 | 10 | ## Requirements 11 |
12 | * Python 3.6.5
13 | * OpenCV 3
14 | * Java 1.8.0_181
15 | * Hadoop 2.6.4 on Ubuntu 16.04
16 | 
17 | 
18 | ## Usage
19 | Clone this repository from the Desktop (start.sh expects it at /home/hduser/Desktop/ColorCount):
20 | ```
21 | $ git clone https://github.com/SKsaqlain/Vehicle-Detection-And-Color-Classification ColorCount
22 | ```
23 | To detect moving vehicles/objects, run:
24 | ```
25 | $ cd ColorCount
26 | $ python3 main.py
27 | ```
28 | The main.py script displays the detections and periodically writes the colors of the vehicles found in the current frame to output.txt.
29 | To find the frequency of each color, output.txt has to be loaded into HDFS. After starting Hadoop, upload it with:
30 | ```
31 | hadoop fs -put <source> <destination>
32 | $ hadoop fs -put /home/hduser/Desktop/ColorCount/output.txt /Input
33 | ```
34 | To compile the Java MapReduce ColorCount program and run the resulting jar, execute:
35 | ```
36 | $ chmod 777 start.sh
37 | $ ./start.sh ColorCount
38 | ```
39 | To see the output:
40 | Navigate to http://localhost:50070 in your web browser.
41 | Click the Utilities tab (top right) => Browse the file system.
42 | Browse to the Output directory (the job writes its result to /Output at the HDFS root), then click part-r-00000 => Download.
43 | The downloaded file contains the required output. 44 | 45 | 46 | ## License 47 | This project is licensed under the [MIT License](LICENSE.md) 48 | -------------------------------------------------------------------------------- /TestVideo.mp4: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SKsaqlain/Vehicle-Detection-And-Color-Classification/2733e86a0fe6cabd48b5dbc392b51c826e561086/TestVideo.mp4 -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | 2 | from scipy.spatial import distance as dist 3 | from collections import OrderedDict 4 | import numpy as np 5 | from scipy.stats import itemfreq 6 | import cv2 7 | import math 8 | import warnings 9 | warnings.filterwarnings("ignore") 10 | 11 | #Function to get the centroid of the Object. 12 | def get_centroid(x, y, w, h): 13 | x1 = int(w / 2) 14 | y1 = int(h / 2) 15 | 16 | cx = x + x1 17 | cy = y + y1 18 | 19 | return (cx, cy) 20 | 21 | 22 | #function to detect vehical/moving object 23 | def detect_vehicles(fg_mask, min_contour_width=35, min_contour_height=35): 24 | 25 | matches = [] 26 | frame_copy=fg_mask 27 | # finding external contours 28 | im, contours, hierarchy = cv2.findContours( 29 | fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1) 30 | 31 | 32 | for (i, contour) in enumerate(contours): 33 | (x, y, w, h) = cv2.boundingRect(contour) 34 | contour_valid = (w >= min_contour_width) and ( 35 | h >= min_contour_height) 36 | 37 | if not contour_valid: 38 | continue 39 | 40 | # getting center of the bounding box 41 | centroid = get_centroid(x, y, w, h) 42 | 43 | matches.append(((x, y, w, h), centroid)) 44 | 45 | return matches 46 | 47 | 48 | #function to normalize the image so that the entire blob has the same rgb value 49 | def normalized(down): 50 | s=down.shape 51 | x=s[1] 52 | y=s[0] 53 | norm=np.zeros((y,x,3),np.float32) 54 | norm_rgb=np.zeros((y,x,3),np.uint8) 55 | 56 | b=down[:,:,0] 57 | g=down[:,:,1] 58 | r=down[:,:,2] 59 | 60 | sum=b+g+r 61 | 62 | norm[:,:,0]=b/sum*255.0 63 | norm[:,:,1]=g/sum*255.0 64 | norm[:,:,2]=r/sum*255.0 65 | 66 | norm_rgb=cv2.convertScaleAbs(norm) 67 | return norm_rgb 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | # initializing color class 76 | colors = OrderedDict({"red": (255, 0, 0),"green": (0, 255, 0),"blue": (0,0, 255),"white":(255,255,255),"black":(100,100,100)}) 77 | lab = np.zeros((len(colors), 1, 3), dtype="uint8") 78 | colorNames = [] 79 | 80 | 81 | f=open("output.txt","w") 82 | 83 | incre=1 84 | ''' 85 | if(len(x)==0): 86 | #no image name present in the file 87 | incre=1 88 | else: 89 | #reding the image number 90 | incre=int(x[-1].split(",")[0].split("_")[-1].split(".")[0]) 91 | f.close() 92 | ''' 93 | #converting the rbg color to lab colors 94 | for (i, (name, rgb)) in enumerate(colors.items()): 95 | # update the L*a*b* array and the color names list 96 | lab[i] = rgb 97 | colorNames.append(name) 98 | lab = cv2.cvtColor(lab, cv2.COLOR_RGB2LAB) 99 | 100 | 101 | #function to label car lab color to a perticular color class 102 | def label(image,lab,colorNames): 103 | 104 | # initialize the minimum distance found thus far 105 | minDist = (np.inf, None) 106 | 107 | # loop over the known L*a*b* color values 108 | for (i, row) in enumerate(lab): 109 | # compute the distance between the current L*a*b* 110 | # color value and the mean of the image 111 | 112 | d = dist.euclidean(row[0],image) 113 | 114 | # if the 
distance is smaller than the current distance, 115 | # then update the bookkeeping variable 116 | if d < minDist[0]: 117 | minDist = (d, i) 118 | 119 | # return the name of the color with the smallest distance 120 | return colorNames[minDist[1]] 121 | 122 | 123 | 124 | #initialising background object used for background elemination 125 | background=cv2.createBackgroundSubtractorMOG2() 126 | 127 | 128 | cap=cv2.VideoCapture('TestVideo.mp4') 129 | #initialising frame counter 130 | count_frame=0 131 | while(cap.isOpened()): 132 | _,frame=cap.read() 133 | #resizing the frame 134 | try: 135 | frame=cv2.resize(frame,(640,480)) 136 | except: 137 | break 138 | #creating a copy of the frame 139 | frame_copy=frame 140 | frame_copy_copy=copy =frame[:,:] 141 | 142 | #applying background elemination 143 | bg=background.apply(frame) 144 | 145 | #additional image processing 146 | 147 | kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2)) 148 | bg= cv2.erode(bg,kernel,iterations = 1) 149 | 150 | # Fill any small holes 151 | closing=cv2.morphologyEx(bg,cv2.MORPH_CLOSE,kernel) 152 | cv2.imshow("closing",closing) 153 | 154 | # Remove noise 155 | opening=cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel) 156 | cv2.imshow("removing_noise",opening) 157 | 158 | # Dilate to merge adjacent blobs 159 | dilation=cv2.dilate(opening, kernel, iterations=2) 160 | 161 | # threshold to remove furthur noise 162 | dilation[dilation < 240] = 0 163 | bg=dilation 164 | 165 | #initialising output color list 166 | output_color=[] 167 | 168 | #detecting contour and calculating the co-ordinates of the contours 169 | contour_list=detect_vehicles(bg) 170 | 171 | #traversing through each detected contour 172 | for ele in contour_list: 173 | x1=ele[0][0] 174 | y1=ele[0][1] 175 | x2=x1+ele[0][2] 176 | y2=y1+ele[0][3] 177 | #extracting the regions that contains car features 178 | 179 | slice_bg=frame_copy[y1:y2,x1:x2] 180 | 181 | #normalising the image so that there is uniform color throughout 182 | slice_bg=normalized(slice_bg) 183 | 184 | arr=np.float32(slice_bg) 185 | #reshaping the image to a linear form with 3-channels 186 | pixels=arr.reshape((-1,3)) 187 | 188 | #number of clusters 189 | n_colors=2 190 | 191 | #number of iterations 192 | criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1) 193 | 194 | #initialising centroid 195 | flags = cv2.KMEANS_RANDOM_CENTERS 196 | 197 | #applying k-means to detect prominant color in the image 198 | _, labels, centroids = cv2.kmeans(pixels, n_colors, None, criteria, 10, flags) 199 | 200 | 201 | palette = np.uint8(centroids) 202 | quantized = palette[labels.flatten()] 203 | 204 | #detecting the centroid with densest cluster 205 | dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])] 206 | 207 | 208 | 209 | r=int(dominant_color[0]) 210 | g=int(dominant_color[1]) 211 | b=int(dominant_color[2]) 212 | 213 | 214 | rgb=np.zeros((1,1,3),dtype='uint8') 215 | rgb[0]=(r,g,b) 216 | 217 | 218 | 219 | #getting the label of the car color 220 | color=label(rgb,lab,colorNames) 221 | 222 | 223 | output_color.append(color) 224 | 225 | #drawing rectangle over the detected car 226 | frame_copy= cv2.rectangle(frame_copy,(x1,y1),(x2,y2),(r,g,b),3) 227 | font = cv2.FONT_HERSHEY_SIMPLEX 228 | #labeling each rectangle with the detected color of the car 229 | cv2.putText(frame_copy,color,(x1,y1), font, 2,(r,g,b),2,cv2.LINE_AA) 230 | #openinig file to write the ouput of each frame 231 | #f=open("output.txt","w") 232 | 233 | #writing onto the file for every 10 frames 234 | 235 | 
if(count_frame%10==0): 236 | if(len(output_color)!=0): 237 | c=",".join(output_color)+'\n' 238 | 239 | #image_name="img_"+str(incre)+".jpg,"+c+'\n' 240 | f.write(c) 241 | # cv2.imwrite(img,frame) 242 | incre=incre+1 243 | count_frame=0 244 | count_frame+=1 245 | cv2.imshow("object",frame_copy) 246 | if(cv2.waitKey(30)==27 & 0xff): 247 | break 248 | 249 | cap.release() 250 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /opencv_python-3.2.0+contrib-cp36-cp36m-win_amd64.whl: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:96b1ec878d7adb203051539dcdb44d58937362217de24fc19a05a5f5774542e4 3 | size 51590654 4 | -------------------------------------------------------------------------------- /output.txt: -------------------------------------------------------------------------------- 1 | white 2 | white 3 | white 4 | white 5 | white 6 | white 7 | white 8 | white,black,white 9 | black,black 10 | black 11 | black 12 | black 13 | black,red 14 | red,green 15 | red,red 16 | black 17 | black,green 18 | black,green 19 | black,green 20 | black 21 | black 22 | black 23 | black 24 | black 25 | black 26 | black 27 | black 28 | black 29 | black 30 | black 31 | black 32 | black 33 | black 34 | red 35 | white 36 | black,black 37 | black,black 38 | black,black 39 | black,black 40 | black,black 41 | black,black,white 42 | black,black 43 | black 44 | black,white 45 | red,black 46 | black,red,black,white 47 | black,black,black 48 | black 49 | black 50 | black,black 51 | white 52 | black 53 | black,white 54 | black,black,white 55 | black 56 | black,black,white 57 | black 58 | black,white 59 | black 60 | black 61 | black 62 | black 63 | white 64 | white 65 | -------------------------------------------------------------------------------- /output_GIF.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SKsaqlain/Vehicle-Detection-And-Color-Classification/2733e86a0fe6cabd48b5dbc392b51c826e561086/output_GIF.gif -------------------------------------------------------------------------------- /part-r-00000: -------------------------------------------------------------------------------- 1 | 64 black 2 | 4 green 3 | 7 red 4 | 20 white 5 | -------------------------------------------------------------------------------- /start.sh: -------------------------------------------------------------------------------- 1 | # !!! MAKE APPROPRIATE CHANGES TO THE PATH !!! 2 | 3 | 4 | cd /home/hduser/Desktop/ColorCount 5 | echo " COMPILATION IN PROGRESS !!!" 6 | javac -classpath /usr/local/hadoop/share/hadoop/common/hadoop-common-2.6.4.jar:/usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.6.4.jar:/usr/local/hadoop/share/hadoop/common/lib/commons-cli-1.2.jar -d /home/hduser/Desktop/ColorCount $1.java 7 | echo " " 8 | echo " CREATING DIRECTORY " 9 | echo " " 10 | rm -rf c || mkdir c 11 | mkdir c || echo "directory c exists" 12 | echo " " 13 | echo "MOVING .Class FILES TO THE CREATED DIRECTORY" 14 | mv *.class c 15 | echo " " 16 | 17 | cd /home/hduser/Desktop/ColorCount 18 | echo " " 19 | echo "CREATING JAR FILE" 20 | echo " " 21 | jar -cvf $1.jar -C /home/hduser/Desktop/ColorCount/c . 
22 | echo " " 23 | echo " " 24 | echo " " 25 | echo "DELETING EXISTING OUTPUT FILES" 26 | # hadoop fs -rm -r -skipTrash /user/hduser/Output || echo " unable to delete Output folder" 27 | 28 | hadoop fs -rm -r -skipTrash /Output || echo "unable to delete /Output folder" 29 | echo " " 30 | echo " " 31 | echo " " 32 | echo "STARTING TO EXECUTE !!" 33 | cd /usr/local/hadoop 34 | echo " " 35 | bin/hadoop jar /home/hduser/Desktop/ColorCount/$1.jar $1 /Input /Output 36 | echo " " 37 | echo " " 38 | --------------------------------------------------------------------------------