├── Live_Code.py ├── Main_Code.py └── README.md /Live_Code.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | 5 | """ 6 | @author: EECE AR HUD TEAM 2018 7 | Created on Fri Jul 6 14:39:50 2018 8 | 9 | """ 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | """ 20 | This is a script that can be used to retrain the YOLOv2 model for our own dataset. 21 | """ 22 | ########################### MAIN CODE PART IMPORT ##################################### 23 | from IPython import get_ipython # to be used as an interface with Ipython console 24 | import time 25 | import os,cv2,sys 26 | import numpy as np 27 | import PIL #to be used for preoprocessing of data 28 | from keras import backend as K 29 | from keras.layers import Input, Lambda, Conv2D 30 | from keras.models import load_model, Model 31 | from yad2k.models.keras_yolo import (yolo_body,yolo_eval, yolo_head, yolo_loss) 32 | from yad2k.utils.draw_boxes import draw_boxes 33 | from PIL import Image 34 | from queue import Queue #to be used for queuing to get rid of image after showing it in real live video 35 | import socket 36 | ########################### NAVIGATION PART IMPORT #################################### 37 | import urllib.request 38 | import json 39 | import re as regex 40 | import mpu 41 | ############################### AR PART IMPORT ######################################## 42 | from PIL import ImageFont 43 | from PIL import ImageDraw 44 | from datetime import datetime 45 | from threading import Thread 46 | from flask import Flask, render_template, Response 47 | import http.client 48 | """ 49 | ######################################################################################## 50 | ########################## INITIALIZATION OF GLOBAL VARIABLES ########################## 51 | ######################################################################################## 52 | """ 53 | ################################### IP Comfiguration ########################### 54 | webcamIP = "10.42.0.166" 55 | serverDataIP = "10.42.0.1" 56 | VR_IP = "192.168.42.132" 57 | #AIzaSyDk6T9Ap6FuWMxQlmodB3MQszzN9upWZxw 58 | 59 | 60 | API_KEY = "AIzaSyBwMEXvIKqsn5EShsfN6TUzv1u71YB3p74" 61 | ####### BUFFER FOR SHARING DATA BETWEEN AR, NAVIGATION AND DETECTION PARTS ############# 62 | buffer=[False,None, 0 , False, None , None, False, 0, False, (0,0), False,(0,0),[]] 63 | 64 | """ 65 | IsDirection=buffer[0]-->flag for the arrow on the ground 66 | direction=buffer[1]-->type of the arrow on the ground[go,left,right] 67 | distance=buffer[2]--> int for the distance of the given direction (print in a message below the arrow) 68 | arrived=buffer[3]--> Flag if arrived for final destination 69 | NextRouteDirection=buffer[4]-->string for navigation notifcation[first notification bar] if (=none) no navigation ,if ((up,right,left,uturn)) put the corrsponding image 70 | next_Route=buffer[5]-->(string ) name of the next route 71 | IsCalling=buffer[6]-->flag if the coming mobile name is caller ID 72 | MobileName=buffer[7]--> name coming from mobile caller ID or song name ,if none no mobile notifcation 73 | IsSign=buffer[8]--> flag if sign detected 74 | SignType=buffer[9]--> detected sign type 75 | IsCar=buffer[10] -->flag for cars 76 | CarPos=buffer[11]--> car position [todo positionsssss] 77 | IsPed=buffer[12]-->flag for pedstrinas 78 | PedPos=buffer[13]--> pedstrian position [todo postionsss] 79 | cat_sp=buffer[14] --> car speed 80 | answered=buffer[17] --> True if the call is answered 81 | music_state= buffer[18]--> pause or play 82 | """ 83 | ############################### NAVIGATION VARIABLES ################################### 84 | 85 | Mode="driving" 86 | isNeedRerouting=False 87 | destinationPoint ="0.0%2c0.0" 88 | startingPoint ="0.0%2c0.0" 89 | currentLocation="0.0%2c0.0" 90 | FirstTimeNav=True 91 | distance2=None 92 | TextToSpeechFlag=1 93 | 94 | ################################# AR VARIABLES ######################################### 95 | 96 | #********************** used to display server data for a given time ******************# 97 | folderPath="AR" # folder containing arrows 98 | length_of_box=12 # length of characters in notification box 99 | name_counter=-1 # index of first character in song name to be dispayed in each time which increases after a specific delay to slide 100 | counter_Delay=0 # initial value of delay for each stream of characters to be displayed before sliding one character left 101 | name_counter_R=-1 102 | counter_Delay_R=0 103 | switch_flag=0 # 0 is initial value, 1 when music is on and 2 in case of calling 104 | CallImage_counter=0 # counter for flashing call image for a specific timer 105 | start = datetime.now() # time of answering a call which is the start value of call counter 106 | last_answered=False # initial value for call state, True if answered then increasing call couter 107 | Ring=True # initial state for sliding a caller name 108 | First_Time=True # indication of first value for next route and then start sliding notification box 109 | TrafficSignFlag=0 110 | Sign_Or_Min=False 111 | 112 | PedCounter0=0 113 | PedCounter1=0 114 | PedCounter2=0 115 | 116 | PedCounterImage0=0 117 | PedCounterImage1=0 118 | PedCounterImage2=0 119 | toggle0=True 120 | toggle1=True 121 | toggle2=True 122 | #**************** used to display a given sign type for a given time ******************# 123 | Idle=False # state for detecting a traffic sign, True if detecting none 124 | SleepTime=0 # time for displaying a message of detected traffic sign 125 | MaxTime=100 # max time for displaying a detected traffic sign message """TODO find relation bet MaxTime and car Speed """ 126 | DisplayedSignType=0 # sign type to be displayed [1:No Stop , 2:Curve Left , 3:Pedstrians Crossing , 4:Bump , 5:U-turn , 6: ... , 7:Split , 8:Devided Road , 9:Bump and Pedstrians Crossing] 127 | NumberOfSigns=0 128 | 129 | #********************** To avoid oscillations in cars and pedestrian ******************# 130 | KeepPositionTime=4 131 | CarCounter=0 132 | PedCounter=0 133 | ChangeCar=True 134 | ChangePed=True 135 | CalibrationImg=True 136 | CarCounter=[0,0,0] 137 | PedCounter=[0,0,0] 138 | 139 | OldPosCar=[(),(),()] #centres of displayed boxes "old ones" 140 | OldPosPed=[(),(),()] 141 | 142 | OldCarBoxDim=[[],[],[]] #boxes of last displayed frame 143 | OldpedBoxDim=[[],[],[]] 144 | 145 | 146 | 147 | w=480 # desired image width to be resized and displayed in 148 | h=360 # desired image hight to be resized and displayed in 149 | index1=int(110/1920*w) # initial index for cropping next route notification box 150 | index2=int(110/1920*w) # initial index for cropping music and calling notification box 151 | save=False # resize and save all images needed for specified image size 152 | Try=True # only resize needed images needed for specified image size without saving 153 | T_server=[] # transparent image for recieved server data 154 | T_process=[] # transparent image for detection data 155 | navigationCounter=-1 156 | ##################################### only for testing #################################### 157 | arrived=False # initial value for navigation, True when arriving to desired destination 158 | direc=None # initial value for direction of next route 159 | next_route="" # initial value for string of next route 160 | IsCalling=False # initial value, False when music is on and True if calling 161 | Mob_Name=None # name of caller or song and None for Idle state 162 | ans=False # initial value for answering a call 163 | speed=1 # initial value for speedometer 164 | 165 | ###################################### MAIN VARIABLES ###################################### 166 | 167 | DrawFrames=[] 168 | DisplayQueue = Queue(); # a queue to hold next image to be shown in video mode 169 | frames_captured=[] # frames captured by webcam (each frame captured is deleted after passing to draw function ) 170 | ProcessingTime = 0.5; # initial processing time for the first frame 171 | YOLO_ANCHORS = np.array(((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),(7.88282, 3.52778), (9.77052, 9.16828))) 172 | weights_file_path='saved_weights/BestWeights/trained_epoch_65_acc_45.h5' 173 | classes_path='saved_weights/BestWeights/Temp_classes.txt' 174 | VideoPath='AR/TestVideos/Test5.MOV' 175 | AR_Mode=True # Falg for displaying AR data on frames 176 | stream=True # True if stream and False if WebCam 177 | 178 | ret=None # initial value for correctly capturing frame by WebCam 179 | im=None # the current captured frame to be sent to detection process 180 | video=None # initial value for video captured from a videoPath or live WebCam 181 | im2=[] # the captured frame after preprocessing to be sent for dislaying in DisplayInterval Function 182 | GetFramesFlag=False # Flag is set to True when capturing frist frame from WebCam 183 | count_occurrence=np.zeros(9) 184 | frames_counted=np.zeros(9) 185 | begin=np.zeros(9) 186 | number_of_frames_averaged=10 187 | min_occurrence=5 188 | 189 | """ 190 | ######################################################################################## 191 | ############################### NAVIGATION FUNCCTIONS ################################## 192 | ######################################################################################## 193 | """ 194 | ######################################################################################## 195 | ######################################################################################## 196 | ######################################################################################## 197 | app = Flask(__name__) 198 | @app.route('/') 199 | def index(): 200 | return render_template('index.html') 201 | 202 | def gen(): 203 | goSleep = False 204 | while True: 205 | if (goSleep): 206 | time.sleep(1/100) 207 | goSleep = False 208 | else: 209 | time.sleep(1/500) 210 | 211 | Image = open("images/{}.jpg".format(0),"rb").read(); 212 | 213 | find = Image.find(b'\xff\xd9'); 214 | if (find != -1): 215 | goSleep = True 216 | yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + Image + b'\r\n\r\n') 217 | else: 218 | goSleep = False 219 | 220 | 221 | 222 | @app.route('/video_feed') 223 | def video_feed(): 224 | return Response(gen(), 225 | mimetype='multipart/x-mixed-replace; boundary=frame') 226 | 227 | ######################################################################################## 228 | ######################################################################################## 229 | ######################################################################################## 230 | def getRequestSream(ip=serverDataIP,port=80,page='serverData.html'): 231 | if (port == 80): 232 | connection = http.client.HTTPConnection(ip,port) 233 | elif (port == 443): 234 | connection = http.client.HTTPSConnection(ip,port) 235 | 236 | connection.request('GET', '/' + page) 237 | response = connection.getresponse() 238 | answer = response.read() 239 | return answer 240 | 241 | def checkRerouting(currentLoc,nextLoc): 242 | global destinationPoint,distance2 243 | startingPoint2 = currentLoc; 244 | requestData = getRequestSream(ip="maps.googleapis.com",port=443,page="maps/api/directions/json?origin={}&destination={}&" \ 245 | "mode={}&key={}".format(startingPoint2,destinationPoint,"driving",API_KEY)).decode('utf-8'); 246 | jsonData = json.loads(requestData) 247 | destLat = jsonData["routes"][0]["legs"][0]["steps"][0]["end_location"]["lat"]; 248 | destLong = jsonData["routes"][0]["legs"][0]["steps"][0]["end_location"]["lng"]; 249 | distance2 = jsonData["routes"][0]["legs"][0]["steps"][0]["distance"]["text"]; 250 | nextLocation = str(destLat) + "," + str(destLong); 251 | if (nextLocation == nextLoc): 252 | return False; 253 | else: 254 | return True; 255 | 256 | 257 | 258 | 259 | """ 260 | ############################################################################################## 261 | ################################### Animation Function ####################################### 262 | ############################################################################################## 263 | """ 264 | def Animation(name=" ",IsCall=True,Ring=True,Route=False): 265 | """ 266 | this function takes caller name or song name and return the wanted characters to be displayed at that time for sliding effect. 267 | 268 | Parameters: 269 | ----------- 270 | name: string 271 | caller or song name 272 | 273 | IsCall: bool,default True 274 | True if call and False if song 275 | 276 | Ring: bool,default True 277 | in case of calling, True if Ringing mode and False after answering the call 278 | 279 | Return: 280 | ------- 281 | name:string 282 | name to be displayed in notification box in this time 283 | """ 284 | global background 285 | global length_of_box 286 | global name_counter,counter_Delay,name_counter_R,counter_Delay_R 287 | if Route==True: 288 | counter_Delay_R+=1 289 | if(counter_Delay_R==8): 290 | counter_Delay_R=0 291 | name_counter_R+=1 292 | if name_counter_R==len (name): 293 | name_counter_R=0 294 | return name[name_counter_R:name_counter_R+length_of_box+1] 295 | 296 | if IsCall and Ring: 297 | name=str(name+" is calling") 298 | while True : 299 | if(len(name)<=length_of_box): 300 | 301 | return name 302 | else: 303 | counter_Delay+=1 304 | if(counter_Delay==8): 305 | counter_Delay=0 306 | name_counter+=1 307 | if name_counter==len (name): 308 | name_counter=0 309 | return name[name_counter:name_counter+length_of_box+1] 310 | 311 | 312 | """ 313 | ############################################################################################## 314 | ################################# DrawProcess Function ####################################### 315 | ############################################################################################## 316 | """ 317 | def DrawProcess(): 318 | """ 319 | this function updates a global tranparent image for AR detection data[cars,traffic signs,pedsrians] after running session on the frame to be composited with Frame and displayed 320 | 321 | parameters: 322 | ----------- 323 | None 324 | 325 | Return: 326 | ------- 327 | None 328 | """ 329 | global buffer,T_process 330 | global Idle,SleepTime,DisplayedSignType 331 | global ChangeCar,ChangePed,KeepPositionTime,CarCounter,PedCounter,TempPosCar,TempPosPed,CalibrationImg ### Dina 332 | global IP 333 | global w,h 334 | global save,Try 335 | global TrafficSignFlag,Sign_Or_Min 336 | global PedCounter0,PedCounter1,PedCounter2 337 | global PedCounterImage0,PedCounterImage1,PedCounterImage2 338 | global toggle0,toggle1,toggle2 339 | 340 | TransparantImage = Image.new('RGBA',(w,h),(255,255,255,0)) # create a transparent image 341 | 342 | # getting needed data from global buffer 343 | IsSign=buffer[6] 344 | SignType=buffer[7] 345 | IsCar=buffer[8] 346 | CarPos=buffer[9] 347 | IsPed=buffer[10] 348 | PedPos=buffer[11] 349 | ################################################################################################ 350 | ############################## for Traffic sign detection ###################################### 351 | ################################################################################################ 352 | if Idle: # if True(sign is already displayed), open the same traffic sign image to be displayed again 353 | SignImage=Image.open(os.path.join(folderPath,"TrafficSigns/{}.png".format(DisplayedSignType))) 354 | SignImage=SignImage.convert("RGBA") 355 | if save==True: # resize and save if save mode 356 | SignImage=SignImage.resize((int(454/1920*w),int(340/1080*h)),Image.ANTIALIAS) 357 | SignImage.save("TrafficSigns/{}.png".format(SignType)) 358 | elif Try==True: # only resize if try mode 359 | SignImage=SignImage.resize((int(454/1920*w),int(340/1080*h)),Image.ANTIALIAS) 360 | TransparantImage.paste(SignImage, (600,750),SignImage) 361 | SleepTime+=1 362 | if(SleepTime==MaxTime): # stop displaying traffic image after a specific time 363 | Idle=False 364 | SleepTime=0 365 | Sign_Or_Min=True 366 | 367 | if(IsSign and not Idle and TrafficSignFlag): # if IsSign is true display the sign if Idle is False as it's first time to be detected 368 | if SignType==78: 369 | SignType=7 370 | Sign_Or_Min=True 371 | Idle=True 372 | DisplayedSignType=SignType 373 | SignImage=Image.open(os.path.join(folderPath,"TrafficSigns/{}.png".format(SignType))) 374 | SignImage=SignImage.convert("RGBA") 375 | if save==True: # resize and save if save mode 376 | SignImage=SignImage.resize((int(454/1920*w),int(340/1080*h)),Image.ANTIALIAS) 377 | SignImage.save("TrafficSigns/{}.png".format(SignType)) 378 | elif Try==True: # only resize if try mode 379 | SignImage=SignImage.resize((int(454/1920*w),int(340/1080*h)),Image.ANTIALIAS) 380 | TransparantImage.paste(SignImage, (int(600/1920*w),int(750/1080*h)),SignImage) 381 | ################################################################################################ 382 | #################################### for car detection ######################################### 383 | ################################################################################################ 384 | if(IsCar): # if IsCar is true display,loop over boxes and display the warning sign 385 | for i in range(len(CarPos)): 386 | if(CarPos[i]!=()): 387 | CarImage=Image.open(os.path.join(folderPath,"TrafficSigns/Car2.png")) 388 | CarImage=CarImage.convert("RGBA") 389 | if save==True: # resize and save if save mode 390 | CarImage=CarImage.resize((int(80/1920*w),int(80/1080*h)),Image.ANTIALIAS) 391 | CarImage.save(os.path.join(folderPath,"TrafficSigns/Car2.png")) 392 | elif Try==True: # only resize if try mode 393 | CarImage=CarImage.resize((int(80/1920*w),int(80/1080*h)),Image.ANTIALIAS) 394 | TransparantImage.paste(CarImage, CarPos[i],CarImage) 395 | ################################################################################################ 396 | ################################# for pedstrian detection ###################################### 397 | ################################################################################################ 398 | if(IsPed): # if IsPed is true display,loop over boxes and display the warning sign 399 | for i in range(len(PedPos)) : 400 | if(PedPos[i]!=()): 401 | if(PedPos[i][0]w/3 and PedPos[i][0]<2*w/3): 424 | PedImage=Image.open(os.path.join(folderPath,"TrafficSigns/ped{}.png".format(PedCounterImage1))) 425 | PedImage=PedImage.convert("RGBA") 426 | # PedImage=PedImage.resize((70,70),Image.ANTIALIAS) 427 | PedImage=PedImage.resize((int(100/1920*w),int(100/1080*h)),Image.ANTIALIAS) 428 | TransparantImage.paste(PedImage, PedPos[i],PedImage) 429 | PedCounter1+=1 430 | if(PedCounter1 % 30==0 and toggle1==True): 431 | PedCounter1=0 432 | if PedCounterImage1==1: 433 | toggle1=False 434 | else: 435 | PedCounterImage1+=1 436 | 437 | elif(PedCounter1 % 30==0 and toggle1==False): 438 | PedCounter1=0 439 | if PedCounterImage1==0: 440 | toggle1=True 441 | else: 442 | PedCounterImage1-=1 443 | 444 | if(PedPos[i][0]>2*w/3): 445 | PedImage=Image.open(os.path.join(folderPath,"TrafficSigns/ped{}.png".format(PedCounterImage2))) 446 | PedImage=PedImage.convert("RGBA") 447 | # PedImage=PedImage.resize((70,70),Image.ANTIALIAS) 448 | PedImage=PedImage.resize((int(100/1920*w),int(100/1080*h)),Image.ANTIALIAS) 449 | TransparantImage.paste(PedImage, PedPos[i],PedImage) 450 | PedCounter2+=1 451 | if(PedCounter2 % 30==0 and toggle2==True): 452 | PedCounter2=0 453 | if PedCounterImage2==1: 454 | toggle2=False 455 | else: 456 | PedCounterImage2+=1 457 | 458 | elif(PedCounter2 % 30==0 and toggle2==False): 459 | PedCounter2=0 460 | if PedCounterImage2==0: 461 | toggle2=True 462 | else: 463 | PedCounterImage2-=1 464 | # update the global process transparent image 465 | T_process=TransparantImage 466 | 467 | 468 | 469 | """ 470 | ############################################################################################## 471 | ################################### DrawDirection Function ################################### 472 | ############################################################################################## 473 | """ 474 | def DrawDirection(): 475 | """ 476 | This function run on a seperat thread to update a gloabl transparent image for AR server data[next navigation Route, call, music, car speed] to be composited with Frame and displayed 477 | 478 | parameters: 479 | ----------- 480 | None 481 | 482 | Return: 483 | ------- 484 | None 485 | """ 486 | global buffer,T_server,length_of_box 487 | global index1 488 | global index2 489 | global switch_flag 490 | global CallImage_counter 491 | global start 492 | global last_answered 493 | global Ring 494 | global First_Time 495 | global name_counter 496 | global Idle,SleepTime,DisplayedSignType 497 | global ChangeCar,ChangePed,KeepPositionTime,CarCounter,PedCounter,TempPosCar,TempPosPed,CalibrationImg 498 | global IP 499 | global destinationPoint,startingPoint,currentLocation,FirstTimeNav 500 | global w,h 501 | global save,Try 502 | global TextToSpeechFlag,TrafficSignFlag 503 | global OldCarBoxDim,OldPosCar,navigationCounter,Sign_Or_Min 504 | # initialization values for server data before recieving the current data 505 | cat_sp=0 506 | IsCalling=False 507 | MobileName=None 508 | answered=False 509 | music_state= "pause" 510 | global destinationPoint,isNeedRerouting,currentLocation,startingPoint,buffer,distance2,text_to_speech_phrase,text_to_speech_flag 511 | text_to_speech_phrase="" 512 | # imporing the used font for writing notification 513 | font = ImageFont.truetype("AR/fonts/Coval-Heavy.ttf", 14) 514 | font1 = ImageFont.truetype("AR/fonts/digital-7 (italic).ttf", 30) 515 | 516 | while True: 517 | Sign_Or_Min=False 518 | time.sleep(1/30); 519 | # get navigation data from global buffer 520 | IsDirection=True 521 | direction=buffer[1] 522 | distance=buffer[2] 523 | arrived=buffer[3] 524 | NextRouteDirection=buffer[4] 525 | next_Route=buffer[5] 526 | CarPos=OldPosCar 527 | OldCarBox=OldCarBoxDim 528 | 529 | # read server data 530 | try: 531 | page_source = getRequestSream().decode('utf-8') 532 | 533 | if(len(page_source)>0): 534 | Long,Lat,speed,trackName,callerID,destLong,destLat,callState,settings = page_source.split(':')[1].split(",") 535 | except: 536 | input("Error in get stream Draw Function"); 537 | continue; 538 | 539 | # destinationPoint="30.025953,31.223558" 540 | # startingPoint="30.022543,31.211342" 541 | # print (trackName) 542 | # print("Lat and Long =",Lat,",",Long) 543 | destinationPoint=str(destLat)+"%2c"+str(destLong) 544 | if FirstTimeNav==True and destinationPoint!="0.0%2c0.0": 545 | startingPoint = str(Lat)+"%2c"+str(Long); 546 | # print("startingPoint=",startingPoint) 547 | # input() 548 | FirstTimeNav=False 549 | else: 550 | currentLocation= str(Lat)+"%2c"+str(Long); 551 | # print(currentLocation) 552 | if destinationPoint=="0.0%2c0.0" or startingPoint=="0.0%2c0.0" or currentLocation=="0.0%2c0.0": 553 | # print("inside if condition.") 554 | NextRouteDirection=None 555 | # update settings flags 556 | Flags=settings.split("#") 557 | NavigationFlag=int(float(Flags[0])) 558 | MusicFlag=int(float(Flags[1])) 559 | PhoneCallFlag=int(float(Flags[2])) 560 | TrafficSignFlag=int(float(Flags[3])) 561 | TextToSpeechFlag=int(float(Flags[4])) 562 | SpeedFlag=int(float(Flags[7])) 563 | TempFlag=int(float(Flags[8])) 564 | FuelFlag=int(float(Flags[9])) 565 | cat_sp=int(float(speed)) 566 | 567 | IsDirection=NavigationFlag 568 | 569 | if (callerID!="Idle" and PhoneCallFlag): # there is a comming call, so get the caller name and set the calling state to True 570 | IsCalling=True 571 | MobileName=callerID 572 | if callState=="Busy": # state of answering a call, then start a time counter for the call 573 | answered=True 574 | 575 | elif trackName!="null" and callState=="Idle" and MusicFlag: # song is being played, then get the Track name and set the music state to "play" 576 | IsCalling=False 577 | MobileName=trackName 578 | music_state="play" 579 | elif trackName=="null" and callState=="Idle" : # song is being played, then get the Track name and set the music state to "play" 580 | MobileName=None 581 | 582 | 583 | 584 | 585 | 586 | # create the transparent image 587 | TransparantImage = Image.new('RGBA',(w,h),(255,255,255,0)) 588 | d = ImageDraw.Draw(TransparantImage) 589 | 590 | 591 | ############################################################################################# 592 | ############################################################################################# 593 | if(True): 594 | for i in range (len(CarPos)): 595 | if(OldCarBox[i]!=[]): 596 | if(((OldCarBox[i][1] + OldCarBox[i][3])/2)>int(810/1920*w) and ((OldCarBox[i][1] + OldCarBox[i][3])/2) int(850/1080*h))): 597 | MinImage=Image.open(os.path.join(folderPath,"Pictures/3.png")) 598 | MinImage=MinImage.convert("RGBA") 599 | MinImage=MinImage.resize((int(650/1920*w),int(150/1080*h)),Image.ANTIALIAS) 600 | ### MinImage=MinImage.resize((1100,650),Image.ANTIALIAS) 601 | TransparantImage.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*0.2),int(OldCarBox[i][2])),MinImage) 602 | ### image_copy.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*0.7),int(OldCarBox[i][2])-int((OldCarBox[i][2]-OldCarBox[i][0])*0.7)),MinImage) 603 | Sign_Or_Min=True 604 | elif(((OldCarBox[i][1] + OldCarBox[i][3])/2)>int(810/1920*w) and ((OldCarBox[i][1] + OldCarBox[i][3])/2) int(800/1080*h))): 605 | 606 | MinImage=Image.open(os.path.join(folderPath,"Pictures/4.png")) 607 | MinImage=MinImage.convert("RGBA") 608 | MinImage=MinImage.resize((int(680/1920*w),int(200/1080*h),Image.ANTIALIAS)) 609 | ### MinImage=MinImage.resize((880,400),Image.ANTIALIAS) 610 | 611 | TransparantImage.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*1.1),int(OldCarBox[i][2])),MinImage) 612 | ### image_copy.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*1.5),int(OldCarBox[i][2])-int((OldCarBox[i][2]-OldCarBox[i][0])*1.35)),MinImage) 613 | Sign_Or_Min=True 614 | elif(((OldCarBox[i][1] + OldCarBox[i][3])/2)>int(810/1920*w) and ((OldCarBox[i][1] + OldCarBox[i][3])/2) int(900/1080*h))): 615 | 616 | MinImage=Image.open(os.path.join(folderPath,"Pictures/2.png")) 617 | MinImage=MinImage.convert("RGBA") 618 | MinImage=MinImage.resize((int(700/1920*w),int(100/1080*h)),Image.ANTIALIAS) 619 | ### MinImage=MinImage.resize((1300,550),Image.ANTIALIAS) 620 | TransparantImage.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*0.18),int(OldCarBox[i][2])),MinImage) 621 | Sign_Or_Min=True 622 | ### image_copy.paste(MinImage, (int(OldCarBox[i][1])-int((OldCarBox[i][3]-OldCarBox[i][1])*0.85),int(OldCarBox[i][2])-int((OldCarBox[i][2]-OldCarBox[i][0])*.6)),MinImage)####################################################################################### 623 | 624 | ############################################################################################# 625 | ################################## arrow on the ground and its message ###################### 626 | ############################################################################################# 627 | if(IsDirection and Sign_Or_Min==False and NavigationFlag): 628 | if((direction == "left" or direction == "uturnl") and distance<31 and distance!=-1): # deh bs l7ad mnzabt kol al arrows 629 | direction = "left" 630 | position = (int(570/1920*w), int(730/1080*h)) #position to diplay the arrow 631 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/call_ImageCounter2.png")) 632 | TransparantImage.paste(Noti_Image, (int(720/1920*w),int(27/1080*h)),Noti_Image) # hna kda al arrow at7at 3la al sora :D 633 | d.text((int(830/1920*w), int(55/1080*h)),"{}M".format(distance),(255,255,255),font=font) 634 | elif((direction == "right" or direction == "uturnr") and distance<31 and distance!=-1): # deh bs l7ad mnzabt kol al arrows 635 | direction = "right" 636 | position = (int(900/1920*w), int(750/1080*h)) #position to diplay the arrow 637 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/call_ImageCounter2.png")) 638 | TransparantImage.paste(Noti_Image, (int(720/1920*w),int(27/1080*h)),Noti_Image) # hna kda al arrow at7at 3la al sora :D 639 | d.text((int(790/1920*w), int(55/1080*h)),"{}M".format(distance),(255,255,255),font=font) 640 | else: 641 | direction="go" 642 | ### bta3 re7ab position = (150, 600) #position to diplay the arrow 643 | position = (int(600/1920*w), int(450/1080*h)) #position to diplay the arrow 644 | ArrowImage = Image.open(os.path.join(folderPath,"PhotoShop/{}.png".format(direction))) # open the arrow of the given direction 645 | 646 | ### bta3 re7ab ArrowImage=ArrowImage.rotate(8) 647 | ArrowImage=ArrowImage.convert("RGBA") 648 | 649 | TransparantImage.paste(ArrowImage, position,ArrowImage) # hna kda al arrow at7at 3la al sora :D 650 | 651 | # d.text((200, 920),"{}M to go {}".format(distance,direction),(255,255,255),font=font) 652 | ####################################### noti for next route ################################# 653 | ############################################################################################# 654 | if(NextRouteDirection!=None and NavigationFlag): # if we have next route direction in navigation 655 | 656 | if First_Time==True: # navigation started, then notification box start sliding 657 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/upper1.png")) 658 | """ 659 | if save==True: # resize and save if save mode 660 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 661 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) 662 | elif Try==True: # only resize if try mode 663 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 664 | """ 665 | 666 | """crop a slide from notification box to be displayed with increasing the cropped size each time till the end of box then display the text notification""" 667 | crop_rectangle = (int(2/1920*w), 0, index1, int(240/1080*h)) 668 | index1+=int(20/1920*w) 669 | cropped_im = Noti_Image.crop(crop_rectangle) 670 | TransparantImage.paste(cropped_im, (int(10/1920*w),0),cropped_im) 671 | if index1 > int(770/1920*w): 672 | First_Time=False 673 | else: # sliding notification box finished, so pasting full notification box 674 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/upper1.png")) 675 | """ 676 | if save==True: 677 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 678 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) 679 | elif Try==True: 680 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 681 | """ 682 | 683 | TransparantImage.paste(Noti_Image, (int(10/1920*w),0),Noti_Image) 684 | index1=int(800/1920*w) 685 | if arrived: # final destination, importing destination image 686 | DestImage=Image.open(os.path.join(folderPath,"arrows/Destination.png")) 687 | """ 688 | if save==True: 689 | DestImage=DestImage.resize((int(130/1920*w),int(100/1080*h)),Image.ANTIALIAS) 690 | DestImage.save(os.path.join(folderPath,"arrows/Destination.png")) 691 | elif Try==True: 692 | DestImage=DestImage.resize((int(130/1920*w),int(100/1080*h)),Image.ANTIALIAS) 693 | """ 694 | 695 | TransparantImage.paste(DestImage, (0,int(20/1080*h)),DestImage) 696 | if index1>int(770/1920*h): # pasting text that end with "..." if length of text bigger than length of box 697 | if(len(next_Route)>length_of_box): 698 | next_Route=Animation(name=next_Route,IsCall=IsCalling,Ring=Ring,Route=True) 699 | d.text((int(280/1920*w),int(50/1080*h)), next_Route, font=font, fill=(255,255,255,255)) 700 | else: 701 | d.text((int(280/1920*w),int(50/1080*h)), next_Route, font=font, fill=(255,255,255,255)) 702 | 703 | else: # not arrived yet, use next route direction 704 | output = Image.open(os.path.join(folderPath,'arrows/{}2.png'.format(NextRouteDirection))) 705 | """ 706 | if save==True: 707 | output=output.resize((int(80/1920*w),int(80/1080*h)),Image.ANTIALIAS) 708 | output.save(os.path.join(folderPath,'arrows/{}2.png'.format(NextRouteDirection))) 709 | elif Try==True: 710 | output=output.resize((int(80/1920*w),int(80/1080*h)),Image.ANTIALIAS) 711 | """ 712 | 713 | TransparantImage.paste(output, (int(25/1920*w),int(31/1080*h)),output) 714 | if index1>int(770/1920*w): # pasting text that end with "..." if length of text bigger than length of box 715 | if(len(next_Route)>length_of_box): 716 | next_Route=Animation(name=next_Route,IsCall=IsCalling,Ring=Ring,Route=True) 717 | d.text((int(280/1920*w),int(50/1080*h)), next_Route, font=font, fill='white') 718 | else: 719 | d.text((int(280/1920*w),int(50/1080*h)), next_Route, font=font, fill='white') 720 | 721 | ############################################################################################## 722 | ################################ noti for mobile data ######################################## 723 | ############################################################################################## 724 | if(MobileName!=None): # there is a mobile notification to be displayed 725 | if(IsCalling): # there is a comming call 726 | 727 | # first time to get this notification, then initialize the index of cropped notification and set state to call state 728 | if (switch_flag==0 or switch_flag==1): 729 | index2=int(110/1920*w) 730 | switch_flag=2 731 | name_counter=-1 732 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/upper1.png")) 733 | """ 734 | if save==True: 735 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 736 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) 737 | elif Try==True: 738 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 739 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) ###################### 740 | print("Mobile name 744") 741 | """ 742 | # display the cropped part of notification box 743 | crop_rectangle = (int(2/1920*w), 0, index2, int(240/1080*h)) 744 | cropped_im = Noti_Image.crop(crop_rectangle) 745 | TransparantImage.paste(cropped_im, (int(10/1920*w),int(110/1080*h)),cropped_im) 746 | im = Image.open(os.path.join(folderPath,"Pictures/Unknown_Caller1.png")) 747 | """ 748 | if save==True: 749 | im=im.resize((int(90/1920*w),int(90/1080*h)),Image.ANTIALIAS) 750 | im.save(os.path.join(folderPath,"Pictures/Unknown_Caller1.png")) 751 | elif Try==True: 752 | im=im.resize((int(90/1920*w),int(90/1080*h)),Image.ANTIALIAS) ############################# 753 | """ 754 | TransparantImage.paste(im, (int(22/1920*w),int(138/1080*h)),im) 755 | 756 | ############################################################################################# 757 | ############################# if user answered .. start counting time ####################### 758 | ############################################################################################# 759 | if last_answered==True: # call is on and continue counting 760 | now = datetime.now() 761 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/call_ImageCounter2.png")) 762 | """ 763 | if save==True: 764 | Noti_Image=Noti_Image.resize((int(300/1920*w),int(100/1080*h)),Image.ANTIALIAS) 765 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/call_ImageCounter2.png")) 766 | elif Try==True: 767 | Noti_Image=Noti_Image.resize((int(300/1920*w),int(100/1080*h)),Image.ANTIALIAS) 768 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/call_ImageCounter2.png")) ################# 769 | print("call_ImageCounter2 773") 770 | """ 771 | TransparantImage.paste(Noti_Image, (int(720/1920*w),int(137/1080*h)),Noti_Image) 772 | minutes, seconds = divmod(((now - start).total_seconds()), 59) 773 | d.text((int(790/1920*w),int(163/1080*h)), "%02d:%02d" % (minutes, round(seconds)), font=font, fill='white') 774 | Ring=False 775 | 776 | elif answered==True: # first time to answer the call, get initial time for answering 777 | last_answered=True 778 | start = datetime.now() 779 | else: 780 | Ring=True 781 | 782 | ############################################################################################# 783 | ################################ flash the green call image ################################# 784 | ############################################################################################# 785 | # flash the displaying of call image in ringing state but it's always displayed after answering 786 | if CallImage_counter>=20 or answered== True: 787 | CallImage=Image.open(os.path.join(folderPath,"Pictures/phone_call3.png")) 788 | """ 789 | if save==True: 790 | CallImage=CallImage.resize((int(40/1920*w),int(40/1080*h)),Image.ANTIALIAS) 791 | CallImage.save(os.path.join(folderPath,"Pictures/phone_call3.png")) 792 | elif Try==True: 793 | CallImage=CallImage.resize((int(40/1920*w),int(40/1080*h)),Image.ANTIALIAS) 794 | CallImage.save(os.path.join(folderPath,"Pictures/phone_call3.png")) ############################# 795 | print("phone_call3 798") 796 | """ 797 | TransparantImage.paste(CallImage, (int(10/1920*w),int(120/1080*h)),CallImage) 798 | if CallImage_counter>=40: 799 | CallImage_counter=0 800 | CallImage_counter+=1 801 | 802 | ############################################################################################# 803 | #################################### song is playing ######################################## 804 | ############################################################################################# 805 | elif MusicFlag==True: # initialize call couter and last answer flags for next calling 806 | last_answered=False 807 | CallImage_counter=0 808 | 809 | # first time to get this notification, then initialize the index of cropped notification and set state to media state 810 | if (switch_flag==0 or switch_flag==2): 811 | index2=int(110/1920*w) 812 | switch_flag=1 813 | name_counter=-1 814 | 815 | Noti_Image=Image.open(os.path.join(folderPath,"PhotoShop/upper1.png")) 816 | """ 817 | if save==True: 818 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 819 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) 820 | elif Try==True: 821 | Noti_Image=Noti_Image.resize((int(772/1920*w),int(240/1080*h)),Image.ANTIALIAS) 822 | Noti_Image.save(os.path.join(folderPath,"PhotoShop/upper1.png")) ######################### 823 | print("upper1 826") 824 | """ 825 | # display the cropped part of notification box 826 | crop_rectangle = (int(2/1920*w), 0, index2, int(240/1080*h)) 827 | cropped_im = Noti_Image.crop(crop_rectangle) 828 | TransparantImage.paste(cropped_im, (int(10/1920*w),int(110/1080*h)),cropped_im) 829 | im = Image.open("AR/Pictures/{}.png".format(music_state)) 830 | """ 831 | if save==True: 832 | im=im.resize((int(85/1920*w),int(85/1080*h)),Image.ANTIALIAS) 833 | im.save("AR/Pictures/{}.png".format(music_state)) 834 | elif Try==True: 835 | im=im.resize((int(85/1920*w),int(85/1080*h)),Image.ANTIALIAS) 836 | im.save("AR/Pictures/{}.png".format(music_state)) #################### 837 | print("music_state 840") 838 | """ 839 | TransparantImage.paste(im, (int(21/1920*w),int(138/1080*h)),im) 840 | 841 | # display notification text after finishing sliding notification box 842 | if index2>=int(770/1920*w): 843 | name=Animation(name=MobileName,IsCall=IsCalling,Ring=Ring) 844 | d.text((int(280/1920*w),int(160/1080*h)), name, font=font, fill='white') 845 | index2+=int(20/1920*w) 846 | # no notification from mobil to be displayed, initialize switch flag, last state of answering a call and call counter 847 | else: 848 | last_answered=False 849 | CallImage_counter=0 850 | switch_flag=0 851 | 852 | ############################################################################################# 853 | ################################## speedometer blue logo #################################### 854 | ############################################################################################# 855 | if (SpeedFlag): 856 | SP_Image=Image.open(os.path.join(folderPath,"PhotoShop/c4.png")) 857 | """ 858 | if save==True: 859 | SP_Image=SP_Image.resize((int(280/1920*w),int(295/1080*h)),Image.ANTIALIAS) 860 | SP_Image.save(os.path.join(folderPath,"PhotoShop/c4.png")) 861 | elif Try==True: 862 | SP_Image=SP_Image.resize((int(280/1920*w),int(295/1080*h)),Image.ANTIALIAS) 863 | SP_Image.save(os.path.join(folderPath,"PhotoShop/c4.png")) ###################### 864 | print("c4 866") 865 | """ 866 | TransparantImage.paste(SP_Image, (int(190/1920*w),int(785/1080*h)),SP_Image) 867 | d.text((int(300/1920*w),int(900/1080*h)), "{}".format(cat_sp), font=font1, fill=(255,255,255,255)) 868 | 869 | ############################################################################################# 870 | ################################# Speedometer image ######################################### 871 | ############################################################################################# 872 | if cat_sp>=6: 873 | cat_sp-=6 874 | else: 875 | cat_sp=0 876 | SpeedoImage=Image.open(os.path.join(folderPath,"cropped/frame_{}.png".format(cat_sp))) 877 | SpeedoImage=SpeedoImage.convert("RGBA") 878 | """ 879 | if save==True: 880 | SpeedoImage=SpeedoImage.resize((int(274/1920*w),int(274/1080*h)),Image.ANTIALIAS) 881 | SpeedoImage.save(os.path.join(folderPath,"cropped/frame_{}.png".format(cat_sp))) 882 | elif Try==True: 883 | SpeedoImage=SpeedoImage.resize((int(274/1920*w),int(274/1080*h)),Image.ANTIALIAS) 884 | SpeedoImage.save(os.path.join(folderPath,"cropped/frame_{}.png".format(cat_sp))) ################### 885 | print("cat_sp") 886 | """ 887 | TransparantImage.paste(SpeedoImage, (int(193/1920*w),int(780/1080*h)),SpeedoImage) 888 | 889 | ############################################################################################# 890 | ################################### fuel and temp image ##################################### 891 | ############################################################################################# 892 | if (TempFlag and FuelFlag): 893 | FuelImage=Image.open(os.path.join(folderPath,"cropped/FUEL_1.png")) 894 | FuelImage=FuelImage.convert("RGBA") 895 | """ 896 | if save==True: 897 | FuelImage=FuelImage.resize((int(120/1920*w),int(180/1080*h)),Image.ANTIALIAS) 898 | FuelImage.save(os.path.join(folderPath,"cropped/FUEL_1.png")) 899 | elif Try==True: 900 | FuelImage=FuelImage.resize((int(120/1920*w),int(180/1080*h)),Image.ANTIALIAS) 901 | FuelImage.save(os.path.join(folderPath,"cropped/FUEL_1.png")) ########################### 902 | print("FUEL_1 898") 903 | """ 904 | TransparantImage.paste(FuelImage, (int(60/1920*w),int(850/1080*h)),FuelImage) 905 | 906 | # update the gloabal server transparent image 907 | T_server=TransparantImage 908 | 909 | 910 | # print(startingPoint," ",destinationPoint," ",currentLocation) 911 | # print("\n\n") 912 | 913 | 914 | 915 | """ 916 | ############################################################################################### 917 | ##################################### Write Buffer ############################################ 918 | ############################################################################################### 919 | """ 920 | def WriteBuffer(out_Boxes,out_classes,classes_to_be_shown): 921 | """ 922 | this function split frame into 3 ranges and set the data[signFlag, signType,carFlag, carBox, pedFalg, pedBox] in the global buffer due to its range sothat there is only one detected car or pedstrian in each range to be diplayed and one traffic sign message to be shown for a specific time. 923 | 924 | Parameters: 925 | ----------- 926 | out_Boxes: list of arrays 927 | each array contain [x1,y1,x2,y2] of a specific object 928 | 929 | out_classes: list 930 | classes of objects that have been detected 931 | 932 | classes_to_be_shown: list 933 | averaged traffic sign classes to be displayed in the frame 934 | 935 | Return: 936 | ------- 937 | None 938 | """ 939 | 940 | global OldPosCar,OldPosPed,OldCarBoxDim,OldpedBoxDim,ChangeCar,ChangePed,CarCounter,PedCounter,bottom_max 941 | global Idle,SleepTime,DisplayedSignType,MaxTime,NumberOfSigns 942 | y=416/h 943 | x=416/w 944 | Range=-1 945 | carFlag=False 946 | pedFlag=False 947 | signFlag=False 948 | 949 | clip=0.35 950 | 951 | carBox=[(),(),()] 952 | pedBox=[(),(),()] 953 | 954 | signType=-1 955 | bottom_max=[-1,-1,10000000] 956 | 957 | NewBoxDim=[[],[],[]] 958 | NewBoxDimPed=[[],[],[]] 959 | 960 | for i,className in enumerate(out_classes): 961 | top, left, bottom, right = out_Boxes[i] 962 | top = max(0, np.floor(top + 0.5).astype('int32')) 963 | left = max(0, np.floor(left + 0.5).astype('int32')) 964 | bottom = min(416, np.floor(bottom + 0.5).astype('int32')) 965 | right = min(416, np.floor(right + 0.5).astype('int32')) 966 | 967 | centerY=int((((bottom-top)/2)+top)/y) 968 | centerX=int((((left-right)/2)+right)/x) 969 | 970 | # if ((right-left <=60) and (bottom-top <=60)):continue 971 | 972 | top=top/y 973 | left=left/x 974 | bottom=bottom/y 975 | right=right/x 976 | 977 | 978 | 979 | if(centerX <(int(1920/3))): 980 | Range=0 981 | elif(centerX>(int(1920/3)) and centerX<2*(int(1920/3))): 982 | Range=1 983 | elif(centerX > 2*(int(1920/3))): 984 | Range=2 985 | 986 | if className == 0: 987 | carFlag=True 988 | if (Range==1): 989 | if bottom > bottom_max[Range]: 990 | carBox[Range]=(centerX,centerY) 991 | NewBoxDim[Range]=[top, left, bottom, right] 992 | bottom_max[Range]=bottom 993 | elif (Range==0): 994 | if right > bottom_max[Range]: 995 | carBox[Range]=(centerX,centerY) 996 | NewBoxDim[Range]=[top, left, bottom, right] 997 | bottom_max[Range]=right 998 | elif (Range==2): 999 | if left < bottom_max[Range]: 1000 | carBox[Range]=(centerX,centerY) 1001 | NewBoxDim[Range]=[top, left, bottom, right] 1002 | bottom_max[Range]=left 1003 | 1004 | elif className == 6: 1005 | pedFlag=True 1006 | if (Range==1): 1007 | if bottom > bottom_max[Range]: 1008 | pedBox[Range]=(centerX,centerY) 1009 | NewBoxDimPed[Range]=[top, left, bottom, right] 1010 | bottom_max[Range]=bottom 1011 | if (Range==0): 1012 | if right > bottom_max[Range]: 1013 | pedBox[Range]=(centerX,centerY) 1014 | NewBoxDimPed[Range]=[top, left, bottom, right] 1015 | bottom_max[Range]=right 1016 | if (Range==2): 1017 | if left < bottom_max[Range]: 1018 | pedBox[Range]=(centerX,centerY) 1019 | NewBoxDimPed[Range]=[top, left, bottom, right] 1020 | bottom_max[Range]=left 1021 | 1022 | # if bottom > bottom_max[Range]: 1023 | # pedBox[Range]=(centerX,centerY) 1024 | # NewBoxDimPed[Range]=[top, left, bottom, right] 1025 | # bottom_max[Range]=bottom 1026 | ###################################################################################### 1027 | for i in range (3): 1028 | if(OldPosCar[i]== () or carBox[i]==()): 1029 | CarCounter[i]=CarCounter[i]+1 1030 | if (CarCounter[i]==10 or OldPosCar[i]== () ): 1031 | CarCounter[i]=0 1032 | OldCarBoxDim[i]=NewBoxDim[i] 1033 | OldPosCar[i]=carBox[i] 1034 | else: 1035 | if(carBox[i][0]<(OldCarBoxDim[i][1]+clip*(OldCarBoxDim[i][3]-OldCarBoxDim[i][1])) or carBox[i][0]>(OldCarBoxDim[i][3]-clip*(OldCarBoxDim[i][3]-OldCarBoxDim[i][1])) or carBox[i][1]<(OldCarBoxDim[i][0]+clip*(OldCarBoxDim[i][2]-OldCarBoxDim[i][0])) or carBox[i][1]>(OldCarBoxDim[i][2]-clip*(OldCarBoxDim[i][2]-OldCarBoxDim[i][0])) ): 1036 | OldCarBoxDim[i]=NewBoxDim[i] 1037 | OldPosCar[i]=carBox[i] 1038 | CarCounter[i]=0 1039 | 1040 | # if (OldPosCar[1] !=() and OldPosCar[0] !=()): 1041 | # if (((OldPosCar[1][0] - OldPosCar[0][0] ) < 120)): OldPosCar[1]=() 1042 | # if (OldPosCar[2] !=() and OldPosCar[1] !=()): 1043 | # if (((OldPosCar[2][0] - OldPosCar[1][0] ) < 120)): OldPosCar[1]=() 1044 | 1045 | for i in range (3): 1046 | if(OldPosPed[i]== () or pedBox[i]==()): 1047 | PedCounter[i]=PedCounter[i]+1 1048 | if (PedCounter[i]==0 or OldPosPed[i]== () ): 1049 | PedCounter[i]=0 1050 | OldpedBoxDim[i]=NewBoxDimPed[i] 1051 | OldPosPed[i]=pedBox[i] 1052 | else: 1053 | if(pedBox[i][0]<(OldpedBoxDim[i][1]+clip*(OldpedBoxDim[i][3]-OldpedBoxDim[i][1])) or pedBox[i][0]>(OldpedBoxDim[i][3]-clip*(OldpedBoxDim[i][3]-OldpedBoxDim[i][1])) or pedBox[i][1]<(OldpedBoxDim[i][0]+clip*(OldpedBoxDim[i][2]-OldpedBoxDim[i][0])) or pedBox[i][1]>(OldpedBoxDim[i][2]-clip*(OldpedBoxDim[i][2]-OldpedBoxDim[i][0])) ): 1054 | OldpedBoxDim[i]=NewBoxDimPed[i] 1055 | OldPosPed[i]=pedBox[i] 1056 | PedCounter[i]=0 1057 | 1058 | ########################## to choose one traffic Sign##################################### 1059 | 1060 | NumberOfDetectedsigns=len(classes_to_be_shown) 1061 | if(SleepTime!=MaxTime): 1062 | if(NumberOfSigns==2): 1063 | signFlag=True 1064 | signType=DisplayedSignType 1065 | SleepTime+=1 1066 | elif(NumberOfSigns==1 and NumberOfDetectedsigns>=1): 1067 | for i,className in enumerate(classes_to_be_shown): 1068 | if (className!=DisplayedSignType and className!=0 and className!=6 ): 1069 | NumberOfSigns=2 1070 | SleepTime=-1 1071 | if(DisplayedSignType < className): 1072 | DisplayedSignType = DisplayedSignType*10+className 1073 | 1074 | elif(DisplayedSignType > className): 1075 | DisplayedSignType = DisplayedSignType+className*10 1076 | signFlag=True 1077 | SleepTime+=1 1078 | signType=DisplayedSignType 1079 | 1080 | elif(NumberOfSigns==1 and NumberOfDetectedsigns==0): 1081 | signFlag=True 1082 | signType=DisplayedSignType 1083 | SleepTime+=1 1084 | 1085 | elif(NumberOfSigns==0 and NumberOfDetectedsigns>=1): 1086 | for i,className in enumerate(classes_to_be_shown): 1087 | if(NumberOfSigns<2 and className!=0 and className!=6): 1088 | NumberOfSigns+=1 1089 | if DisplayedSignType==0: 1090 | DisplayedSignType=className 1091 | signFlag=True 1092 | 1093 | elif(DisplayedSignType < className): 1094 | DisplayedSignType = DisplayedSignType*10+className 1095 | 1096 | elif(DisplayedSignType > className): 1097 | DisplayedSignType = DisplayedSignType+className*10 1098 | 1099 | signType=DisplayedSignType 1100 | 1101 | 1102 | else: 1103 | SleepTime=0 1104 | DisplayedSignType=0 1105 | signFlag=False 1106 | NumberOfSigns=0 1107 | # print(OldPosCar) 1108 | # print(pedFlag) 1109 | # print(OldPosPed) 1110 | # update buffer data 1111 | buffer[6]=signFlag 1112 | buffer[7]=signType 1113 | buffer[8]=carFlag 1114 | buffer[9]=OldPosCar 1115 | buffer[10]=pedFlag 1116 | buffer[11]=OldPosPed 1117 | buffer[12]=OldCarBoxDim 1118 | 1119 | """ 1120 | ########################################################################################## 1121 | ################################## Create_Model Function ################################# 1122 | ########################################################################################## 1123 | """ 1124 | def create_model(anchors, class_names, load_pretrained=True): 1125 | ''' 1126 | returns the body of the model and the model 1127 | 1128 | # Params: 1129 | load_pretrained: whether or not to load the pretrained model or initialize all weights 1130 | 1131 | # Returns: 1132 | model_body: YOLOv2 with new output layer 1133 | model: YOLOv2 with custom loss Lambda layer 1134 | ''' 1135 | 1136 | detectors_mask_shape = (13, 13, 5, 1) 1137 | matching_boxes_shape = (13, 13, 5, 5) 1138 | 1139 | # Create model input layers. 1140 | image_input = Input(shape=(416, 416, 3)) 1141 | boxes_input = Input(shape=(None, 5)) #true label 1142 | detectors_mask_input = Input(shape=detectors_mask_shape) # a return from get_detectors function 1143 | matching_boxes_input = Input(shape=matching_boxes_shape) # a return from get_detectors function 1144 | 1145 | # Create model body. 1146 | # Note:Model here is created without last layer as we will train last layer again every time we have new dataset. 1147 | yolo_model = yolo_body(image_input, len(anchors), len(class_names)) 1148 | 1149 | """ 1150 | The next line creates model between input layer and the layer before the last layer but how it creates the model with only knowing the first layer and last layer of model ? 1151 | each layer has a pointer to the preeceding layer so it creates model from the last layer going upwards to the first layer through pointers 1152 | """ 1153 | topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output) 1154 | 1155 | if load_pretrained: 1156 | # Save topless yolo: 1157 | # Saving occurs only once to save model .without last layer. 1158 | #every time after this time we saved in, we will only load data. 1159 | topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5') #get path of topless_yolo 1160 | if not os.path.exists(topless_yolo_path): # ask if topless_yolo exists ? if exists then only load it ,if not exists then enter the if conditional to load the full yolo model 1161 | print("CREATING TOPLESS WEIGHTS FILE") #so that you can extract the topless_yolo model. 1162 | yolo_path = os.path.join('model_data', 'yolo.h5') #yolo.h5 1163 | model_body = load_model(yolo_path) 1164 | model_body = Model(model_body.inputs, model_body.layers[-2].output) 1165 | model_body.save_weights(topless_yolo_path) 1166 | topless_yolo.load_weights(topless_yolo_path) 1167 | 1168 | final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output) 1169 | 1170 | 1171 | model_body = Model(image_input, final_layer) 1172 | 1173 | # Place model loss on CPU to reduce GPU memory usage. 1174 | """ 1175 | model_loss is a layer to represent the loss of our model 1176 | so it feeds the final layer outputs to yolo_loss function to calculate loss and then passes it to Model func. to generate 1177 | new model from the input to the last layer which is now the loss layer. 1178 | """ 1179 | 1180 | # with tf.device('/gpu:0'): 1181 | model_loss = Lambda(yolo_loss,output_shape=(1, ),name='yolo_loss', 1182 | arguments={'anchors': anchors, 1183 | 'num_classes': len(class_names)})([ 1184 | model_body.output, boxes_input, 1185 | detectors_mask_input, matching_boxes_input 1186 | ]) 1187 | 1188 | model = Model([model_body.input, boxes_input, detectors_mask_input,matching_boxes_input], model_loss) 1189 | 1190 | return model_body, model 1191 | 1192 | """ 1193 | ########################################################################################## 1194 | ################################### Get_Classes Function ################################# 1195 | ########################################################################################## 1196 | """ 1197 | def get_classes(classes_path): 1198 | '''loads the classes''' 1199 | with open(classes_path) as f: 1200 | class_names = f.readlines() 1201 | class_names = [c.strip() for c in class_names] 1202 | return class_names 1203 | 1204 | """ 1205 | ########################################################################################## 1206 | ################################## Process_Data Function ################################# 1207 | ########################################################################################## 1208 | """ 1209 | def process_data(images, boxes=None): 1210 | '''processes the data''' 1211 | images = [PIL.Image.fromarray(i) for i in images] 1212 | orig_size = np.array([images[0].width, images[0].height]) 1213 | orig_size = np.expand_dims(orig_size, axis=0) 1214 | 1215 | # Image preprocessing. 1216 | processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images] 1217 | processed_images = [np.array(image, dtype=np.float) for image in processed_images] 1218 | processed_images = [image/255. for image in processed_images] 1219 | 1220 | if boxes is not None: 1221 | # Box preprocessing. 1222 | # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max. 1223 | boxes = [box.reshape((-1, 5)) for box in boxes] 1224 | 1225 | # Get box parameters as x_center, y_center, box_width, box_height, class. 1226 | boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes] 1227 | boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes] 1228 | boxes_xy = [boxxy / orig_size for boxxy in boxes_xy] 1229 | boxes_wh = [boxwh / orig_size for boxwh in boxes_wh] 1230 | 1231 | boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)] 1232 | 1233 | # find the max number of boxes 1234 | max_boxes = 0 1235 | for boxz in boxes: 1236 | if boxz.shape[0] > max_boxes: 1237 | max_boxes = boxz.shape[0] 1238 | 1239 | # add zero pad for training 1240 | for i, boxz in enumerate(boxes): 1241 | if boxz.shape[0] < max_boxes: 1242 | zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32) 1243 | boxes[i] = np.vstack((boxz, zero_padding)) 1244 | 1245 | return np.array(processed_images), np.array(boxes) 1246 | else: 1247 | return np.array(processed_images) 1248 | 1249 | """ 1250 | ########################################################################################## 1251 | ################################### Model_Body_Processing Function ####################### 1252 | ########################################################################################## 1253 | """ 1254 | def model_body_processing(model_body, class_names, anchors): 1255 | ''' 1256 | function to be called once for loading weights and preparing the boxes,scores and classes 1257 | according to anchor boxes values,score threshold and iou threshold. 1258 | This is evaluated by non_max_suppression function. 1259 | ''' 1260 | global input_image_shape 1261 | global w,h 1262 | 1263 | yolo_outputs = yolo_head(model_body.output, anchors, len(class_names)) 1264 | boxes, scores, classes = yolo_eval(yolo_outputs, input_image_shape, score_threshold=0.5, iou_threshold=0.5) 1265 | return boxes,scores,classes 1266 | 1267 | """ 1268 | ########################################################################################## 1269 | ###################################### GetFrames Function ################################ 1270 | ########################################################################################## 1271 | """ 1272 | def GetFrames(): 1273 | ''' 1274 | Function to be run on separate thread to get frames from the camera and store them in global 1275 | frames_captured. 1276 | time.sleep is used to determine number of frames taken per second 1277 | [example if(Processing time =0.1) then fps=1/0.1=10 frames per second] 1278 | Processing time variable is equivalent to processing time taken to get and draw bounding box plus 1279 | an offset (tolerance). 1280 | ''' 1281 | global ProcessingTime,frames_captured,ret,DisplayQueue,im,video,im2,GetFramesFlag 1282 | 1283 | while True: 1284 | time.sleep(1/35) 1285 | ret, im = video.read() 1286 | im=cv2.cvtColor((im), cv2.COLOR_RGB2BGR) 1287 | tempImage=np.asarray(im) 1288 | tempImage=np.expand_dims(im,axis=0) 1289 | tempImage=process_data(tempImage) 1290 | tempImage= Image.fromarray(np.floor(tempImage[0] * 255 + 0.5).astype('uint8')) 1291 | tempImage=tempImage.resize((w,h),Image.ANTIALIAS) 1292 | im2 = tempImage.convert("RGBA") 1293 | GetFramesFlag=True 1294 | 1295 | """ 1296 | ########################################################################################## 1297 | ###################################### VideoDraw Function ################################ 1298 | ########################################################################################## 1299 | """ 1300 | def getNextRoute( ): 1301 | global destinationPoint,isNeedRerouting,currentLocation,startingPoint,buffer,distance2,text_to_speech_phrase,text_to_speech_flag 1302 | text_to_speech_phrase="idle@idle" 1303 | getMeOut = False; 1304 | currentLocation = startingPoint; 1305 | 1306 | while(True): 1307 | 1308 | time.sleep(1); 1309 | getMeOut = False 1310 | # print("START OF WHILE TRUEEEEEEEEEEEEEE") 1311 | if destinationPoint== "0.0%2c0.0" or startingPoint=="0.0%2c0.0" or currentLocation =="0.0%2c0.0": 1312 | continue 1313 | # print("################### : ", currentLocation) 1314 | if (isNeedRerouting): 1315 | isNeedRerouting = False 1316 | # print("###### calculate new route #######") 1317 | # try: 1318 | 1319 | requestData = getRequestSream(ip="maps.googleapis.com",port=443,page="maps/api/directions/json?origin={}&destination={}&" \ 1320 | "mode={}&key={}".format(startingPoint,destinationPoint,"driving",API_KEY)).decode('utf-8'); 1321 | jsonData = json.loads(requestData) 1322 | # print(jsonData) 1323 | # break 1324 | stepsCount = len(jsonData["routes"][0]["legs"][0]["steps"]); 1325 | # print("get step count.. ",stepsCount) 1326 | 1327 | # except: 1328 | # print("Error in get request stream in Navigation 1"); 1329 | # continue 1330 | 1331 | 1332 | 1333 | # try: 1334 | # if (isFirstTimeToRoute): 1335 | # isFirstTimeToRoute = False 1336 | # firstStepLat = jsonData["routes"][0]["legs"][0]["steps"][0]["start_location"]["lat"]; 1337 | # firstStepLong = jsonData["routes"][0]["legs"][0]["steps"][0]["start_location"]["lng"]; 1338 | # distanceToFirstStep = int(mpu.haversine_distance((float(currentLocation.split("%2c")[0]),float(currentLocation.split("%2c")[1])) , (firstStepLat,firstStepLong)) * 1000) 1339 | # print("get first distance",distanceToFirstStep) 1340 | # while (distanceToFirstStep > 1): 1341 | # distanceToFirstStep = int(mpu.haversine_distance((float(currentLocation.split("%2c")[0]),float(currentLocation.split("%2c")[1])) , (firstStepLat,firstStepLong)) * 1000) 1342 | # print("distance to first step= ",distanceToFirstStep) 1343 | # except: 1344 | # print("Error in first time"); 1345 | 1346 | 1347 | 1348 | 1349 | 1350 | buffer[1]="go" 1351 | for nxt in range(stepsCount): 1352 | # print("in for loop .. next =",nxt) 1353 | if (getMeOut): 1354 | 1355 | # print("in if Get me out") 1356 | break; 1357 | if (isNeedRerouting): 1358 | # input("in if need rerouting") 1359 | break 1360 | if destinationPoint== "0.0%2c0.0" or startingPoint=="0.0%2c0.0" or currentLocation =="0.0%2c0.0": 1361 | # input("in 1") 1362 | break 1363 | try: 1364 | # print(jsonData) 1365 | distance = jsonData["routes"][0]["legs"][0]["steps"][nxt]["distance"]["text"]; 1366 | destLat = jsonData["routes"][0]["legs"][0]["steps"][nxt]["end_location"]["lat"]; 1367 | destLong = jsonData["routes"][0]["legs"][0]["steps"][nxt]["end_location"]["lng"]; 1368 | isManeuver = str(jsonData["routes"][0]["legs"][0]["steps"][nxt]) 1369 | if ("maneuver" in isManeuver): 1370 | maneuver = isManeuver["maneuver"] 1371 | # print("maneuver ",maneuver) 1372 | else: 1373 | 1374 | 1375 | maneuver = 'null'; 1376 | # print("maneuver else ",maneuver) 1377 | except: 1378 | # print("Error in 2") 1379 | break 1380 | 1381 | try: 1382 | 1383 | instruction = regex.sub('<[^<]+?>', '#', jsonData["routes"][0]["legs"][0]["steps"][nxt]["html_instructions"]); 1384 | text=regex.sub('<[^<]+?>', ' ', jsonData["routes"][0]["legs"][0]["steps"][nxt]["html_instructions"]); 1385 | Result = instruction.split('#') 1386 | 1387 | left=["turn-slight-left","turn-sharp-left","turn-left","ramp-left","fork-left"] 1388 | right=["turn-slight-right","turn-sharp-right","turn-right","ramp-right","fork-right"] 1389 | go=["straight"] 1390 | uturn_left=[ "uturn-left","roundabout-left"] 1391 | uturn_right=[ "uturn-right","roundabout-right"] 1392 | if(maneuver in left): nextroutedirec="left" 1393 | if(maneuver in right): nextroutedirec="right" 1394 | if((maneuver in go) or maneuver=="null" ): nextroutedirec="go" 1395 | if(maneuver in uturn_left): nextroutedirec="uturnl" 1396 | if(maneuver in uturn_right): nextroutedirec="uturnr" 1397 | 1398 | #distance 1399 | buffer[4]=nextroutedirec #NextRouteDirection 1400 | buffer[5]=Result[3] #next_Route 1401 | temp=urllib.parse.quote_plus("direction@After {} {}".format(distance,text)) 1402 | except: 1403 | # print("Error in 3"); 1404 | break; 1405 | 1406 | if( urllib.parse.quote_plus(text_to_speech_phrase) != temp and TextToSpeechFlag==1): 1407 | # print("in 1406") 1408 | text_to_speech_phrase = "direction@After {} {}".format(distance,text) 1409 | try: 1410 | # print("get request stream") 1411 | getRequestSream(page="textToSpeech.php?data={}".format(text_to_speech_phrase)) 1412 | except: 1413 | # print("Error in get request stream Text to speech Navigation function"); 1414 | continue; 1415 | 1416 | 1417 | nextLocation = str(destLat) + "," + str(destLong); 1418 | distanceToNext = int(mpu.haversine_distance((float(currentLocation.split("%2c")[0]),float(currentLocation.split("%2c")[1])) , (destLat,destLong)) * 1000) 1419 | while (distanceToNext > 5): 1420 | # print(distanceToNext) 1421 | buffer[2]= distanceToNext 1422 | distanceToNext = int(mpu.haversine_distance((float(currentLocation.split("%2c")[0]),float(currentLocation.split("%2c")[1]) ), (destLat,destLong)) * 1000) 1423 | if destinationPoint== "0.0%2c0.0" or startingPoint=="0.0%2c0.0" or currentLocation =="0.0%2c0.0": 1424 | getMeOut = True 1425 | break; 1426 | try: 1427 | isNeedRerouting = checkRerouting(currentLocation,nextLocation); 1428 | except: 1429 | # print("########## 3") 1430 | break 1431 | # print(isNeedRerouting) 1432 | if (isNeedRerouting): 1433 | startingPoint = currentLocation; 1434 | getMeOut = True 1435 | break 1436 | 1437 | 1438 | 1439 | if (not isNeedRerouting and nxt==stepsCount): 1440 | # print(nxt," ",stepsCount) 1441 | buffer[3]=True 1442 | break 1443 | 1444 | def videoDraw(model_body, class_names, anchors): 1445 | ''' 1446 | Main function of drawing that controls all actions 1447 | ''' 1448 | global video,ProcessingTime,input_image_shape,frames_captured,DrawFrame,ret,im,stream 1449 | global w,h 1450 | input_image_shape = K.placeholder(shape=(2, )) 1451 | boxes,scores,classes=model_body_processing(model_body, class_names, anchors) #called once to avoid adding nodes to graph 1452 | 1453 | ########## CALIBRATION FOR SESS RUN AS FIRST RUN TAKES MUCH TIME ####### 1454 | calibrated=cv2.imread('Calibration.bmp') 1455 | calibrated=cv2.resize(calibrated,(416,416)) 1456 | calibrated = cv2.cvtColor(calibrated, cv2.COLOR_BGR2RGB) 1457 | calibrated = np.reshape(calibrated,(-1,416,416,3)) 1458 | draw(boxes,scores,classes,model_body, class_names, calibrated) 1459 | ######### END OF CALIBRATION ########### 1460 | 1461 | ##### start capturing from web_cam (frame is captured with size 416x416) #### 1462 | if stream==True: 1463 | video = cv2.VideoCapture('http://{}:8080/video'.format(webcamIP)) 1464 | 1465 | else: 1466 | video = cv2.VideoCapture(1) 1467 | video.set(3,h); 1468 | video.set(4,w); 1469 | 1470 | #### multithreading between : 1) server data transparent image thread 2)Displaying video after composition with AR transparent images 3) Capturing video frames #### 1471 | #pool=ThreadPool(processes=3) 1472 | #pool.apply_async(DrawDirection,[]) 1473 | #pool.apply_async(display_interval) 1474 | #pool.apply_async(GetFrames, []) 1475 | # pool.apply_async(getNextRoute) 1476 | 1477 | thread1 = Thread(target = DrawDirection) 1478 | thread2 = Thread(target = display_interval) 1479 | thread3 = Thread(target = GetFrames) 1480 | thread4 = Thread(target = getNextRoute) 1481 | thread1.start() 1482 | thread2.start() 1483 | thread3.start() 1484 | thread4.start() 1485 | while (True): 1486 | time.sleep(1/30); 1487 | if(ret): 1488 | frames_captured_p=np.asarray(im) #frames captured are passed to another temp variable (frames captured_p) 1489 | frames_captured_p=np.expand_dims(frames_captured_p, axis=0) 1490 | frames_captured_p=process_data(frames_captured_p)#pass frames captured to be preprocessed 1491 | draw(boxes,scores,classes,model_body, class_names, frames_captured_p[:]) #pass frames captured to draw function to add bounding boxes 1492 | """ 1493 | ########################################################################################## 1494 | ###################################### draw Function ##################################### 1495 | ########################################################################################## 1496 | """ 1497 | def draw(boxes,scores,classes,model_body, class_names,image_data): 1498 | ''' 1499 | this function apply processing to images, run session to produce boxes and classes of detected objects then draw boxes on frame in case of no AR mode or average classes, update buffer and update the global process transparent image in case of AR mode 1500 | 1501 | Parameters: 1502 | ----------- 1503 | ################ i don't remmeber them ! 1504 | 1505 | return: 1506 | ------- 1507 | None 1508 | ''' 1509 | global input_image_shape 1510 | global buffer,im,T_process,ret 1511 | global next_route,IsCalling,Mob_Name,ans,music_state,direc,arrived,speed 1512 | while ((not ret) and im !=None): 1513 | continue 1514 | sess = K.get_session() # TODO: Remove dependence on Tensorflow session. 1515 | 1516 | # run session, get boxes and classes 1517 | out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes], 1518 | feed_dict={ 1519 | model_body.input: image_data[:], 1520 | input_image_shape: [416, 416] 1521 | ,K.learning_phase():0 #testing phase 1522 | }) 1523 | 1524 | if AR_Mode: # get the averaged classes of traffic signs then update the global transparent process image 1525 | classes_to_be_shown=average_classes(out_classes) 1526 | WriteBuffer(out_boxes,out_classes,classes_to_be_shown) 1527 | DrawProcess() 1528 | else: # draw boxes on objects in the frame 1529 | image_with_boxes = draw_boxes(image_data[:][0], out_boxes, out_classes,class_names, out_scores) 1530 | 1531 | """ 1532 | ########################################################################################## 1533 | ###################################### display_interval Function ######################### 1534 | ########################################################################################## 1535 | """ 1536 | def display_interval(): 1537 | ''' 1538 | Function to be run on separate thread to display frames after composition with server transparent image and process transparent image 1539 | 1540 | Parameters: 1541 | ----------- 1542 | None 1543 | 1544 | Return: 1545 | ------- 1546 | None 1547 | ''' 1548 | global ret,T_server,T_process,im2 1549 | global w,h,GetFramesFlag 1550 | # initialize images 1551 | T_server=Image.new('RGBA',(w,h),(255,255,255,0)) 1552 | T_process=Image.new('RGBA',(w,h),(255,255,255,0)) 1553 | counter = 0; 1554 | while (True): 1555 | time.sleep(1/30) 1556 | if (GetFramesFlag==True): # CHECK IF FIRST FRAME IS CAPTURED 1557 | #hnnnnnnnnnnnnnnnnnnnnnnna cooooooooooode 1558 | # print(im2.shape,T_server.size) 1559 | out1 = Image.alpha_composite(im2, T_server) 1560 | out2= Image.alpha_composite(out1, T_process) 1561 | out2=cv2.cvtColor(np.array(out2), cv2.COLOR_RGB2BGR) 1562 | cv2.imwrite("images/{}.jpg".format(0),out2); 1563 | 1564 | #print("Time:",time.time()-w) 1565 | # cv2.imshow('r',(out2)) 1566 | # if cv2.waitKey(1) and 0xFF == ord('q'):break; 1567 | # cv2.destroyAllWindows() 1568 | 1569 | """ 1570 | ########################################################################################## 1571 | ################################ Average classes Function ################################ 1572 | ########################################################################################## 1573 | """ 1574 | def average_classes(out_classes): 1575 | """ 1576 | Average function to eliminate False alarms of traffic signs 1577 | 1578 | Parameters: 1579 | ----------- 1580 | out_classes: list 1581 | output classes of detection objects in the frame 1582 | """ 1583 | global begin,count_occurrence,frames_counted,number_of_frames_averaged,min_occurrence 1584 | show_class=[] 1585 | out_classes=list(np.array(out_classes)) 1586 | 1587 | for z in out_classes: 1588 | begin[z]=1 1589 | count_occurrence[z]= count_occurrence[z]+1 1590 | frames_counted=[frames_counted[f]+1 if t==1 else frames_counted[f] for f,t in enumerate(begin)] 1591 | frames_counted=list(np.asarray(frames_counted,dtype=np.int32)) 1592 | show_class=[show_class for show_class,x in enumerate(count_occurrence) if x > min_occurrence] 1593 | show_class=list(np.asarray(show_class,dtype=np.int32)) 1594 | 1595 | if (any(r==number_of_frames_averaged for r in frames_counted)): 1596 | indices_frames_counted=[] 1597 | indices_frames_counted_temp=list(np.nonzero(np.array(frames_counted) >= number_of_frames_averaged)) 1598 | for i in range (len(indices_frames_counted_temp[0])): 1599 | indices_frames_counted.append(indices_frames_counted_temp[0][i]) 1600 | 1601 | for z in indices_frames_counted: 1602 | frames_counted[z]=0 1603 | count_occurrence[z]=0 1604 | begin[z]=0 1605 | frames_counted=list(np.asarray(frames_counted,dtype=np.int32)) 1606 | 1607 | count_occurrence=list(np.asarray(count_occurrence,dtype=np.int32)) 1608 | begin=list(np.asarray(begin,dtype=np.int32)) 1609 | 1610 | return show_class 1611 | 1612 | 1613 | ########################################################################################## 1614 | ########################################################################################## 1615 | ########################################################################################## 1616 | ########################################################################################## 1617 | 1618 | """ 1619 | ########################################################################################## 1620 | ######################################### MAIN ########################################### 1621 | ########################################################################################## 1622 | """ 1623 | def videoDrawThread(): 1624 | class_names = get_classes(classes_path) # load the classes names 1625 | anchors = YOLO_ANCHORS 1626 | model_body, model = create_model(anchors, class_names) 1627 | model.load_weights(weights_file_path) 1628 | videoDraw(model_body,class_names,anchors) 1629 | 1630 | def _main(): 1631 | ##################################################### 1632 | #########ؤ############################################ 1633 | # s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 1634 | # s.connect(('8.8.8.8', 1)) # connect() for UDP doesn't send packets 1635 | # local_ip_address = s.getsockname()[0] 1636 | # 1637 | thread = Thread(target = videoDrawThread) 1638 | thread.start(); 1639 | app.run(host=VR_IP,port=2000,threaded=True, debug=True) 1640 | 1641 | 1642 | ##################################################### 1643 | #################################################### 1644 | 1645 | if __name__ == '__main__': 1646 | try: 1647 | _main() 1648 | except KeyboardInterrupt: #catch CTRL+C press 1649 | print(" ###### LIVE VIDEO RELEASED ######## ") 1650 | video.release() #release camera resources after pressing CTRL+C 1651 | get_ipython().magic('%reset -sf') #delete all variables after pressing CTRL+C 1652 | -------------------------------------------------------------------------------- /Main_Code.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Thu Jun 28 22:19:28 2018 5 | 6 | @author: EECE AR HUD TEAM 2018 7 | """ 8 | """ 9 | ##################################################################################### 10 | ###################################### IMPORT LIBRARIES ############################# 11 | ##################################################################################### 12 | """ 13 | from IPython import get_ipython # to be used as an interface with Ipython console 14 | import time 15 | import os,cv2 16 | from multiprocessing.pool import ThreadPool #to be used for Multithreading 17 | import numpy as np 18 | import PIL #to be used for preoprocessing of data 19 | from keras import backend as K 20 | from keras.layers import Input, Lambda, Conv2D 21 | from keras.models import load_model, Model 22 | from yad2k.models.keras_yolo import (yolo_body,yolo_eval, yolo_head, yolo_loss) 23 | from yad2k.utils.draw_boxes import draw_boxes 24 | from matplotlib import pyplot as plt 25 | import sys 26 | from AR.AR_semiFinal2 import DrawDirection, WriteBuffer,gauge,Cat_edit 27 | #import imutils 28 | from PIL import Image 29 | from queue import Queue #to be used for queuing to get rid of image after showing it in real live video 30 | import urllib.request 31 | import socket 32 | 33 | """ 34 | ##################################################################################### 35 | ###################################### GLOBAL VARIABLES ############################# 36 | ##################################################################################### 37 | """ 38 | ##################################### JUST FOR TESTING ############################## 39 | #next_route="Alhosary" 40 | #IsCalling=False 41 | #Mob_Name="el sood 3yonooo .. ya wlaa" 42 | #ans=False 43 | #music_state="play" 44 | #direc="right" 45 | #SignType=1 46 | #arrived=False 47 | #buffer=[False ,None, 0 , arrived , direc , next_route, IsCalling, Mob_Name, True, SignType, False, (0,0), False,(0,0), 50, 5, 10, ans, music_state] 48 | 49 | 50 | #next_route="" 51 | #IsCalling=False 52 | #Mob_Name=None 53 | #ans=False 54 | #music_state="play" 55 | #direc="right" 56 | #arrived=False 57 | #speed=1 58 | q=0 # counter used for testing in draw function 59 | 60 | ################################## MAIN VARIABLES #################################### 61 | 62 | ####### BUFFER FOR SHARING DATA BETWEEN AR, NAVIGATION AND DETECTION PARTS ############# 63 | buffer=[False,None, 0 , False, None , None, False, 0, False, (0,0), False,(0,0)] 64 | 65 | """ 66 | IsDirection=buffer[0]-->flag for the arrow on the ground 67 | direction=buffer[1]-->type of the arrow on the ground[go,left,right] 68 | distance=buffer[2]--> int for the distance of the given direction (print in a message below the arrow) 69 | arrived=buffer[3]--> Flag if arrived for final destination 70 | NextRouteDirection=buffer[4]-->string for navigation notifcation[first notification bar] if (=none) no navigation ,if ((up,right,left,uturn)) put the corrsponding image 71 | next_Route=buffer[5]-->(string ) name of the next route 72 | IsCalling=buffer[6]-->flag if the coming mobile name is caller ID 73 | MobileName=buffer[7]--> name coming from mobile caller ID or song name ,if none no mobile notifcation 74 | IsSign=buffer[8]--> flag if sign detected 75 | SignType=buffer[9]--> detected sign type 76 | IsCar=buffer[10] -->flag for cars 77 | CarPos=buffer[11]--> car position [todo positionsssss] 78 | IsPed=buffer[12]-->flag for pedstrinas 79 | PedPos=buffer[13]--> pedstrian position [todo postionsss] 80 | cat_sp=buffer[14] --> car speed 81 | answered=buffer[17] --> True if the call is answered 82 | music_state= buffer[18]--> pause or play 83 | """ 84 | 85 | IP=[l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0] 86 | imageQueue = Queue(); #a queue to hold next image to be shown.(The image is dequued to temp variable (a variable in display_interval func) to be shown) 87 | frames_captured=[] #frames captured by webcam (each frame captured is deleted after passing to draw function ) 88 | ProcessingTime = 0.5; #initial processing time for the first frame 89 | YOLO_ANCHORS = np.array(((0.57273, 0.677385), (1.87446, 2.06253), (3.33843, 5.47434),(7.88282, 3.52778), (9.77052, 9.16828))) 90 | weights_file_path='saved_weights/BestWeights/trained_epoch_65_acc_45.h5' 91 | classes_path='saved_weights/BestWeights/Temp_classes.txt' 92 | VideoPath='AR/TestVideos/Test5.MOV' 93 | AR_Mode=True 94 | Live=False 95 | ret=None 96 | ################################## AVERAGE VARIABLES ################################## 97 | count_occurrence=np.zeros(9) 98 | frames_counted=np.zeros(9) 99 | start=np.zeros(9) 100 | number_of_frames_averaged=6 101 | min_occurrence=3 102 | 103 | 104 | 105 | """ 106 | ##################################################################################### 107 | ###################################### MAIN FUNCIONS ################################ 108 | ##################################################################################### 109 | """ 110 | def _main(): 111 | class_names = get_classes(classes_path) #loads the classes names 112 | anchors = YOLO_ANCHORS 113 | model_body, model = create_model(anchors, class_names) 114 | model.load_weights(weights_file_path) 115 | videoDraw(model_body,class_names,anchors) 116 | 117 | """ 118 | ##################################################################################### 119 | ############################## create_model FUNCIONS ################################ 120 | ##################################################################################### 121 | """ 122 | def create_model(anchors, class_names, load_pretrained=True): 123 | ''' 124 | returns the body of the model and the model 125 | 126 | # Params: 127 | load_pretrained: whether or not to load the pretrained model or initialize all weights 128 | 129 | # Returns: 130 | model_body: YOLOv2 with new output layer 131 | model: YOLOv2 with custom loss Lambda layer 132 | ''' 133 | detectors_mask_shape = (13, 13, 5, 1) 134 | matching_boxes_shape = (13, 13, 5, 5) 135 | 136 | # Create model input layers. 137 | image_input = Input(shape=(416, 416, 3)) # 138 | boxes_input = Input(shape=(None, 5)) #true label 139 | detectors_mask_input = Input(shape=detectors_mask_shape) # a return from get_detectors function 140 | matching_boxes_input = Input(shape=matching_boxes_shape) # a return from get_detectors function 141 | 142 | # Create model body. 143 | # Note:Model here is created without last layer as we will train last layer again every time we have new dataset. 144 | yolo_model = yolo_body(image_input, len(anchors), len(class_names)) 145 | 146 | """ 147 | The next line creates model between input layer and the layer before the last layer but how it creates the model with only knowing the first layer and last layer of model ? 148 | each layer has a pointer to the preeceding layer so it creates model from the last layer going upwards to the first layer through pointers 149 | """ 150 | topless_yolo = Model(yolo_model.input, yolo_model.layers[-2].output) 151 | 152 | if load_pretrained: 153 | # Save topless yolo: 154 | # Saving occurs only once to save model .without last layer. 155 | #every time after this time we saved in, we will only load data. 156 | topless_yolo_path = os.path.join('model_data', 'yolo_topless.h5') #get path of topless_yolo 157 | if not os.path.exists(topless_yolo_path): # ask if topless_yolo exists ? if exists then only load it ,if not exists then enter the if conditional to load the full yolo model 158 | print("CREATING TOPLESS WEIGHTS FILE") #so that you can extract the topless_yolo model. 159 | yolo_path = os.path.join('model_data', 'yolo.h5') #yolo.h5 160 | model_body = load_model(yolo_path) 161 | model_body = Model(model_body.inputs, model_body.layers[-2].output) 162 | model_body.save_weights(topless_yolo_path) 163 | topless_yolo.load_weights(topless_yolo_path) 164 | 165 | final_layer = Conv2D(len(anchors)*(5+len(class_names)), (1, 1), activation='linear')(topless_yolo.output) 166 | model_body = Model(image_input, final_layer) 167 | # Place model loss on CPU to reduce GPU memory usage. 168 | """ 169 | model_loss is a layer to represent the loss of our model 170 | so it feeds the final layer outputs to yolo_loss function to calculate loss and then passes it to Model func. to generate 171 | new model from the input to the last layer which is now the loss layer. 172 | """ 173 | # with tf.device('/gpu:0'): 174 | # TODO: Replace Lambda with custom Keras layer for loss. 175 | model_loss = Lambda(yolo_loss,output_shape=(1, ),name='yolo_loss', 176 | arguments={'anchors': anchors, 177 | 'num_classes': len(class_names)})([ 178 | model_body.output, boxes_input, 179 | detectors_mask_input, matching_boxes_input 180 | ]) 181 | 182 | model = Model([model_body.input, boxes_input, detectors_mask_input,matching_boxes_input], model_loss) 183 | 184 | return model_body, model 185 | 186 | """ 187 | ##################################################################################### 188 | ############################## get_classes FUNCTION ################################# 189 | ##################################################################################### 190 | """ 191 | def get_classes(classes_path): 192 | '''loads the classes''' 193 | with open(classes_path) as f: 194 | class_names = f.readlines() 195 | class_names = [c.strip() for c in class_names] 196 | return class_names 197 | 198 | """ 199 | ##################################################################################### 200 | ############################## process_data FUNCTION ################################ 201 | ##################################################################################### 202 | """ 203 | def process_data(images, boxes=None): 204 | '''processes the data''' 205 | images = [PIL.Image.fromarray(i) for i in images] 206 | orig_size = np.array([images[0].width, images[0].height]) 207 | orig_size = np.expand_dims(orig_size, axis=0) 208 | 209 | # Image preprocessing. 210 | processed_images = [i.resize((416, 416), PIL.Image.BICUBIC) for i in images] 211 | processed_images = [np.array(image, dtype=np.float) for image in processed_images] 212 | processed_images = [image/255. for image in processed_images] 213 | 214 | if boxes is not None: 215 | # Box preprocessing. 216 | # Original boxes stored as 1D list of class, x_min, y_min, x_max, y_max. 217 | boxes = [box.reshape((-1, 5)) for box in boxes] 218 | 219 | # Get box parameters as x_center, y_center, box_width, box_height, class. 220 | boxes_xy = [0.5 * (box[:, 3:5] + box[:, 1:3]) for box in boxes] 221 | boxes_wh = [box[:, 3:5] - box[:, 1:3] for box in boxes] 222 | boxes_xy = [boxxy / orig_size for boxxy in boxes_xy] 223 | boxes_wh = [boxwh / orig_size for boxwh in boxes_wh] 224 | 225 | boxes = [np.concatenate((boxes_xy[i], boxes_wh[i], box[:, 0:1]), axis=1) for i, box in enumerate(boxes)] 226 | 227 | # find the max number of boxes 228 | max_boxes = 0 229 | for boxz in boxes: 230 | if boxz.shape[0] > max_boxes: 231 | max_boxes = boxz.shape[0] 232 | 233 | # add zero pad for training 234 | for i, boxz in enumerate(boxes): 235 | if boxz.shape[0] < max_boxes: 236 | zero_padding = np.zeros( (max_boxes-boxz.shape[0], 5), dtype=np.float32) 237 | boxes[i] = np.vstack((boxz, zero_padding)) 238 | 239 | return np.array(processed_images), np.array(boxes) 240 | else: 241 | return np.array(processed_images) 242 | 243 | """ 244 | ##################################################################################### 245 | ########################### model_body_processing FUNCTION ########################## 246 | ##################################################################################### 247 | """ 248 | 249 | def model_body_processing(model_body, class_names, anchors): 250 | ''' 251 | function to be called once for loading weights and preparing the boxes,scores and classes 252 | according to anchor boxes values,score threshold and iou threshold. 253 | This is evaluated by non_max_suppression function. 254 | ''' 255 | global input_image_shape 256 | 257 | yolo_outputs = yolo_head(model_body.output, anchors, len(class_names)) 258 | boxes, scores, classes = yolo_eval(yolo_outputs, input_image_shape, score_threshold=0.5, iou_threshold=0.5) 259 | return boxes,scores,classes 260 | 261 | 262 | """ 263 | ##################################################################################### 264 | ################################# GetFrames FUNCTION ################################ 265 | ##################################################################################### 266 | """ 267 | def GetFrames(): 268 | ''' 269 | Function to be run on separate thread to get frames from the camera and store them in global 270 | frames_captured. 271 | time.sleep is used to determine number of frames taken per second 272 | [example if(Processing time =0.1) then fps=1/0.1=10 frames per second] 273 | Processing time variable is equivalent to processing time taken to get and draw bounding box plus 274 | an offset (tolerance). 275 | ''' 276 | global ProcessingTime,frames_captured,ret,DisplayQueue 277 | if Live: 278 | print(" ###### STARTING LIVE VIDEO ######## \n\n") 279 | while(True): 280 | ret, im = video.read() 281 | time.sleep(ProcessingTime) 282 | im=cv2.cvtColor(im, cv2.COLOR_BGR2RGB) 283 | frames_captured.append(im) 284 | else: 285 | while(video.isOpened()): 286 | ret, im = video.read() 287 | time.sleep(ProcessingTime) 288 | im=cv2.cvtColor(im, cv2.COLOR_BGR2RGB) 289 | frames_captured.append(im) 290 | 291 | """ 292 | ##################################################################################### 293 | ################################# videoDraw FUNCTION ################################ 294 | ##################################################################################### 295 | """ 296 | def videoDraw(model_body, class_names, anchors): 297 | ''' 298 | Main function of drawing that controls all actions 299 | ''' 300 | global video,ProcessingTime,input_image_shape,frames_captured,DrawFrame,ret 301 | input_image_shape = K.placeholder(shape=(2, )) 302 | boxes,scores,classes=model_body_processing(model_body, class_names, anchors) #called once to avoid adding nodes to graph 303 | 304 | ########## CALIBRATION FOR SESS RUN ####### 305 | # calibrated=cv2.imread('/home/dina/yad2k/Calibration.bmp') 306 | # calibrated=cv2.resize(calibrated,(416,416)) 307 | # calibrated = cv2.cvtColor(calibrated, cv2.COLOR_BGR2RGB) 308 | # calibrated = np.reshape(calibrated,(-1,416,416,3)) 309 | # draw(boxes,scores,classes,model_body, class_names, calibrated) 310 | ######### END OF CALIBRATION ########### 311 | 312 | ##### start capturing from web_cam (frame is captured with size 416x416) #### 313 | if Live: 314 | video = cv2.VideoCapture(0) 315 | # video.set(3,416); 316 | # video.set(4,416); 317 | else: 318 | print("opening video") 319 | video = cv2.VideoCapture( VideoPath) 320 | 321 | ####multithreading between : 1)Capturing video frames 2)Displaying video after adding bounding boxes 3)processing on images captured #### 322 | pool=ThreadPool(processes=2) 323 | pool.apply_async(GetFrames, []) 324 | pool.apply_async(display_interval,[]) 325 | if Live: 326 | while (True): 327 | sTime = time.time(); 328 | if(len(frames_captured) > 0): 329 | frames_captured_p=np.asarray(frames_captured[:]) #frames captured are passed to another temp variable (frames captured_p) 330 | del frames_captured[:] # frames_captured are deleted after being passed to other temp variable (frames_captured_p) to avoid memory overflow 331 | frames_captured_p=process_data(frames_captured_p)#pass frames captured to be preprocessed 332 | out_classes=draw(boxes,scores,classes,model_body, class_names, frames_captured_p[:]) #pass frames captured to draw function to add bounding boxes 333 | classes_to_be_shown=average_classes(out_classes) ### to be passed to AR code 334 | eTime=time.time() 335 | ProcessingTime = (eTime-sTime) * 1.1 #Processing time which controls the fps of video capture 336 | else: 337 | while (video.isOpened()): 338 | if ret==False:break 339 | sTime = time.time(); 340 | if(len(frames_captured) > 0): 341 | frames_captured_p=np.asarray(frames_captured[:]) #frames captured are passed to another temp variable (frames captured_p) 342 | del frames_captured[:] # frames_captured are deleted after being passed to other temp variable (frames_captured_p) to avoid memory overflow 343 | frames_captured_p=process_data(frames_captured_p)#pass frames captured to be preprocessed 344 | 345 | out_classes=draw(boxes,scores,classes,model_body, class_names, frames_captured_p[:]) #pass frames captured to draw function to add bounding boxes 346 | classes_to_be_shown=average_classes(out_classes) ### to be passed to AR code 347 | eTime=time.time() 348 | ProcessingTime = (eTime-sTime) * 1.1 #Processing time which controls the fps of video capture 349 | 350 | """ 351 | ##################################################################################### 352 | ##################################### Draw FUNCTION ################################# 353 | ##################################################################################### 354 | """ 355 | def draw(boxes,scores,classes,model_body, class_names,image_data): 356 | ''' 357 | Draw bounding boxes on image datac 358 | ''' 359 | global input_image_shape ,imageQueue,q 360 | global buffer 361 | global next_route,IsCalling,Mob_Name,ans,music_state,direc,arrived,speed 362 | 363 | image_data = np.array([np.expand_dims(image, axis=0) for image in image_data]) 364 | sess = K.get_session() # TODO: Remove dependence on Tensorflow session. 365 | for i in range(len(image_data)): 366 | out_boxes, out_scores, out_classes = sess.run([boxes, scores, classes], 367 | feed_dict={ 368 | model_body.input: image_data[i], 369 | input_image_shape: [416, 416] 370 | ,K.learning_phase():0 #testing phase 371 | }) 372 | 373 | if AR_Mode: 374 | classes_to_be_shown=average_classes(out_classes) 375 | buffer[6],buffer[7],buffer[8],buffer[9],buffer[10],buffer[11]=WriteBuffer(out_boxes,out_classes,classes_to_be_shown) 376 | image_with_boxes = DrawDirection(buffer,IsFrame=True, FramePath=None, Frame=image_data[i][0]) 377 | image_with_boxes=image_with_boxes.resize((416,416),Image.ANTIALIAS) 378 | cv2.imwrite('Video_Images/{}.jpg'.format(q),cv2.cvtColor(np.array(image_with_boxes), cv2.COLOR_BGR2RGB)) 379 | else: 380 | image_with_boxes = draw_boxes(image_data[i][0], out_boxes, out_classes,class_names, out_scores) 381 | imageQueue.put(np.array(image_with_boxes)); #queue image_with_boxes to imageQueue. 382 | 383 | return out_classes 384 | """ 385 | ##################################################################################### 386 | ############################# display_interval FUNCTION ############################# 387 | ##################################################################################### 388 | """ 389 | def display_interval(): 390 | ''' 391 | Function to be run on separate thread to display frames after adding bounding boxes continously 392 | ''' 393 | global imageQueue,ret 394 | 395 | tempImage = []; 396 | print("in interval") 397 | while (video.isOpened()): 398 | if (ret==False): 399 | 400 | break 401 | if (not imageQueue.empty()): 402 | tempImage = imageQueue.get();#dequeue frames with boxes to avoid overflow of memory 403 | tempImage=cv2.cvtColor(tempImage, cv2.COLOR_BGR2RGB) 404 | print("in if") 405 | print("out if") 406 | cv2.imshow('Frame',tempImage) 407 | if cv2.waitKey(1) and 0xFF == ord('q'):break; 408 | cv2.destroyAllWindows() 409 | 410 | """ 411 | ##################################################################################### 412 | ################################ average_classes FUNCTION ########################### 413 | ##################################################################################### 414 | """ 415 | def average_classes(out_classes): 416 | global start,count_occurrence,frames_counted,number_of_frames_averaged,min_occurrence 417 | show_class=[] 418 | out_classes=list(map(int,out_classes)) 419 | for z in out_classes: 420 | start[z]=1 421 | count_occurrence[z]= count_occurrence[z]+1 422 | frames_counted=[frames_counted[f]+1 if t==1 else frames_counted[f] for f,t in enumerate(start)] 423 | frames_counted=list(map(int,frames_counted)) 424 | show_class=[show_class for show_class,x in enumerate(count_occurrence) if x > min_occurrence] 425 | show_class=list(map(int,show_class)) 426 | 427 | if (any(r==number_of_frames_averaged for r in frames_counted)): 428 | indices_frames_counted=[] 429 | indices_frames_counted_temp=list(np.nonzero(np.array(frames_counted) >= number_of_frames_averaged)) 430 | for i in range (len(indices_frames_counted_temp[0])): 431 | indices_frames_counted.append(indices_frames_counted_temp[0][i]) 432 | 433 | for z in indices_frames_counted: 434 | frames_counted[z]=0 435 | count_occurrence[z]=0 436 | start[z]=0 437 | frames_counted=list(map(int,frames_counted)) 438 | count_occurrence=list(map(int,count_occurrence)) 439 | start=list(map(int,start)) 440 | return show_class 441 | 442 | 443 | 444 | if __name__ == '__main__': 445 | try: 446 | _main() 447 | except KeyboardInterrupt: #catch CTRL+C press 448 | print(" ###### LIVE VIDEO RELEASED ######## ") 449 | video.release() #release camera resources after pressing CTRL+C 450 | get_ipython().magic('%reset -sf') #delete all variables after pressing CTRL+C 451 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Augmented-reality-HUD-using-Deep-Learning 2 | The project utilizes the power of deep machine learning to detect and classify the surrounding environment hazards for vehicle. This includes passers-by, traffic signs and other vehicles in road. The implemented system streams live scene of vehicle traffic augmented with hazards alerts detected through the virtual reality headset. The system also augments navigation, music player and phone calls. The goal is to give the driver un- preceded level of visual information to keep him safe on the road. 3 | Object Detection Algorithm: YOLO9000 4 | Framework: Keras 5 | Dependencies: numpy, cv2, PIL, queue, socket, urllib.request, json, regular expression, datetime, threading, flask, http.client 6 | ![ar](https://user-images.githubusercontent.com/45567574/50644113-931aa580-0f78-11e9-94a6-23c28da4c7c5.jpg) 7 | Youtube Video demonstrating the work of this project: https://youtu.be/KtHVSDXYlpA 8 | --------------------------------------------------------------------------------