├── Lane_Detection
│   ├── coco.names
│   ├── lane_detection_version.py
│   └── utils.py
└── README.md

/Lane_Detection/coco.names:
--------------------------------------------------------------------------------
person
bicycle
car
motorbike
aeroplane
bus
train
truck
boat
traffic light
fire hydrant
stop sign
parking meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports ball
kite
baseball bat
baseball glove
skateboard
surfboard
tennis racket
bottle
wine glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot dog
pizza
donut
cake
chair
sofa
pottedplant
bed
diningtable
toilet
tvmonitor
laptop
mouse
remote
keyboard
cell phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy bear
hair drier
toothbrush
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Project structure
The aim of this project is to implement a detection algorithm that identifies road features, namely lane boundaries and surrounding vehicles. Lane boundaries are detected with classical computer vision techniques using OpenCV; vehicles are detected with the same library using pre-trained YOLO weights.


![ezgif com-video-to-gif (2)](https://user-images.githubusercontent.com/51369142/85700210-103d5b80-b6d4-11ea-8894-d36eef4cf0d1.gif)

The pipeline that identifies the road boundaries comprises the following steps:

1. Calculate the camera calibration matrix using the `cv2.findChessboardCorners()` function to remove the distortion introduced by the lens and ensure that the lane detection algorithm generalizes to different cameras, then apply the distortion correction to the raw image (see the calibration sketch after this list).

2. Detect the edges in the image using a set of gradient- and color-based thresholds built on the `cv2.Sobel` and `cv2.cvtColor` functions, producing a thresholded binary image.

3. Apply a perspective transform that yields a bird's-eye view of the road, which makes extracting the lane boundaries easier.

4. Scan the resulting frame for lane pixels, fit them to the lane boundaries, and warp the detected lane boundaries back onto the original image.

5. Approximate road properties such as the curvature of the road and the vehicle's position within the lane.
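The calibration pickle consumed in step 1 (`cal_pickle.p`, loaded by `utils.undistort()`) is not generated by the scripts in this repository. A minimal sketch of how it could be produced, assuming a set of 9x6 chessboard photos in a hypothetical `camera_cal/` folder:

```python
import glob
import pickle

import cv2
import numpy as np

# 3D object points for an assumed 9x6 inner-corner chessboard on the z = 0 plane
objp = np.zeros((9 * 6, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

objpoints, imgpoints, img_size = [], [], None
for fname in glob.glob('camera_cal/*.jpg'):  # hypothetical calibration shots
    gray = cv2.cvtColor(cv2.imread(fname), cv2.COLOR_BGR2GRAY)
    img_size = gray.shape[::-1]
    found, corners = cv2.findChessboardCorners(gray, (9, 6), None)
    if found:
        objpoints.append(objp)
        imgpoints.append(corners)

# Calibrate and store the camera matrix / distortion coefficients in the
# dictionary format expected by utils.undistort()
_, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
with open('cal_pickle.p', 'wb') as f:
    pickle.dump({'mtx': mtx, 'dist': dist}, f)
```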
A snapshot of the aforementioned pipeline:

![Webp net-resizeimage](https://user-images.githubusercontent.com/51369142/85710587-513a6d80-b6de-11ea-8abc-f8d95353a4dc.jpg)

### How to run the project
First clone `https://github.com/shayantaherian/Lane-Detection/.git`, then move into the directory with `cd Lane_Detection` and run `python lane_detection_version.py`. The script also accepts `--model_cfg`, `--model_weights`, `--video` and `--output_dir` arguments; the YOLO config and weights paths default to empty strings, so they must point to a valid `.cfg`/`.weights` pair for the vehicle-detection branch to work.

### References
1. [Detecting road features](https://github.com/navoshta/detecting-road-features)
2. [lane-detection-with-opencv](https://github.com/ckirksey3/lane-detection-with-opencv)
--------------------------------------------------------------------------------
/Lane_Detection/lane_detection_version.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
from utils import *
import os
import time
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model_cfg', type=str, default='',
                    help='path to the YOLO config file')
parser.add_argument('--model_weights', type=str, default='',
                    help='path to the YOLO model weights')
parser.add_argument('--video', type=str, default='',
                    help='path to the video file')
parser.add_argument('--src', type=int, default=0,
                    help='source index of the camera')
parser.add_argument('--output_dir', type=str, default='',
                    help='path to the output directory')
args = parser.parse_args()

# print the arguments
print('----- info -----')
print('[i] The config file: ', args.model_cfg)
print('[i] The weights of model file: ', args.model_weights)
print('[i] Path to video file: ', args.video)
print('###########################################################\n')
frameWidth = 640
frameHeight = 480



net = cv2.dnn.readNet(args.model_weights, args.model_cfg)
classes = []
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]  # put the class names into a list

layers_names = net.getLayerNames()
# getUnconnectedOutLayers() returns an Nx1 array in OpenCV <= 4.5.3 and a flat
# array in later releases; flatten() handles both.
output_layers = [layers_names[i - 1] for i in net.getUnconnectedOutLayers().flatten()]
colors = np.random.uniform(0, 255, size=(len(classes), 3))

font = cv2.FONT_HERSHEY_PLAIN
frame_id = 0
cameraFeed = False
#videoPath = 'road_car_view.mp4'
cameraNo = 1
#frameWidth = 640
#frameHeight = 480


if cameraFeed:
    intialTracbarVals = [24, 55, 12, 100]  # wT, hT, wB, hB
else:
    intialTracbarVals = [42, 63, 14, 87]   # wT, hT, wB, hB
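# How the four trackbar percentages map to the warp source quadrilateral (see
# utils.valTrackbars): each pair is a fraction of frame size, mirrored about
# the vertical centre line. For example, [42, 63, 14, 87] becomes
#   top-left    (0.42, 0.63)    top-right     (0.58, 0.63)
#   bottom-left (0.14, 0.87)    bottom-right  (0.86, 0.87)
# and perspective_warp() later scales these fractions to pixel coordinates.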
output_file = ''
if cameraFeed:
    cap = cv2.VideoCapture(cameraNo)
    cap.set(3, frameWidth)
    cap.set(4, frameHeight)
else:
    cap = cv2.VideoCapture(args.video)
    output_file = args.video[:-4].rsplit('/')[-1] + '_Detection.avi'
count = 0
noOfArrayValues = 10
#global arrayCurve, arrayCounter
arrayCounter = 0
arrayCurve = np.zeros([noOfArrayValues])
myVals = []
initializeTrackbars(intialTracbarVals)


#fourcc = cv2.VideoWriter_fourcc(*'XVID')
#video_writer = cv2.VideoWriter('output.avi', fourcc, 20.0, (640,480))
video_writer = cv2.VideoWriter('output2.avi', cv2.VideoWriter_fourcc(*'XVID'),
                               cap.get(cv2.CAP_PROP_FPS), (2 * frameWidth, frameHeight))
starting_time = time.time()
while True:

    success, img = cap.read()
    if not success:
        print('[i] ==> Done processing!!!')
        print('[i] ==> Output file is stored at', os.path.join(args.output_dir, output_file))
        cv2.waitKey(1000)
        break

    #img = cv2.imread('test3.jpg')
    if cameraFeed == False:
        img = cv2.resize(img, (frameWidth, frameHeight), None)
    imgWarpPoints = img.copy()
    imgFinal = img.copy()
    imgCanny = img.copy()

    imgUndis = undistort(img)
    imgThres, imgCanny, imgColor = thresholding(imgUndis)
    src = valTrackbars()
    imgWarp = perspective_warp(imgThres, dst_size=(frameWidth, frameHeight), src=src)
    imgWarpPoints = drawPoints(imgWarpPoints, src)
    imgSliding, curves, lanes, ploty = sliding_window(imgWarp, draw_windows=True)

    try:
        curverad = get_curve(imgFinal, curves[0], curves[1])
        lane_curve = np.mean([curverad[0], curverad[1]])
        imgFinal = draw_lanes(img, curves[0], curves[1], frameWidth, frameHeight, src=src)

        # Average the curvature over the last noOfArrayValues frames
        currentCurve = lane_curve // 50
        if int(np.sum(arrayCurve)) == 0:
            averageCurve = currentCurve
        else:
            averageCurve = np.sum(arrayCurve) // arrayCurve.shape[0]
        if abs(averageCurve - currentCurve) > 200:
            arrayCurve[arrayCounter] = averageCurve
        else:
            arrayCurve[arrayCounter] = currentCurve
        arrayCounter += 1
        if arrayCounter >= noOfArrayValues:
            arrayCounter = 0
        cv2.putText(imgFinal, str(int(averageCurve)), (frameWidth // 2 - 70, 70),
                    cv2.FONT_HERSHEY_DUPLEX, 1.75, (0, 0, 255), 2, cv2.LINE_AA)

    except Exception:
        lane_curve = 0

    imgFinal = drawLines(imgFinal, lane_curve)

    # Object detection (note: this reads the next frame, so lane detection and
    # object detection run on alternating frames of the video)
    success, frame = cap.read()
    if not success:
        break

    frame = cv2.resize(frame, (frameWidth, frameHeight), None)
    frame_id += 1
    height, width, channels = frame.shape
    # Detect objects: scale pixel values by 1/255 (~0.00392) and resize to the
    # 320x320 network input
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (320, 320), (0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    outs = net.forward(output_layers)

    # Showing information on the screen
    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5:
                # Object detected: the first four values are the normalized
                # box centre and size
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)

                # Rectangle coordinates
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                #cv2.rectangle(img, (x,y), (x+w, y+h), (0, 255, 0))

                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                # Name of the object
                class_ids.append(class_id)

    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)

    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = "{}: {:.2f}%".format(classes[class_ids[i]], confidences[i] * 100)
            color = colors[class_ids[i]]  # one colour per class
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y + 10), font, 2, color, 2)

    elapsed_time = time.time() - starting_time
    fps = frame_id / elapsed_time
    cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 2, (0, 0, 0), 1)
    imgBlank = np.zeros_like(img)

    imgStacked = stackImages(0.7, ([imgUndis, frame],
                                   [imgColor, imgCanny],
                                   [imgWarp, imgSliding]
                                   ))

    #final_frame = cv2.hconcat((frame, imgCanny))
    #video_writer.write(final_frame)
    #cv2.imshow('frame', final_frame)
    cv2.imshow("Image", frame)
    cv2.imshow("PipeLine", imgStacked)
    cv2.imshow("Result", imgFinal)


    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

#fourcc = cv2.VideoWriter_fourcc(*'MJPG')
#out_corner = cv2.VideoWriter('img_corner_1.avi', fourcc, 20.0, (width, height))
cap.release()
cv2.destroyAllWindows()
print('==> All done!')
print('***********************************************************')
--------------------------------------------------------------------------------
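The lane-detection half of the loop above can be exercised without the YOLO branch. A minimal sketch using the helpers from `utils.py` below; it assumes `cal_pickle.p` exists and reuses the sample video name `road_car_view.mp4` referenced in the script:

```python
import cv2
import numpy as np
from utils import (undistort, thresholding, perspective_warp, sliding_window,
                   draw_lanes, initializeTrackbars, valTrackbars)

initializeTrackbars([42, 63, 14, 87])        # wT, hT, wB, hB percentages
cap = cv2.VideoCapture('road_car_view.mp4')  # sample video name from the script
while True:
    ok, img = cap.read()
    if not ok:
        break
    img = cv2.resize(img, (640, 480))
    warped = perspective_warp(thresholding(undistort(img))[0],
                              dst_size=(640, 480), src=valTrackbars())
    _, curves, _, _ = sliding_window(warped, draw_windows=False)
    if isinstance(curves[0], np.ndarray):    # sliding_window returns (0, 0) on failure
        overlay = draw_lanes(img, curves[0], curves[1], 640, 480, src=valTrackbars())
        cv2.imshow('lanes', overlay)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```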
/Lane_Detection/utils.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import pickle

def nothing(x):
    pass

def undistort(img, cal_dir='cal_pickle.p'):
    # cal_pickle.p is expected to hold a dict with the camera matrix ('mtx')
    # and distortion coefficients ('dist') from cv2.calibrateCamera()
    with open(cal_dir, mode='rb') as f:
        file = pickle.load(f)
    mtx = file['mtx']
    dist = file['dist']
    dst = cv2.undistort(img, mtx, dist, None, mtx)
    return dst

def colorFilter(img):
    # Keep yellow and white lane markings; everything else goes to black
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lowerYellow = np.array([18, 94, 140])
    upperYellow = np.array([48, 255, 255])
    lowerWhite = np.array([0, 0, 200])
    upperWhite = np.array([255, 255, 255])
    maskedWhite = cv2.inRange(hsv, lowerWhite, upperWhite)
    maskedYellow = cv2.inRange(hsv, lowerYellow, upperYellow)
    combinedImage = cv2.bitwise_or(maskedWhite, maskedYellow)
    return combinedImage


def thresholding(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kernel = np.ones((5, 5))
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 0)
    imgCanny = cv2.Canny(imgBlur, 50, 100)
    #imgClose = cv2.morphologyEx(imgCanny, cv2.MORPH_CLOSE, np.ones((10,10)))
    imgDial = cv2.dilate(imgCanny, kernel, iterations=1)
    imgErode = cv2.erode(imgDial, kernel, iterations=1)

    imgColor = colorFilter(img)
    combinedImage = cv2.bitwise_or(imgColor, imgErode)

    return combinedImage, imgCanny, imgColor

def initializeTrackbars(intialTracbarVals):
    cv2.namedWindow("Trackbars")
    cv2.resizeWindow("Trackbars", 360, 240)
    cv2.createTrackbar("Width Top", "Trackbars", intialTracbarVals[0], 50, nothing)
    cv2.createTrackbar("Height Top", "Trackbars", intialTracbarVals[1], 100, nothing)
    cv2.createTrackbar("Width Bottom", "Trackbars", intialTracbarVals[2], 50, nothing)
    cv2.createTrackbar("Height Bottom", "Trackbars", intialTracbarVals[3], 100, nothing)



def valTrackbars():
    widthTop = cv2.getTrackbarPos("Width Top", "Trackbars")
    heightTop = cv2.getTrackbarPos("Height Top", "Trackbars")
    widthBottom = cv2.getTrackbarPos("Width Bottom", "Trackbars")
    heightBottom = cv2.getTrackbarPos("Height Bottom", "Trackbars")

    src = np.float32([(widthTop / 100, heightTop / 100), (1 - (widthTop / 100), heightTop / 100),
                      (widthBottom / 100, heightBottom / 100), (1 - (widthBottom / 100), heightBottom / 100)])
    #src = np.float32([(0.43, 0.65), (0.58, 0.65), (0.1, 1), (1, 1)])
    return src

def drawPoints(img, src):
    img_size = np.float32([(img.shape[1], img.shape[0])])
    #src = np.float32([(0.43, 0.65), (0.58, 0.65), (0.1, 1), (1, 1)])
    src = src * img_size
    for x in range(0, 4):
        cv2.circle(img, (int(src[x][0]), int(src[x][1])), 15, (0, 0, 255), cv2.FILLED)
    return img

def pipeline(img, s_thresh=(100, 255), sx_thresh=(15, 255)):
    img = undistort(img)
    img = np.copy(img)
    # Convert to HLS color space and separate the L and S channels
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(np.float64)
    l_channel = hls[:, :, 1]
    s_channel = hls[:, :, 2]
    h_channel = hls[:, :, 0]
    # Sobel x
    sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0)  # take the derivative in x
    abs_sobelx = np.absolute(sobelx)  # absolute x derivative to accentuate lines away from horizontal
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))

    # Threshold x gradient
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

    # Threshold color channel
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    color_binary = np.dstack((np.zeros_like(sxbinary), sxbinary, s_binary)) * 255

    combined_binary = np.zeros_like(sxbinary)
    combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
    return combined_binary
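# Usage sketch for pipeline() (assumes cal_pickle.p exists; 'test3.jpg' is
# only an illustrative frame name, borrowed from a commented-out line in
# lane_detection_version.py):
#
#   frame = cv2.imread('test3.jpg')
#   binary = pipeline(frame)                     # H x W array of 0s and 1s
#   cv2.imshow('binary', binary.astype(np.uint8) * 255)
#   cv2.waitKey(0)
#
# Note that the main script drives thresholding() rather than pipeline().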
def perspective_warp(img,
                     dst_size=(1280, 720),
                     src=np.float32([(0.43, 0.65), (0.58, 0.65), (0.1, 1), (1, 1)]),
                     dst=np.float32([(0, 0), (1, 0), (0, 1), (1, 1)])):
    img_size = np.float32([(img.shape[1], img.shape[0])])
    src = src * img_size
    # For destination points, arbitrarily choose points that give a nice fit
    # for displaying the warped result -- not exact, but close enough
    dst = dst * np.float32(dst_size)
    # Given src and dst points, calculate the perspective transform matrix
    M = cv2.getPerspectiveTransform(src, dst)
    # Warp the image using OpenCV warpPerspective()
    warped = cv2.warpPerspective(img, M, dst_size)

    return warped

def inv_perspective_warp(img,
                         dst_size=(1280, 720),
                         src=np.float32([(0, 0), (1, 0), (0, 1), (1, 1)]),
                         dst=np.float32([(0.43, 0.65), (0.58, 0.65), (0.1, 1), (1, 1)])):
    # Same as perspective_warp with src and dst swapped: maps the bird's-eye
    # view back onto the original camera perspective
    img_size = np.float32([(img.shape[1], img.shape[0])])
    src = src * img_size
    dst = dst * np.float32(dst_size)
    M = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, M, dst_size)
    return warped

def get_hist(img):
    # Column-wise sum of the bottom half of a binary warped image
    hist = np.sum(img[img.shape[0] // 2:, :], axis=0)
    return hist
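# get_hist() column-sums the bottom half of the warped binary image; the
# strongest column on either side of the midpoint seeds the sliding-window
# search below. A toy example:
#
#   img = np.zeros((4, 8), np.uint8)
#   img[2:, 2] = 1; img[2:, 6] = 1       # two short vertical "lane" strips
#   hist = get_hist(img)                 # sums rows 2..3 only
#   np.argmax(hist[:4])                  # -> 2  (left lane base)
#   4 + np.argmax(hist[4:])              # -> 6  (right lane base)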
left_a, left_b, left_c = [], [], []
right_a, right_b, right_c = [], [], []


def sliding_window(img, nwindows=15, margin=50, minpix=1, draw_windows=True):
    global left_a, left_b, left_c, right_a, right_b, right_c
    left_fit_ = np.empty(3)
    right_fit_ = np.empty(3)
    out_img = np.dstack((img, img, img)) * 255

    histogram = get_hist(img)
    # find peaks of the left and right halves
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    # Set the height of the windows
    window_height = int(img.shape[0] / nwindows)
    # Identify the x and y positions of all nonzero pixels in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current positions to be updated for each window
    leftx_current = leftx_base
    rightx_current = rightx_base

    # Create empty lists to receive left and right lane pixel indices
    left_lane_inds = []
    right_lane_inds = []

    # Step through the windows one by one
    for window in range(nwindows):
        # Identify window boundaries in x and y (and right and left)
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image
        if draw_windows:
            cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high),
                          (100, 255, 255), 1)
            cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high),
                          (100, 255, 255), 1)
        # Identify the nonzero pixels in x and y within the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        # Append these indices to the lists
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If more than minpix pixels were found, recenter the next window on their mean position
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

        # if len(good_right_inds) > minpix:
        #     rightx_current = int(np.mean([leftx_current + 900, np.mean(nonzerox[good_right_inds])]))
        # elif len(good_left_inds) > minpix:
        #     rightx_current = int(np.mean([np.mean(nonzerox[good_left_inds]) + 900, rightx_current]))
        # if len(good_left_inds) > minpix:
        #     leftx_current = int(np.mean([rightx_current - 900, np.mean(nonzerox[good_left_inds])]))
        # elif len(good_right_inds) > minpix:
        #     leftx_current = int(np.mean([np.mean(nonzerox[good_right_inds]) - 900, leftx_current]))

    # Concatenate the arrays of indices
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)

    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]

    if leftx.size and rightx.size:
        # Fit a second order polynomial to each lane line
        left_fit = np.polyfit(lefty, leftx, 2)
        right_fit = np.polyfit(righty, rightx, 2)

        left_a.append(left_fit[0])
        left_b.append(left_fit[1])
        left_c.append(left_fit[2])

        right_a.append(right_fit[0])
        right_b.append(right_fit[1])
        right_c.append(right_fit[2])

        # Smooth the fit coefficients over the last 10 frames
        left_fit_[0] = np.mean(left_a[-10:])
        left_fit_[1] = np.mean(left_b[-10:])
        left_fit_[2] = np.mean(left_c[-10:])

        right_fit_[0] = np.mean(right_a[-10:])
        right_fit_[1] = np.mean(right_b[-10:])
        right_fit_[2] = np.mean(right_c[-10:])

        # Generate x and y values for plotting
        ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])

        left_fitx = left_fit_[0] * ploty ** 2 + left_fit_[1] * ploty + left_fit_[2]
        right_fitx = right_fit_[0] * ploty ** 2 + right_fit_[1] * ploty + right_fit_[2]

        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 100]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 100, 255]

        return out_img, (left_fitx, right_fitx), (left_fit_, right_fit_), ploty
    else:
        return img, (0, 0), (0, 0), 0
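# The radius-of-curvature formula used in get_curve() below comes from fitting
# x = A*y**2 + B*y + C in world space and evaluating the standard expression
#
#   R(y) = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|
#        = (1 + (2*A*y + B)**2)**1.5 / |2*A|
#
# at the bottom of the image (y = y_eval), after rescaling pixels to meters
# with ym_per_pix and xm_per_pix.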
def get_curve(img, leftx, rightx):
    ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])
    y_eval = np.max(ploty)
    ym_per_pix = 1 / img.shape[0]  # meters per pixel in y dimension
    xm_per_pix = 0.1 / img.shape[0]  # meters per pixel in x dimension

    # Fit new polynomials to x, y in world space
    left_fit_cr = np.polyfit(ploty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty * ym_per_pix, rightx * xm_per_pix, 2)
    # Calculate the new radii of curvature
    left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * left_fit_cr[0])
    right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
        2 * right_fit_cr[0])

    car_pos = img.shape[1] / 2
    l_fit_x_int = left_fit_cr[0] * img.shape[0] ** 2 + left_fit_cr[1] * img.shape[0] + left_fit_cr[2]
    r_fit_x_int = right_fit_cr[0] * img.shape[0] ** 2 + right_fit_cr[1] * img.shape[0] + right_fit_cr[2]
    lane_center_position = (r_fit_x_int + l_fit_x_int) / 2
    center = (car_pos - lane_center_position) * xm_per_pix / 10
    # Now the radius of curvature is in meters

    return (l_fit_x_int, r_fit_x_int, center)


def draw_lanes(img, left_fit, right_fit, frameWidth, frameHeight, src):
    ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])
    color_img = np.zeros_like(img)

    left = np.array([np.transpose(np.vstack([left_fit, ploty]))])
    right = np.array([np.flipud(np.transpose(np.vstack([right_fit, ploty])))])
    points = np.hstack((left, right))

    cv2.fillPoly(color_img, np.int_(points), (0, 200, 255))
    inv_perspective = inv_perspective_warp(color_img, (frameWidth, frameHeight), dst=src)
    inv_perspective = cv2.addWeighted(img, 0.5, inv_perspective, 0.7, 0)
    return inv_perspective



def textDisplay(curve, img):
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img, str(curve), ((img.shape[1] // 2) - 30, 40), font, 1, (255, 255, 0), 2, cv2.LINE_AA)
    # check the sentinel value first, otherwise curve < -10 would shadow it
    if curve == -1000000:
        directionText = 'No Lane Found'
    elif curve > 10:
        directionText = 'Right'
    elif curve < -10:
        directionText = 'Left'
    else:
        directionText = 'Straight'
    cv2.putText(img, directionText, ((img.shape[1] // 2) - 35, (img.shape[0]) - 20), font, 1, (0, 200, 200), 2, cv2.LINE_AA)
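# stackImages() builds the debug mosaic shown in the "PipeLine" window of
# lane_detection_version.py: a 3x2 grid at 0.7 scale, with grayscale images
# auto-converted to BGR so the np.hstack/np.vstack shapes line up:
#
#   imgStacked = stackImages(0.7, ([imgUndis, frame],
#                                  [imgColor, imgCanny],
#                                  [imgWarp, imgSliding]))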
def stackImages(scale, imgArray):
    rows = len(imgArray)
    cols = len(imgArray[0])
    rowsAvailable = isinstance(imgArray[0], list)
    width = imgArray[0][0].shape[1]
    height = imgArray[0][0].shape[0]
    if rowsAvailable:
        for x in range(0, rows):
            for y in range(0, cols):
                if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                else:
                    imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
                if len(imgArray[x][y].shape) == 2:
                    imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
        imageBlank = np.zeros((height, width, 3), np.uint8)
        hor = [imageBlank] * rows
        hor_con = [imageBlank] * rows
        for x in range(0, rows):
            hor[x] = np.hstack(imgArray[x])
        ver = np.vstack(hor)
    else:
        for x in range(0, rows):
            if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
            else:
                imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
            if len(imgArray[x].shape) == 2:
                imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
        hor = np.hstack(imgArray)
        ver = hor
    return ver

def drawLines(img, lane_curve):
    myWidth = img.shape[1]
    myHeight = img.shape[0]
    # Draw the shifted tick marks, the lane-curve marker (green) and the
    # static centre reference line (yellow)
    for x in range(-30, 30):
        w = myWidth // 20
        cv2.line(img, (w * x + int(lane_curve // 100), myHeight - 30),
                 (w * x + int(lane_curve // 100), myHeight), (0, 0, 255), 2)
    cv2.line(img, (int(lane_curve // 100) + myWidth // 2, myHeight - 30),
             (int(lane_curve // 100) + myWidth // 2, myHeight), (0, 255, 0), 3)
    cv2.line(img, (myWidth // 2, myHeight - 50), (myWidth // 2, myHeight), (0, 255, 255), 2)

    return img
--------------------------------------------------------------------------------