├── fcws.wav ├── off.wav ├── on.wav ├── on-tacc.wav ├── steering.jpg ├── warning.wav ├── .gitattributes ├── PYTHONPATH.txt ├── README.md ├── grabscreen.py ├── directkeys-truck.py ├── directkeys.py ├── tacc-trucks.py └── tacc.py /fcws.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/fcws.wav -------------------------------------------------------------------------------- /off.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/off.wav -------------------------------------------------------------------------------- /on.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/on.wav -------------------------------------------------------------------------------- /on-tacc.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/on-tacc.wav -------------------------------------------------------------------------------- /steering.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/steering.jpg -------------------------------------------------------------------------------- /warning.wav: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tijayfly/ets-tacc-git/HEAD/warning.wav -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /PYTHONPATH.txt: -------------------------------------------------------------------------------- 1 | set PYTHONPATH=C:\users\YOUR-USERNAME\tensorflow\models;C:\users\YOUR-USERNAME\tensorflow\models\research;C:\users\YOUR-USERNAME\tensorflow\models\research\slim -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | RETIRED PRODUCT - long since deprecated, and preserved for archive purposes only. 2 | 3 | Adaptive cruise control and Lane Assist are now provided by default in ETS 1.50. 
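For the archive, a rough outline of how it worked: the main scripts grab the screen region ahead of the vehicle, run it through the SSD Inception v2 COCO detector from the TensorFlow models repo, turn each detected vehicle's bounding-box width into a coarse distance value, and press/release the throttle and brake keys via Windows SendInput. Lane keeping ("autosteer") runs Canny edge detection and a Hough transform over a thin strip of the image and taps the A/D keys to hold the detected lane centre on a fixed pixel. Hotkeys are read with the `keyboard` module: in tacc.py, `c` toggles Autopilot, `n` toggles TACC, `z`/`x` request lane changes, `p` runs a scripted manoeuvre, and `q` in the preview window quits; tacc-trucks.py keeps only the `n`, `z`, `x` and `q` bindings. Below is a minimal sketch of the follow-distance decision; it is illustrative only (`follow_logic`, `press_key` and `release_key` are not part of this repo), with the thresholds taken from tacc.py.

```python
# Minimal sketch (not the original code) of the follow-distance logic in tacc.py.
# The 0.5/0.7 thresholds and the 0.4-0.55 lane window mirror tacc.py; press_key and
# release_key are hypothetical stand-ins for directkeys.PressKey / directkeys.ReleaseKey.
def follow_logic(box, press_key, release_key, accel_key=0x11, brake_key=0x1F):  # W and S scan codes
    ymin, xmin, ymax, xmax = box                       # normalized TF detection box
    mid_x = (xmin + xmax) / 2                          # horizontal centre of the object
    apx_distance = round((1 - (xmax - xmin)) ** 4, 1)  # wider box => closer => smaller value
    if 0.4 < mid_x < 0.55:                             # only react to objects in our lane window
        if 0.5 < apx_distance <= 0.7:                  # mid range: lift off and slow down
            release_key(accel_key)
            press_key(brake_key)
        elif apx_distance > 0.7:                       # clear road: accelerate
            release_key(brake_key)
            press_key(accel_key)
        # below 0.5, tacc.py plays fcws.wav, applies the friction brake and re-engages cruise
    return apx_distance
```

Anything closer than the 0.5 band triggers the friction-brake-and-re-engage sequence in the real script rather than a simple key hold.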
4 | 5 | Files: 6 | - directkeys.py: Exports keycodes for use in tacc.py 7 | - directkeys-truck.py: Exports keycodes for use in tacc-trucks.py 8 | - grabscreen.py: Screen-capture helper library (Win32 GDI grab, by Frannecklp) 9 | - tacc.py: Main script for use with cars (full "Autopilot") 10 | - tacc-trucks.py: Main script for use with trucks (TACC only) 11 | - PYTHONPATH.txt: Sets PYTHONPATH so Python can find the TensorFlow models/research packages 12 | - steering.jpg: Required steering settings in ETS 13 | - .wav files: Sounds used in Freeway Assist 14 | 15 | This project uses some components from: 16 | - https://pythonprogramming.net/detecting-distances-self-driving-car/ 17 | - https://github.com/JackPaul3413/self-driving-car-using-opencv-test-with-forza-horizon/tree/master 18 |
--------------------------------------------------------------------------------
/grabscreen.py:
--------------------------------------------------------------------------------
1 | # Done by Frannecklp 2 | 3 | import cv2 4 | import numpy as np 5 | import win32gui, win32ui, win32con, win32api 6 | 7 | def grab_screen(region=None): 8 | 9 | hwin = win32gui.GetDesktopWindow() 10 | 11 | if region: 12 | left,top,x2,y2 = region 13 | #width = x2 - left + 1 14 | #height = y2 - top + 1 15 | width = x2 16 | height = y2 17 | else: 18 | width = win32api.GetSystemMetrics(win32con.SM_CXVIRTUALSCREEN) 19 | height = win32api.GetSystemMetrics(win32con.SM_CYVIRTUALSCREEN) 20 | left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN) 21 | top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN) 22 | 23 | hwindc = win32gui.GetWindowDC(hwin) 24 | srcdc = win32ui.CreateDCFromHandle(hwindc) 25 | memdc = srcdc.CreateCompatibleDC() 26 | bmp = win32ui.CreateBitmap() 27 | bmp.CreateCompatibleBitmap(srcdc, width, height) 28 | memdc.SelectObject(bmp) 29 | memdc.BitBlt((0, 0), (width, height), srcdc, (left, top), win32con.SRCCOPY) 30 | 31 | signedIntsArray = bmp.GetBitmapBits(True) 32 | img = np.frombuffer(signedIntsArray, dtype='uint8') # frombuffer: np.fromstring's binary mode is deprecated 33 | img.shape = (height,width,4) 34 | 35 | srcdc.DeleteDC() 36 | memdc.DeleteDC() 37 | win32gui.ReleaseDC(hwin, hwindc) 38 | win32gui.DeleteObject(bmp.GetHandle()) 39 | 40 | return cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
--------------------------------------------------------------------------------
/directkeys-truck.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Fri Dec 28 09:12:54 2018 4 | 5 | @author: jack 6 | """ 7 | 8 | import ctypes 9 | import time 10 | 11 | SendInput = ctypes.windll.user32.SendInput 12 | 13 | 14 | W = 0x11 15 | A = 0x1E 16 | S = 0x1F 17 | D = 0x20 18 | E = 0x12 19 | R = 0x13 20 | C = 0x2E 21 | UP = 0xc8 22 | SPACE = 0x39 23 | ONE = 0x02 24 | Z = 0x2C 25 | X = 0x2D 26 | 27 | # C struct redefinitions 28 | PUL = ctypes.POINTER(ctypes.c_ulong) 29 | class KeyBdInput(ctypes.Structure): 30 | _fields_ = [("wVk", ctypes.c_ushort), 31 | ("wScan", ctypes.c_ushort), 32 | ("dwFlags", ctypes.c_ulong), 33 | ("time", ctypes.c_ulong), 34 | ("dwExtraInfo", PUL)] 35 | 36 | class HardwareInput(ctypes.Structure): 37 | _fields_ = [("uMsg", ctypes.c_ulong), 38 | ("wParamL", ctypes.c_short), 39 | ("wParamH", ctypes.c_ushort)] 40 | 41 | class MouseInput(ctypes.Structure): 42 | _fields_ = [("dx", ctypes.c_long), 43 | ("dy", ctypes.c_long), 44 | ("mouseData", ctypes.c_ulong), 45 | ("dwFlags", ctypes.c_ulong), 46 | ("time",ctypes.c_ulong), 47 | ("dwExtraInfo", PUL)] 48 | 49 | class Input_I(ctypes.Union): 50 | _fields_ = [("ki", KeyBdInput), 51 | ("mi", MouseInput), 52 | ("hi", HardwareInput)] 53 | 54 | class 
Input(ctypes.Structure): 55 | _fields_ = [("type", ctypes.c_ulong), 56 | ("ii", Input_I)] 57 | 58 | # Actuals Functions 59 | 60 | def PressKey(hexKeyCode): 61 | extra = ctypes.c_ulong(0) 62 | ii_ = Input_I() 63 | ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) ) 64 | x = Input( ctypes.c_ulong(1), ii_ ) 65 | ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x)) 66 | 67 | def ReleaseKey(hexKeyCode): 68 | extra = ctypes.c_ulong(0) 69 | ii_ = Input_I() 70 | ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) ) 71 | x = Input( ctypes.c_ulong(1), ii_ ) 72 | ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x)) 73 | 74 | if __name__ == '__main__': 75 | PressKey(0x11) 76 | time.sleep(1) 77 | ReleaseKey(0x11) 78 | time.sleep(1) -------------------------------------------------------------------------------- /directkeys.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Fri Dec 28 09:12:54 2018 4 | 5 | @author: jack 6 | """ 7 | 8 | import ctypes 9 | import time 10 | 11 | SendInput = ctypes.windll.user32.SendInput 12 | 13 | 14 | W = 0x11 15 | A = 0x1E 16 | S = 0x1F 17 | D = 0x20 18 | E = 0x12 19 | R = 0x13 20 | C = 0x2E 21 | UP = 0x27 22 | DOWN = 0xd0 23 | SPACE = 0x39 24 | ONE = 0x02 25 | Z = 0x2C 26 | X = 0x2D 27 | SHIFTUP = 0x34 28 | SHIFTDOWN = 0x33 29 | 30 | # C struct redefinitions 31 | PUL = ctypes.POINTER(ctypes.c_ulong) 32 | class KeyBdInput(ctypes.Structure): 33 | _fields_ = [("wVk", ctypes.c_ushort), 34 | ("wScan", ctypes.c_ushort), 35 | ("dwFlags", ctypes.c_ulong), 36 | ("time", ctypes.c_ulong), 37 | ("dwExtraInfo", PUL)] 38 | 39 | class HardwareInput(ctypes.Structure): 40 | _fields_ = [("uMsg", ctypes.c_ulong), 41 | ("wParamL", ctypes.c_short), 42 | ("wParamH", ctypes.c_ushort)] 43 | 44 | class MouseInput(ctypes.Structure): 45 | _fields_ = [("dx", ctypes.c_long), 46 | ("dy", ctypes.c_long), 47 | ("mouseData", ctypes.c_ulong), 48 | ("dwFlags", ctypes.c_ulong), 49 | ("time",ctypes.c_ulong), 50 | ("dwExtraInfo", PUL)] 51 | 52 | class Input_I(ctypes.Union): 53 | _fields_ = [("ki", KeyBdInput), 54 | ("mi", MouseInput), 55 | ("hi", HardwareInput)] 56 | 57 | class Input(ctypes.Structure): 58 | _fields_ = [("type", ctypes.c_ulong), 59 | ("ii", Input_I)] 60 | 61 | # Actuals Functions 62 | 63 | def PressKey(hexKeyCode): 64 | extra = ctypes.c_ulong(0) 65 | ii_ = Input_I() 66 | ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008, 0, ctypes.pointer(extra) ) 67 | x = Input( ctypes.c_ulong(1), ii_ ) 68 | ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x)) 69 | 70 | def ReleaseKey(hexKeyCode): 71 | extra = ctypes.c_ulong(0) 72 | ii_ = Input_I() 73 | ii_.ki = KeyBdInput( 0, hexKeyCode, 0x0008 | 0x0002, 0, ctypes.pointer(extra) ) 74 | x = Input( ctypes.c_ulong(1), ii_ ) 75 | ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x)) 76 | 77 | if __name__ == '__main__': 78 | PressKey(0x11) 79 | time.sleep(1) 80 | ReleaseKey(0x11) 81 | time.sleep(1) -------------------------------------------------------------------------------- /tacc-trucks.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # # Object Detection Demo 3 | # License: Apache License 2.0 (https://github.com/tensorflow/models/blob/master/LICENSE) 4 | # source: https://github.com/tensorflow/models 5 | import numpy as np 6 | import os 7 | import six.moves.urllib as urllib 8 | import sys 9 | import tarfile 
10 | import tensorflow as tf 11 | import zipfile 12 | import win32gui 13 | import win32con 14 | import keyboard 15 | import time 16 | from directkeys import PressKey, ReleaseKey, W, A, S, D, E, Z, X, R, C, UP, ONE, SPACE 17 | from collections import defaultdict 18 | from io import StringIO 19 | from matplotlib import pyplot as plt 20 | from PIL import Image 21 | from grabscreen import grab_screen 22 | from playsound import playsound 23 | import cv2 24 | #autosteer START 25 | import math 26 | #select the region of interest for the detected edges 27 | def roi(image, polygons): 28 | mask = np.zeros_like(image) 29 | cv2.fillPoly(mask, polygons, 255) 30 | masked = cv2.bitwise_and(image, mask) 31 | return masked 32 | 33 | #display the lines on the screen 34 | def display_line(image, line): 35 | line_image = np.zeros_like(image) 36 | if lines is not None: 37 | for line in lines: 38 | x1, y1, x2, y2 = line[0] 39 | cv2.line(line_image,(x1,y1),(x2,y2),(0,255,0),10) 40 | return line_image 41 | 42 | #processing image for detecting edge using canny edge detection and blur the image using gaussian blur 43 | def proceesed_img(original_image): 44 | proceesed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY) 45 | proceesed_img = cv2.GaussianBlur(proceesed_img,(5,5), 0) 46 | proceesed_img = cv2.Canny(proceesed_img, threshold1 =150, threshold2 = 163 ) 47 | #TRUCKONLY these polygon repressent the data point within with the pixel data are selected for lane detection 48 | #cockpit view 49 | polygons = np.array([[200,345],[650,345],[650,380],[200,380]]) 50 | proceesed_img = roi(proceesed_img, [polygons]) 51 | return proceesed_img 52 | 53 | #this funtions sends the input to the game which is running on left side of screen 54 | def straight(): 55 | ReleaseKey(A) 56 | ReleaseKey(D) 57 | 58 | def little_left(): 59 | #indicate start 60 | #PressKey(Z) 61 | #ReleaseKey(Z) 62 | #indicate end 63 | PressKey(A) 64 | time.sleep(0.03) 65 | ReleaseKey(A) 66 | time.sleep(0.01) 67 | 68 | def little_right(): 69 | #indicate start 70 | #PressKey(X) 71 | #ReleaseKey(X) 72 | #indicate end 73 | PressKey(D) 74 | time.sleep(0.03) 75 | ReleaseKey(D) 76 | time.sleep(0.01) 77 | #autosteer END 78 | 79 | 80 | # This is needed since the notebook is stored in the object_detection folder. 81 | sys.path.append("..") 82 | 83 | # ## Object detection imports 84 | # Here are the imports from the object detection module. 85 | 86 | from object_detection.utils import label_map_util 87 | 88 | from object_detection.utils import visualization_utils as vis_util 89 | 90 | # # Model preparation 91 | # What model to download. 92 | #MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017' 93 | #MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29' 94 | MODEL_NAME = 'ssd_inception_v2_coco_2018_01_28' 95 | MODEL_FILE = MODEL_NAME + '.tar.gz' 96 | DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/' 97 | 98 | # Path to frozen detection graph. This is the actual model that is used for the object detection. 99 | PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb' 100 | 101 | # List of the strings that is used to add correct label for each box. 
102 | PATH_TO_LABELS = os.path.join('D:/Documents/tensorflow/models/research/object_detection/data', 'mscoco_label_map.pbtxt') 103 | 104 | NUM_CLASSES = 90 105 | 106 | # ## Download Model 107 | opener = urllib.request.URLopener() 108 | opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE) 109 | tar_file = tarfile.open(MODEL_FILE) 110 | for file in tar_file.getmembers(): 111 | file_name = os.path.basename(file.name) 112 | if 'frozen_inference_graph.pb' in file_name: 113 | tar_file.extract(file, os.getcwd()) 114 | 115 | 116 | # ## Load a (frozen) Tensorflow model into memory. 117 | detection_graph = tf.Graph() 118 | with detection_graph.as_default(): 119 | od_graph_def = tf.GraphDef() 120 | with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: 121 | serialized_graph = fid.read() 122 | od_graph_def.ParseFromString(serialized_graph) 123 | tf.import_graph_def(od_graph_def, name='') 124 | 125 | # define variable for later 126 | x = False 127 | y = False 128 | apx_stored = 0.0 129 | autosteerEnabled = True 130 | directionLeft = 0 131 | directionRight = 0 132 | directionStraight = 1 133 | cars = 1 134 | 135 | # ## Loading label map 136 | # Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine 137 | label_map = label_map_util.load_labelmap(PATH_TO_LABELS) 138 | categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) 139 | category_index = label_map_util.create_category_index(categories) 140 | 141 | # ## Helper code 142 | def load_image_into_numpy_array(image): 143 | (im_width, im_height) = image.size 144 | return np.array(image.getdata()).reshape( 145 | (im_height, im_width, 3)).astype(np.uint8) 146 | 147 | # Size, in inches, of the output images. 148 | IMAGE_SIZE = (12, 8) 149 | 150 | 151 | 152 | with detection_graph.as_default(): 153 | with tf.Session(graph=detection_graph) as sess: 154 | while True: 155 | #screen = cv2.resize(grab_screen(region=(0,40,1280,745)), (800,450)) 156 | #200 157 | screen = cv2.resize(grab_screen(region=(536,225,1056,530)), (1056,530)) 158 | image_np = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB) 159 | # Expand dimensions since the model expects images to have shape: [1, None, None, 3] 160 | image_np_expanded = np.expand_dims(image_np, axis=0) 161 | image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') 162 | # Each box represents a part of the image where a particular object was detected. 163 | boxes = detection_graph.get_tensor_by_name('detection_boxes:0') 164 | # Each score represent how level of confidence for each of the objects. 165 | # Score is shown on the result image, together with the class label. 166 | scores = detection_graph.get_tensor_by_name('detection_scores:0') 167 | classes = detection_graph.get_tensor_by_name('detection_classes:0') 168 | num_detections = detection_graph.get_tensor_by_name('num_detections:0') 169 | # Actual detection. 170 | (boxes, scores, classes, num_detections) = sess.run( 171 | [boxes, scores, classes, num_detections], 172 | feed_dict={image_tensor: image_np_expanded}) 173 | # Visualization of the results of a detection. 
174 | vis_util.visualize_boxes_and_labels_on_image_array( 175 | image_np, 176 | np.squeeze(boxes), 177 | np.squeeze(classes).astype(np.int32), 178 | np.squeeze(scores), 179 | category_index, 180 | use_normalized_coordinates=True, 181 | line_thickness=8) 182 | 183 | for i,b in enumerate(boxes[0]): 184 | 185 | # Main detection 1 person (mop up) 3 car 6 bus 8 truck 7 train (tanker) 37 sports ball (tanker) 11 fire hydrant (weird trucks) 73 laptop (tankers) 61 cake (buses sometimes) 77 cell phone (some cars from TRUCKONLY perspective) 186 | if classes[0][i] == 1 or classes[0][i] == 3 or classes[0][i] == 6 or classes[0][i] == 8 or classes[0][i] == 7 or classes[0][i] == 37 or classes[0][i] == 11 or classes[0][i] == 73 or classes[0][i] == 61 or classes[0][i] == 77: 187 | cars = 1 188 | if scores[0][i] >= 0.35: 189 | mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 190 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 191 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 192 | cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 193 | 194 | # TRUCKONLY midx right has been reduced across all & apx dist raised across cc adjusts not friction code. 195 | # If I see something between distances 0.5 and 0.7 within my AOI, I should slow 196 | if apx_distance <=0.8 and apx_distance >0.6: 197 | if mid_x > 0.4 and mid_x < 0.5: 198 | if x == True or y == True: 199 | ReleaseKey(W) 200 | PressKey(S) 201 | 202 | # If I see something closer than distance 0.5 within my AOI, friction brake then re-engage tacc 203 | elif apx_distance <=0.6 and apx_distance >=0.1: 204 | if mid_x > 0.4 and mid_x < 0.5: 205 | if x == True or y == True: 206 | if apx_stored != apx_distance: 207 | playsound('fcws.wav') 208 | ReleaseKey(W) 209 | PressKey(SPACE) 210 | ReleaseKey(SPACE) 211 | time.sleep(1.5) 212 | PressKey(SPACE) 213 | ReleaseKey(SPACE) 214 | PressKey(ONE) 215 | ReleaseKey(ONE) 216 | PressKey(UP) 217 | time.sleep(0.2) 218 | ReleaseKey(UP) 219 | PressKey(R) 220 | ReleaseKey(R) 221 | apx_stored = apx_distance 222 | time.sleep(2) 223 | 224 | # If I see something greater than distance 0.7 in my AOI, accelerate 225 | elif apx_distance >0.8: 226 | if mid_x > 0.4 and mid_x < 0.5: 227 | if x == True or y == True: 228 | ReleaseKey(S) 229 | PressKey(W) 230 | 231 | # Traffic light detection 232 | '''elif classes[0][i] == 10: 233 | cars = 0 234 | if scores[0][i] >= 0.5: 235 | mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 236 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 237 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 238 | cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 239 | 240 | if apx_distance <=1.0: 241 | if x == True or y == True: 242 | if apx_stored != apx_distance and cars == 0: 243 | # Wait before applying friction brakes as we see it early 244 | ReleaseKey(W) 245 | playsound('warning.wav') 246 | time.sleep(2) 247 | # Apply friction brakes when closer for 2 secs, check still engaged 248 | if x == True or y == True and cars == 0: 249 | PressKey(SPACE) 250 | ReleaseKey(SPACE) 251 | time.sleep(2) 252 | # Release friction brakes, reset view and cancel AP 253 | PressKey(SPACE) 254 | ReleaseKey(SPACE) 255 | PressKey(ONE) 256 | ReleaseKey(ONE) 257 | apx_stored = apx_distance 258 | x = False 259 | y = False 260 | playsound('off.wav') 261 | 262 | # Stop sign detection 263 | elif classes[0][i] == 13: 264 | cars = 0 265 | if scores[0][i] >= 0.5: 266 | 
mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 267 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 268 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 269 | cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 270 | 271 | if apx_distance <=1.0: 272 | if x == False or y == False and cars == 0: 273 | if apx_stored != apx_distance and cars == 0: 274 | playsound('warning.wav') 275 | time.sleep(2) 276 | apx_stored = apx_distance''' 277 | 278 | cv2.imshow('window',cv2.resize(image_np,(1056,530))) 279 | 280 | if keyboard.is_pressed('z'): 281 | if x == True: # lane change to the left 282 | autosteerEnabled = False 283 | PressKey(A) 284 | time.sleep(0.5) 285 | ReleaseKey(A) 286 | PressKey(D) 287 | time.sleep(0.6) 288 | ReleaseKey(D) 289 | PressKey(Z) 290 | ReleaseKey(Z) 291 | autosteerEnabled = True 292 | 293 | elif keyboard.is_pressed('x'): 294 | if x == True: # lane change to the right 295 | autosteerEnabled = False 296 | PressKey(D) 297 | time.sleep(0.5) 298 | ReleaseKey(D) 299 | PressKey(A) 300 | time.sleep(0.6) 301 | ReleaseKey(A) 302 | PressKey(X) 303 | ReleaseKey(X) 304 | autosteerEnabled = True 305 | 306 | elif keyboard.is_pressed('n'): 307 | if y == False: 308 | y = True 309 | x = False 310 | playsound('on-tacc.wav') 311 | elif y == True: 312 | y = False 313 | playsound('off.wav') 314 | 315 | '''elif keyboard.is_pressed('c'): 316 | if x == False: 317 | x = True 318 | y = False 319 | playsound('on.wav') 320 | elif x == True: 321 | x = False 322 | playsound('off.wav')''' 323 | 324 | #autosteer START 325 | if x == True: 326 | new_image = proceesed_img(screen) 327 | lines = cv2.HoughLinesP(new_image, 2 ,np.pi/180, 10,np.array([]), minLineLength = 0.005, maxLineGap = 50) 328 | left_coordinate = [] 329 | right_coordinate = [] 330 | 331 | if lines is not None: 332 | for line in lines: 333 | x1,y1,x2,y2 = line[0] 334 | slope = (x2-x1)/(y2-y1) 335 | if slope<0: 336 | left_coordinate.append([x1,y1,x2,y2]) 337 | elif slope>0: 338 | right_coordinate.append([x1,y1,x2,y2]) 339 | l_avg = np.average(left_coordinate, axis =0) 340 | r_avg = np.average(right_coordinate, axis =0) 341 | l =l_avg.tolist() 342 | r = r_avg.tolist() 343 | try: 344 | #with the found slope and intercept, this is used to find the value of point x on both left and right line 345 | #the center point is denoted by finding center distance between two lines 346 | c1,d1,c2,d2 = r 347 | a1,b1, a2,b2 = l 348 | l_slope = (b2-b1)/(a2-a1) 349 | r_slope = (d2-d1)/(c2-c1) 350 | l_intercept = b1 - (l_slope*a1) 351 | r_intercept = d1 - (r_slope*c1) 352 | y=360 353 | l_x = (y - l_intercept)/l_slope 354 | r_x = (y - r_intercept)/r_slope 355 | distance = math.sqrt((r_x - l_x)**2+(y-y)**2) 356 | #line_center repressent the center point on the line 357 | line_center = distance/2 358 | center_pt =[(l_x+line_center)] 359 | #TRUCKONLY autosteer criteria for normal curves 360 | f_l = [(l_x+(line_center*1.06))] 361 | f_r = [(l_x+(line_center*0.02))] 362 | #TRUCKONLY create a center point. Higher = left bias. Lower = right bias. 363 | center_fixed =[373] 364 | x_1 = int(l_x) 365 | x_2 = int(r_x) 366 | '''The logic behind this code is simple, 367 | the center_fixed should be in the center_line. 
368 | means the cars is in center of the lane, if its get away from center, 369 | then the left and right functions are used accordingly''' 370 | #straight 371 | if center_pt==center_fixed and autosteerEnabled == True: 372 | straight() 373 | directionLeft = 0 374 | directionRight = 0 375 | directionStraight = 1 376 | #normal curves 377 | elif center_fixed < f_r and autosteerEnabled == True: 378 | little_right() 379 | directionLeft = 0 380 | directionRight = 1 381 | directionStraight = 0 382 | elif center_fixed > f_l and autosteerEnabled == True: 383 | little_left() 384 | directionLeft = 1 385 | directionRight = 0 386 | directionStraight = 0 387 | #not sure 388 | else: 389 | straight() 390 | directionLeft = 0 391 | directionRight = 0 392 | directionStraight = 1 393 | except: 394 | #no lines 395 | pass 396 | if directionLeft == 1: 397 | directionLeft = 1 398 | directionRight = 0 399 | directionStraight = 0 400 | little_left() 401 | elif directionRight == 1: 402 | directionLeft = 0 403 | directionRight = 1 404 | directionStraight = 0 405 | little_right() 406 | elif directionStraight == 1: 407 | directionLeft = 0 408 | directionRight = 0 409 | directionStraight = 1 410 | straight() 411 | 412 | line_image = display_line(screen,lines) 413 | combo_image = cv2.addWeighted(screen,0.8, line_image,1.2,2) 414 | cv2.imshow('lane-detection',cv2.cvtColor(combo_image, cv2.COLOR_BGR2RGB)) 415 | #autosteer END 416 | 417 | if cv2.waitKey(25) & 0xFF == ord('q'): 418 | cv2.destroyAllWindows() 419 | break -------------------------------------------------------------------------------- /tacc.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | # # Object Detection Demo 3 | # License: Apache License 2.0 (https://github.com/tensorflow/models/blob/master/LICENSE) 4 | # source: https://github.com/tensorflow/models 5 | import numpy as np 6 | import os 7 | import six.moves.urllib as urllib 8 | import sys 9 | import tarfile 10 | import tensorflow as tf 11 | import zipfile 12 | import win32gui 13 | import win32con 14 | import keyboard 15 | import time 16 | from directkeys import PressKey, ReleaseKey, W, A, S, D, E, Z, X, R, C, UP, ONE, SPACE, DOWN, SHIFTUP, SHIFTDOWN 17 | from collections import defaultdict 18 | from io import StringIO 19 | from matplotlib import pyplot as plt 20 | from PIL import Image 21 | from grabscreen import grab_screen 22 | from playsound import playsound 23 | import cv2 24 | #autosteer START 25 | import math 26 | #select the region of interest for the detected edges 27 | def roi(image, polygons): 28 | mask = np.zeros_like(image) 29 | cv2.fillPoly(mask, polygons, 255) 30 | masked = cv2.bitwise_and(image, mask) 31 | return masked 32 | 33 | #display the lines on the screen 34 | def display_line(image, line): 35 | line_image = np.zeros_like(image) 36 | if lines is not None: 37 | for line in lines: 38 | x1, y1, x2, y2 = line[0] 39 | cv2.line(line_image,(x1,y1),(x2,y2),(0,255,0),10) 40 | return line_image 41 | 42 | #processing image for detecting edge using canny edge detection and blur the image using gaussian blur 43 | def proceesed_img(original_image): 44 | proceesed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY) 45 | proceesed_img = cv2.GaussianBlur(proceesed_img,(5,5), 0) 46 | proceesed_img = cv2.Canny(proceesed_img, threshold1 =150, threshold2 = 163 ) 47 | #these polygon repressent the data point within with the pixel data are selected for lane detection 48 | #cockpit view 49 | polygons = 
np.array([[200,395],[650,395],[650,430],[200,430]]) 50 | proceesed_img = roi(proceesed_img, [polygons]) 51 | return proceesed_img 52 | 53 | #this funtions sends the input to the game which is running on left side of screen 54 | def straight(): 55 | ReleaseKey(A) 56 | ReleaseKey(D) 57 | 58 | def little_left(): 59 | #indicate start 60 | #PressKey(Z) 61 | #ReleaseKey(Z) 62 | #indicate end 63 | PressKey(A) 64 | time.sleep(0.02) 65 | ReleaseKey(A) 66 | time.sleep(0.025) 67 | 68 | def little_right(): 69 | #indicate start 70 | #PressKey(X) 71 | #ReleaseKey(X) 72 | #indicate end 73 | PressKey(D) 74 | time.sleep(0.02) 75 | ReleaseKey(D) 76 | time.sleep(0.025) 77 | #autosteer END 78 | 79 | # This is needed since the notebook is stored in the object_detection folder. 80 | sys.path.append("..") 81 | 82 | # ## Object detection imports 83 | # Here are the imports from the object detection module. 84 | 85 | from object_detection.utils import label_map_util 86 | 87 | from object_detection.utils import visualization_utils as vis_util 88 | 89 | # # Model preparation 90 | # What model to download. 91 | #MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017' 92 | #MODEL_NAME = 'ssd_mobilenet_v2_coco_2018_03_29' 93 | MODEL_NAME = 'ssd_inception_v2_coco_2018_01_28' 94 | MODEL_FILE = MODEL_NAME + '.tar.gz' 95 | DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/' 96 | 97 | # Path to frozen detection graph. This is the actual model that is used for the object detection. 98 | PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb' 99 | 100 | # List of the strings that is used to add correct label for each box. 101 | PATH_TO_LABELS = os.path.join('D:/Documents/tensorflow/models/research/object_detection/data', 'mscoco_label_map.pbtxt') 102 | 103 | NUM_CLASSES = 90 104 | 105 | # ## Download Model 106 | opener = urllib.request.URLopener() 107 | opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE) 108 | tar_file = tarfile.open(MODEL_FILE) 109 | for file in tar_file.getmembers(): 110 | file_name = os.path.basename(file.name) 111 | if 'frozen_inference_graph.pb' in file_name: 112 | tar_file.extract(file, os.getcwd()) 113 | 114 | 115 | # ## Load a (frozen) Tensorflow model into memory. 116 | detection_graph = tf.Graph() 117 | with detection_graph.as_default(): 118 | od_graph_def = tf.GraphDef() 119 | with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid: 120 | serialized_graph = fid.read() 121 | od_graph_def.ParseFromString(serialized_graph) 122 | tf.import_graph_def(od_graph_def, name='') 123 | 124 | # define variable for later 125 | x = False 126 | y = False 127 | apx_stored = 0.0 128 | autosteerEnabled = True 129 | directionLeft = 0 130 | directionRight = 0 131 | directionStraight = 1 132 | cars = 1 133 | 134 | # ## Loading label map 135 | # Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine 136 | label_map = label_map_util.load_labelmap(PATH_TO_LABELS) 137 | categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True) 138 | category_index = label_map_util.create_category_index(categories) 139 | 140 | # ## Helper code 141 | def load_image_into_numpy_array(image): 142 | (im_width, im_height) = image.size 143 | return np.array(image.getdata()).reshape( 144 | (im_height, im_width, 3)).astype(np.uint8) 145 | 146 | # Size, in inches, of the output images. 147 | IMAGE_SIZE = (12, 8) 148 | 149 | 150 | 151 | with detection_graph.as_default(): 152 | with tf.Session(graph=detection_graph) as sess: 153 | while True: 154 | #screen = cv2.resize(grab_screen(region=(0,40,1280,745)), (800,450)) 155 | screen = cv2.resize(grab_screen(region=(536,98,1056,530)), (1056,530)) 156 | image_np = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB) 157 | # Expand dimensions since the model expects images to have shape: [1, None, None, 3] 158 | image_np_expanded = np.expand_dims(image_np, axis=0) 159 | image_tensor = detection_graph.get_tensor_by_name('image_tensor:0') 160 | # Each box represents a part of the image where a particular object was detected. 161 | boxes = detection_graph.get_tensor_by_name('detection_boxes:0') 162 | # Each score represent how level of confidence for each of the objects. 163 | # Score is shown on the result image, together with the class label. 164 | scores = detection_graph.get_tensor_by_name('detection_scores:0') 165 | classes = detection_graph.get_tensor_by_name('detection_classes:0') 166 | num_detections = detection_graph.get_tensor_by_name('num_detections:0') 167 | # Actual detection. 168 | (boxes, scores, classes, num_detections) = sess.run( 169 | [boxes, scores, classes, num_detections], 170 | feed_dict={image_tensor: image_np_expanded}) 171 | # Visualization of the results of a detection. 
172 | vis_util.visualize_boxes_and_labels_on_image_array( 173 | image_np, 174 | np.squeeze(boxes), 175 | np.squeeze(classes).astype(np.int32), 176 | np.squeeze(scores), 177 | category_index, 178 | use_normalized_coordinates=True, 179 | line_thickness=8) 180 | 181 | for i,b in enumerate(boxes[0]): 182 | 183 | # Main detection 1 person (mop up) 3 car 6 bus 8 truck 7 train (tanker) 37 sports ball (tanker) 11 fire hydrant (weird trucks) 73 laptop (tankers) 61 cake (buses sometimes) 184 | if classes[0][i] == 1 or classes[0][i] == 3 or classes[0][i] == 6 or classes[0][i] == 8 or classes[0][i] == 7 or classes[0][i] == 37 or classes[0][i] == 11 or classes[0][i] == 73 or classes[0][i] == 61: 185 | cars = 1 186 | if scores[0][i] >= 0.35: 187 | mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 188 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 189 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 190 | cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 191 | 192 | # If I see something between distances 0.5 and 0.7 within my AOI, I should slow 193 | if apx_distance <=0.7 and apx_distance >0.5: 194 | if mid_x > 0.4 and mid_x < 0.55: 195 | if x == True or y == True: 196 | ReleaseKey(W) 197 | PressKey(S) 198 | 199 | # If I see something closer than distance 0.5 within my AOI, friction brake then re-engage tacc 200 | elif apx_distance <=0.5 and apx_distance >=0.1: 201 | if mid_x > 0.4 and mid_x < 0.55: 202 | if x == True or y == True: 203 | if apx_stored != apx_distance: 204 | playsound('fcws.wav') 205 | ReleaseKey(W) 206 | PressKey(SPACE) 207 | ReleaseKey(SPACE) 208 | time.sleep(1.5) 209 | PressKey(SPACE) 210 | ReleaseKey(SPACE) 211 | PressKey(ONE) 212 | ReleaseKey(ONE) 213 | PressKey(UP) 214 | time.sleep(0.2) 215 | ReleaseKey(UP) 216 | PressKey(R) 217 | ReleaseKey(R) 218 | apx_stored = apx_distance 219 | time.sleep(2) 220 | 221 | # If I see something greater than distance 0.7 in my AOI, accelerate 222 | elif apx_distance >0.7: 223 | if mid_x > 0.4 and mid_x < 0.55: 224 | if x == True or y == True: 225 | ReleaseKey(S) 226 | PressKey(W) 227 | 228 | # Traffic light detection 229 | elif classes[0][i] == 10: 230 | cars = 0 231 | if scores[0][i] >= 0.8: 232 | mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 233 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 234 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 235 | cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 236 | 237 | if apx_distance <=1.0: 238 | if x == True or y == True: 239 | if apx_stored != apx_distance and cars == 0: 240 | # Wait before applying friction brakes as we see it early 241 | ReleaseKey(W) 242 | playsound('warning.wav') 243 | time.sleep(2) 244 | # Apply friction brakes when closer for 2 secs, check still engaged 245 | if x == True or y == True and cars == 0: 246 | PressKey(SPACE) 247 | ReleaseKey(SPACE) 248 | time.sleep(2) 249 | # Release friction brakes, reset view and cancel AP 250 | PressKey(SPACE) 251 | ReleaseKey(SPACE) 252 | PressKey(ONE) 253 | ReleaseKey(ONE) 254 | apx_stored = apx_distance 255 | x = False 256 | y = False 257 | playsound('off.wav') 258 | 259 | # Stop sign detection 260 | elif classes[0][i] == 13: 261 | cars = 0 262 | if scores[0][i] >= 0.5: 263 | mid_x = (boxes[0][i][1]+boxes[0][i][3])/2 264 | mid_y = (boxes[0][i][0]+boxes[0][i][2])/2 265 | apx_distance = round(((1 - (boxes[0][i][3] - boxes[0][i][1]))**4),1) 266 | 
cv2.putText(image_np, '{}'.format(apx_distance), (int(mid_x*1056),int(mid_y*530)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255), 2) 267 | 268 | if apx_distance <=1.0: 269 | if x == False or y == False and cars == 0: 270 | if apx_stored != apx_distance and cars == 0: 271 | playsound('warning.wav') 272 | time.sleep(2) 273 | apx_stored = apx_distance 274 | 275 | cv2.imshow('window',cv2.resize(image_np,(1056,530))) 276 | 277 | if keyboard.is_pressed('p'): 278 | PressKey(D) 279 | time.sleep(2) 280 | PressKey(UP) 281 | time.sleep(2.3) 282 | ReleaseKey(D) 283 | ReleaseKey(UP) 284 | PressKey(SPACE) 285 | ReleaseKey(SPACE) 286 | time.sleep(2) 287 | PressKey(A) 288 | time.sleep(1.6) 289 | ReleaseKey(A) 290 | PressKey(SHIFTDOWN) 291 | ReleaseKey(SHIFTDOWN) 292 | time.sleep(0.5) 293 | PressKey(SHIFTDOWN) 294 | ReleaseKey(SHIFTDOWN) 295 | time.sleep(2) 296 | PressKey(SPACE) 297 | ReleaseKey(SPACE) 298 | PressKey(UP) 299 | time.sleep(1.1) 300 | ReleaseKey(UP) 301 | PressKey(SPACE) 302 | ReleaseKey(SPACE) 303 | 304 | elif keyboard.is_pressed('z'): 305 | if x == True: # lane change to the left 306 | autosteerEnabled = False 307 | PressKey(A) 308 | time.sleep(0.5) 309 | ReleaseKey(A) 310 | PressKey(D) 311 | time.sleep(0.6) 312 | ReleaseKey(D) 313 | PressKey(Z) 314 | ReleaseKey(Z) 315 | autosteerEnabled = True 316 | 317 | elif keyboard.is_pressed('x'): 318 | if x == True: # lane change to the right 319 | autosteerEnabled = False 320 | PressKey(D) 321 | time.sleep(0.5) 322 | ReleaseKey(D) 323 | PressKey(A) 324 | time.sleep(0.6) 325 | ReleaseKey(A) 326 | PressKey(X) 327 | ReleaseKey(X) 328 | autosteerEnabled = True 329 | 330 | elif keyboard.is_pressed('c'): 331 | if x == False: 332 | x = True 333 | y = False 334 | playsound('on.wav') 335 | elif x == True: 336 | x = False 337 | playsound('off.wav') 338 | 339 | elif keyboard.is_pressed('n'): 340 | if y == False: 341 | y = True 342 | x = False 343 | playsound('on-tacc.wav') 344 | elif y == True: 345 | y = False 346 | playsound('off.wav') 347 | 348 | #autosteer START 349 | if x == True: 350 | new_image = proceesed_img(screen) 351 | lines = cv2.HoughLinesP(new_image, 2 ,np.pi/180, 10,np.array([]), minLineLength = 0.005, maxLineGap = 50) 352 | left_coordinate = [] 353 | right_coordinate = [] 354 | 355 | if lines is not None: 356 | for line in lines: 357 | x1,y1,x2,y2 = line[0] 358 | slope = (x2-x1)/(y2-y1) 359 | if slope<0: 360 | left_coordinate.append([x1,y1,x2,y2]) 361 | elif slope>0: 362 | right_coordinate.append([x1,y1,x2,y2]) 363 | l_avg = np.average(left_coordinate, axis =0) 364 | r_avg = np.average(right_coordinate, axis =0) 365 | l =l_avg.tolist() 366 | r = r_avg.tolist() 367 | try: 368 | #with the found slope and intercept, this is used to find the value of point x on both left and right line 369 | #the center point is denoted by finding center distance between two lines 370 | c1,d1,c2,d2 = r 371 | a1,b1, a2,b2 = l 372 | l_slope = (b2-b1)/(a2-a1) 373 | r_slope = (d2-d1)/(c2-c1) 374 | l_intercept = b1 - (l_slope*a1) 375 | r_intercept = d1 - (r_slope*c1) 376 | y=360 377 | l_x = (y - l_intercept)/l_slope 378 | r_x = (y - r_intercept)/r_slope 379 | distance = math.sqrt((r_x - l_x)**2+(y-y)**2) 380 | #line_center repressent the center point on the line 381 | line_center = distance/2 382 | center_pt =[(l_x+line_center)] 383 | # autosteer criteria for normal curves 384 | f_l = [(l_x+(line_center*1.017))] 385 | f_r = [(l_x+(line_center*0.03))] 386 | #create a center point which is fixed technically 375, but we set higher to engineer out right bias 387 | 
center_fixed =[390] 388 | x_1 = int(l_x) 389 | x_2 = int(r_x) 390 | '''The logic here is simple: center_fixed should sit on the detected lane centre line, 391 | which means the car is centred in its lane. 392 | If the computed centre point drifts away from center_fixed, 393 | the left and right correction functions are applied accordingly.''' 394 | #straight 395 | if center_pt==center_fixed and autosteerEnabled == True: 396 | straight() 397 | directionLeft = 0 398 | directionRight = 0 399 | directionStraight = 1 400 | #normal curves 401 | elif center_fixed < f_r and autosteerEnabled == True: 402 | little_right() 403 | directionLeft = 0 404 | directionRight = 1 405 | directionStraight = 0 406 | elif center_fixed > f_l and autosteerEnabled == True: 407 | little_left() 408 | directionLeft = 1 409 | directionRight = 0 410 | directionStraight = 0 411 | #neither curve condition met - hold straight 412 | else: 413 | straight() 414 | directionLeft = 0 415 | directionRight = 0 416 | directionStraight = 1 417 | except: 418 | #no usable lane line on one side this frame; fall through and keep the last direction 419 | pass 420 | if directionLeft == 1: 421 | directionLeft = 1 422 | directionRight = 0 423 | directionStraight = 0 424 | little_left() 425 | elif directionRight == 1: 426 | directionLeft = 0 427 | directionRight = 1 428 | directionStraight = 0 429 | little_right() 430 | elif directionStraight == 1: 431 | directionLeft = 0 432 | directionRight = 0 433 | directionStraight = 1 434 | straight() 435 | 436 | line_image = display_line(screen,lines) 437 | combo_image = cv2.addWeighted(screen,0.8, line_image,1.2,2) 438 | cv2.imshow('lane-detection',cv2.cvtColor(combo_image, cv2.COLOR_BGR2RGB)) 439 | #autosteer END 440 | 441 | if cv2.waitKey(25) & 0xFF == ord('q'): 442 | cv2.destroyAllWindows() 443 | break --------------------------------------------------------------------------------