├── Raspberrypi_sensor
│   ├── RFID.py
│   └── Ultra.py
├── YOLO
│   ├── parameter.py
│   ├── Streaming.py
│   ├── Model2.py
│   ├── training.py
│   ├── line_object.py
│   └── utils.py
├── Lane Detection
│   ├── line_stream.py
│   └── line.py
├── LICENSE
├── README.md
└── Raspberrypi_control
    └── Control.py
/Raspberrypi_sensor/RFID.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import RPi.GPIO as GPIO
3 | import socket, time
4 | import MFRC522
5 | 
6 | 
7 | ip = '192.168.43.160'
8 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
9 | server_address = (ip, 3444)
10 | sock.connect(server_address)
11 | print("sensor connected... port : 3444")
12 | 
13 | MIFAREReader = MFRC522.MFRC522()
14 | print("RFID")
15 | while True:
16 |     (status, TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
17 |     if status == MIFAREReader.MI_OK:  # a bus-stop tag was read
18 |         print("STOP")
19 |         sock.send("stop".encode())    # hold at the bus stop for 5 seconds
20 |         time.sleep(5)
21 |         sock.send("gogo".encode())    # then release the bus
22 |         time.sleep(5)
23 | 
24 | GPIO.cleanup()
--------------------------------------------------------------------------------
/YOLO/parameter.py:
--------------------------------------------------------------------------------
1 | #LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'traffic_light', 'street_sign', 'stop_sign', 'parking_meter']
2 | #LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign']
3 | LABELS = ['person', 'car', 'traffic_light', 'stop_sign']
4 | WIDTH = 800
5 | HEIGHT = 600
6 | 
7 | COLORS = [(43,206,72),(255,204,153),(120,150,0),(148,255,181)]
8 | 
9 | input_shape = (416, 416, 3)
10 | NORM_H, NORM_W = 416, 416
11 | GRID_H, GRID_W = 13, 13
12 | BATCH_SIZE = 8
13 | BOX = 5
14 | CLASS = len(LABELS)
15 | THRESHOLD = 0.65
16 | ANCHORS = '1.08,1.19, 3.42,4.41, 6.63,11.38, 9.42,5.11, 16.62,10.52'
17 | ANCHORS = [float(a) for a in ANCHORS.split(',')]  # flat [w0, h0, w1, h1, ...] anchor list
18 | SCALE_NOOB, SCALE_CONF, SCALE_COOR, SCALE_PROB = 0.5, 5.0, 5.0, 1.0
19 | 
20 | # NOTE: training.py and utils.py also expect the annotation/image paths to be
21 | # defined here via 'from parameter import *'; the original values are not in
22 | # the repo, so the two below are placeholders.
23 | ann_dir = 'annotations.txt'
24 | img_dir = 'images/'
--------------------------------------------------------------------------------
/Lane Detection/line_stream.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import socket
3 | from line import get_lane
4 | 
5 | width, height = 800, 600
6 | ip = '192.168.43.160'
7 | 
8 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
9 | server_address = (ip, 3442)
10 | sock.connect(server_address)
11 | 
12 | cap = cv2.VideoCapture(1)
13 | print(cap.isOpened())
14 | 
15 | while cap.isOpened():
16 |     ret, frame = cap.read()
17 | 
18 |     frame = cv2.resize(frame, (width, height))
19 | 
20 |     #Gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
21 |     #Gray = cv2.bitwise_not(Gray)
22 | 
23 |     try:
24 |         frame, point = get_lane(frame)  # point is a zero-padded 4-character x coordinate
25 |         print(point)
26 |         sock.send(point.encode())
27 |     except:
28 |         print("error")
29 |         pass
30 | 
31 |     cv2.imshow('a', frame)
32 |     if cv2.waitKey(1) & 0xFF == ord('q'):
33 |         break
34 | 
35 | cv2.destroyAllWindows()
36 | cap.release()
--------------------------------------------------------------------------------
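line.py (further down) zero-pads the lane-center x coordinate to exactly four characters before line_stream.py sends it, so the control side can frame every message with a single recv(4). A minimal sketch of that fixed-width convention (encode_x/decode_x are illustrative helpers, not part of the repo):

# Fixed-width framing used between line_stream.py and Control.py:
# every payload is exactly 4 bytes ("0057", "gogo", "stop", "A370"),
# so recv(4) on the other side never splits or merges messages.
def encode_x(x_point):
    return str(x_point).zfill(4)   # 57 -> "0057"

def decode_x(payload):
    return int(payload)            # "0057" -> 57 (after .decode() on the socket bytes)

assert decode_x(encode_x(57)) == 57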
"Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Raspberrypi_sensor/Ultra.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import RPi.GPIO as GPIO 3 | import socket,time 4 | 5 | ip = '192.168.43.160' 6 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 7 | server_address = (ip, 3445) 8 | sock.connect(server_address) 9 | print("sensor connected... port : 3445") 10 | 11 | 12 | GPIO.setmode(GPIO.BOARD) 13 | try : 14 | trig = 18 15 | echo = 16 16 | GPIO.setup(trig, GPIO.OUT) 17 | GPIO.setup(echo, GPIO.IN) 18 | print("Ultra") 19 | while True: 20 | GPIO.output(trig, False) 21 | time.sleep(0.5) 22 | GPIO.output(trig, True) 23 | time.sleep(0.00001) 24 | GPIO.output(trig, False) 25 | 26 | while GPIO.input(echo) == 0: 27 | pulse_start = time.time() 28 | while GPIO.input(echo) == 1: 29 | pulse_end = time.time() 30 | 31 | pulse_duration = pulse_end - pulse_start 32 | distance = pulse_duration * 17000 33 | distance = round(distance, 2) 34 | if (distance < 14): 35 | sock.send("stop".encode()) 36 | print("Obstacle detect!") 37 | else: 38 | sock.send("gogo".encode()) 39 | print("distance : ", distance) 40 | 41 | except : 42 | GPIO.cleanup() 43 | 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Alpha-Car 2 | 3 | 4 | ### Members 5 | - 이용한 : 팀장, 차선 인식 6 | - 김범영 : 딥러닝, 물체 인식 7 | - 이지훈 : Motor, Sensor 제어 8 | - 이신효 : 통신 9 | 10 | ### What is Alpha-Car? 11 | - 2017 한이음 공모전에 나간 Alpha-car팀의 '딥러닝 기반 자율 주행 버스 운행 시스템' 이다. 12 | - Deep Learning, OpenCV, Raspberry pi, RFID, 초음파 센서 등을 이용하여 구현하였다. 13 | - AlphaGo + Car 14 | 15 | 16 | ### Implements Detail 17 | - SW 18 | - lane tracing using OpenCV (image processing Library) 19 | - Object Detection using Deep Learning ([YOLO v2](https://arxiv.org/pdf/1612.08242.pdf)) 20 | - We can detect Car, Pedestrian, Stop sign and Traffic sign. 21 | 22 | - HW 23 | - DC Motor for driving power 24 | - Servo Motor for direction control 25 | - Ultrasonic sensor for front obstacle detection 26 | - RFID for Bus Stop Recognition 27 | 28 | ### Result 29 | - 2017 한이음 공모전 금상(과기정통부 장관상) 수상 프로젝트이며 자세한 사항들은 아래 링크에서 확인할 수 있다. 30 | - [Youtube 데모 영상](https://www.youtube.com/watch?v=BcBvTIv5zpw&t=1s) 31 | - [프로젝트 보고서 - 한이음 수상작 페이지](http://www.hanium.or.kr/portal/project/awardList.do) 32 | 33 | ### Limitations 34 | - 학부 3학년 때 진행했던 프로젝트라 기술적인 문제들이 많다. 
/YOLO/Streaming.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import socket
4 | from utils import interpret_netout, get_Object, Rotate, ccw
5 | from Model2 import model
6 | from line_object import get_lane
7 | 
8 | ip = '192.168.43.160'
9 | 
10 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
11 | server_address = (ip, 3443)
12 | sock.connect(server_address)
13 | 
14 | print("connected")
15 | cap = cv2.VideoCapture(0)  # grab frames from the USB camera
16 | model.load_weights("person_10_6.hdf5")  # load the trained model weights
17 | print("go capture")
18 | # {label : [hit count, miss count, seen this frame, detection latched]}
19 | Check = {'car':[0,5,False,False], 'person':[0,5,False,False], 'red':[0,5,False,False],
20 |          'green':[0,5,False,False], 'stop_sign':[0,5,False,False]}
21 | 
22 | # encode object names as one-letter codes for the control protocol
23 | Encode = {'car' : 'A', 'person' : 'B', 'red' : 'C', 'green' : 'D', 'stop_sign' : 'E'}
24 | Count = 0  # number of consecutive frames with no detection
25 | 
26 | while cap.isOpened():
27 |     ret, frame = cap.read()
28 |     frame = cv2.resize(frame, (800,600))
29 | 
30 |     Gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
31 |     Gray = cv2.bitwise_not(Gray)
32 | 
33 |     input_image = cv2.resize(frame, (416, 416))
34 |     input_image = input_image / 255.
35 |     input_image = input_image[:, :, ::-1]  # BGR -> RGB for the detector
36 |     input_image = np.expand_dims(input_image, axis=0)  # add the batch dimension
37 |     netout = model.predict(input_image)  # run object detection
38 |     image, mark = interpret_netout(frame, netout[0])  # decode the detections
39 | 
40 |     try:
41 |         image, left, right = get_lane(image)
42 |     except:
43 |         continue
44 |     # if nothing has been seen for 4+ frames, send the go signal
45 |     if Count >= 4:
46 |         Count = 0
47 |         print('start')
48 |         sock.send("gogo".encode())  # send go signal
49 | 
50 |     if len(mark) == 0:  # nothing detected this frame
51 |         Count += 1  # increase the miss count
52 | 
53 |     for m in mark:
54 |         Obj, xmin, xmax, ymin, ymax = get_Object(image, m, Check)  # extract the detected object
55 |         if Obj == None:
56 |             Count += 1
57 |             continue
58 | 
59 |         if Obj == 'red' or Obj == 'green' or Obj == 'stop_sign' or \
60 |            (ccw(left, [xmax, ymax]) == 1 and ccw(right, [xmin, ymax]) == -1):
61 | 
62 |             if Check[Obj][0] >= 4:  # seen in 4+ consecutive frames
63 |                 Check[Obj][3] = True  # latch the detection
64 |                 Count = 0
65 |                 if Obj == 'green':  # green light detected
66 |                     print("restart ", Encode[Obj] + str(ymax))
67 |                     sock.send("gogo".encode())  # send go signal
68 |                     continue
69 |                 # any other object
70 |                 print("stop! ", Encode[Obj] + str(ymax))
71 |                 sock.send((Encode[Obj] + str(ymax)).encode())  # send object code + y position
72 | 
73 |         else:
74 |             #print("Object : out of range")
75 |             continue
76 | 
77 |     for m in Check:  # per-frame detection post-processing
78 |         if Check[m][2] == False:   # not seen this frame
79 |             Check[m][0] = 0        # reset the hit count
80 |             Check[m][1] += 1       # increase the miss count
81 |             if Check[m][1] >= 4 and Check[m][3] == True:
82 |                 Check[m][3] = False  # clear the latch
83 |                 print("restart")
84 |                 sock.send("gogo".encode())
85 |         Check[m][2] = False  # reset the 'seen this frame' flag for the next frame
86 | 
87 |     cv2.imshow('detect', image)
88 |     if cv2.waitKey(1) & 0xFF == ord('q'):
89 |         break
90 | 
91 | cap.release()
--------------------------------------------------------------------------------
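The Check table above is a per-label debounce: a label must be detected in four consecutive frames before its stop/go action latches, and missed in four frames before the latch clears, which keeps single-frame detector flickers from stopping the car. A stripped-down sketch of the same idea (the class name is illustrative):

# Stripped-down version of Streaming.py's hit/miss bookkeeping.
class Debounce:
    def __init__(self, threshold=4):
        self.hits, self.misses, self.latched = 0, 0, False
        self.threshold = threshold

    def update(self, detected):
        if detected:
            self.hits += 1
            self.misses = 0
            if self.hits >= self.threshold:
                self.latched = True       # e.g. send the stop code
        else:
            self.hits = 0
            self.misses += 1
            if self.misses >= self.threshold:
                self.latched = False      # e.g. send "gogo"
        return self.latched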
/YOLO/Model2.py:
--------------------------------------------------------------------------------
1 | from keras.models import Model
2 | from keras.layers import Reshape, Conv2D, MaxPooling2D, Lambda
3 | from keras.layers import BatchNormalization, Input, Activation
4 | from keras.layers.advanced_activations import LeakyReLU
5 | from keras.layers.merge import concatenate
6 | 
7 | from parameter import CLASS, BOX, input_shape
8 | 
9 | def space_to_depth_x2(x):
10 |     """Thin wrapper for Tensorflow space_to_depth with block_size=2."""
11 |     import tensorflow as tf
12 |     return tf.space_to_depth(x, block_size=2)
13 | 
14 | def space_to_depth_x2_output_shape(input_shape):
15 |     """Determine space_to_depth output shape for block_size=2.
16 | 
17 |     Note: For Lambda with TensorFlow backend, output shape may not be needed.
18 |     """
19 |     return (input_shape[0], input_shape[1] // 2, input_shape[2] // 2, 4 *
20 |             input_shape[3]) if input_shape[1] else (input_shape[0], None, None,
21 |             4 * input_shape[3])
22 | 
23 | def Conv_BN_LR_MP(inputs, depth, pooling=True):
24 |     x = Conv2D(depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(inputs)
25 |     x = BatchNormalization()(x)
26 |     x = LeakyReLU(alpha=0.1)(x)
27 |     if pooling:
28 |         x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
29 | 
30 |     return x
31 | 
32 | def Conv_bottleneck(inputs, count, side_depth, middle_depth, pooling=True):
33 |     x = Conv2D(side_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(inputs)
34 |     x = BatchNormalization()(x)
35 |     x = LeakyReLU(alpha=0.1)(x)
36 |     x = Conv2D(middle_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
37 |     x = BatchNormalization()(x)
38 |     x = LeakyReLU(alpha=0.1)(x)
39 |     x = Conv2D(side_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
40 |     x = BatchNormalization()(x)
41 |     x = LeakyReLU(alpha=0.1)(x)
42 |     if count == 5 or count == 7:
43 |         x = Conv2D(middle_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
44 |         x = BatchNormalization()(x)
45 |         x = LeakyReLU(alpha=0.1)(x)
46 |         x = Conv2D(side_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
47 |         x = BatchNormalization()(x)
48 |         x = LeakyReLU(alpha=0.1)(x)
49 |     if count == 7:
50 |         x = Conv2D(side_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
51 |         x = BatchNormalization()(x)
52 |         x = LeakyReLU(alpha=0.1)(x)
53 |         x = Conv2D(side_depth, (3, 3), strides=(1, 1), padding='same', use_bias=False)(x)
54 |         x = BatchNormalization()(x)
55 |         x = LeakyReLU(alpha=0.1)(x)
56 |     if pooling:
57 |         x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
58 | 
59 |     return x
60 | 
61 | 
62 | inputs = Input(shape=input_shape)
63 | 
64 | conv1 = Conv_BN_LR_MP(inputs, 32)
65 | conv2 = Conv_BN_LR_MP(conv1, 64)
66 | conv3_5 = Conv_bottleneck(conv2, count=3, side_depth=128, middle_depth=64)
67 | conv6_8 = Conv_bottleneck(conv3_5, count=3, side_depth=256, middle_depth=128)
68 | conv9_13 = Conv_bottleneck(conv6_8, count=5, side_depth=512, middle_depth=256, pooling=False)
69 | 
70 | conv21 = conv9_13
71 | 
72 | conv9_13 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv9_13)
73 | conv14_20 = Conv_bottleneck(conv9_13, count=7, side_depth=1024, middle_depth=512, pooling=False)
74 | 
75 | conv21 = Conv_BN_LR_MP(conv21, 64, pooling=False)
76 | conv21 = Lambda(
77 |     space_to_depth_x2,
78 |     output_shape=space_to_depth_x2_output_shape,
79 |     name='space_to_depth')(conv21)
80 | 
81 | merge = concatenate([conv14_20, conv21])
82 | conv22 = Conv_BN_LR_MP(merge, 1024, pooling=False)
83 | conv23 = Conv2D(5*(CLASS+BOX), (1, 1), strides=(1, 1))(conv22)
84 | 
85 | conv23 = Activation('linear')(conv23)
86 | final = Reshape((13, 13, 5, CLASS+BOX))(conv23)
87 | 
88 | model = Model(inputs, final)
89 | model.summary()
--------------------------------------------------------------------------------
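The Lambda layer above reorganizes the 26x26x64 pass-through features into 13x13x256 so they can be concatenated with the deeper 13x13 feature map. A quick NumPy sanity check of that shape bookkeeping (an equivalent NHWC rearrangement written for illustration, not the TF kernel itself):

import numpy as np

def space_to_depth_np(x, block=2):
    # NHWC rearrangement equivalent to tf.space_to_depth with block_size=2:
    # each 2x2 spatial patch is folded into the channel dimension.
    n, h, w, c = x.shape
    x = x.reshape(n, h // block, block, w // block, block, c)
    x = x.transpose(0, 1, 3, 2, 4, 5)
    return x.reshape(n, h // block, w // block, block * block * c)

x = np.zeros((1, 26, 26, 64))       # shape of the pass-through features above
print(space_to_depth_np(x).shape)   # (1, 13, 13, 256), matching the 13x13 grid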
/YOLO/training.py:
--------------------------------------------------------------------------------
1 | from keras.callbacks import EarlyStopping, ModelCheckpoint
2 | from keras.optimizers import SGD, Adagrad
3 | import numpy as np
4 | import tensorflow as tf
5 | 
6 | from parameter import *
7 | from utils import parse_annotation, data_gen
8 | from Model2 import model
9 | 
10 | all_img = parse_annotation(ann_dir)  # ann_dir comes from parameter.py
11 | 
12 | 
13 | def custom_loss(y_true, y_pred):
14 |     ### Adjust prediction
15 |     # adjust x and y
16 |     pred_box_xy = 
tf.sigmoid(y_pred[:, :, :, :, :2]) 17 | 18 | # adjust w and h 19 | pred_box_wh = tf.exp(y_pred[:, :, :, :, 2:4]) * np.reshape(ANCHORS, [1, 1, 1, BOX, 2]) 20 | pred_box_wh = tf.sqrt(pred_box_wh / np.reshape([float(GRID_W), float(GRID_H)], [1, 1, 1, 1, 2])) 21 | 22 | # adjust confidence 23 | pred_box_conf = tf.expand_dims(tf.sigmoid(y_pred[:, :, :, :, 4]), -1) 24 | 25 | # adjust probability 26 | pred_box_prob = tf.nn.softmax(y_pred[:, :, :, :, 5:]) 27 | 28 | y_pred = tf.concat([pred_box_xy, pred_box_wh, pred_box_conf, pred_box_prob], 4) 29 | 30 | ### Adjust ground truth 31 | # adjust x and y 32 | center_xy = .5 * (y_true[:, :, :, :, 0:2] + y_true[:, :, :, :, 2:4]) 33 | center_xy = center_xy / np.reshape([(float(NORM_W) / GRID_W), (float(NORM_H) / GRID_H)], [1, 1, 1, 1, 2]) 34 | true_box_xy = center_xy - tf.floor(center_xy) 35 | 36 | # adjust w and h 37 | true_box_wh = (y_true[:, :, :, :, 2:4] - y_true[:, :, :, :, 0:2]) 38 | true_box_wh = tf.sqrt(true_box_wh / np.reshape([float(NORM_W), float(NORM_H)], [1, 1, 1, 1, 2])) 39 | 40 | # adjust confidence 41 | pred_tem_wh = tf.pow(pred_box_wh, 2) * np.reshape([GRID_W, GRID_H], [1, 1, 1, 1, 2]) 42 | pred_box_area = pred_tem_wh[:, :, :, :, 0] * pred_tem_wh[:, :, :, :, 1] 43 | pred_box_ul = pred_box_xy - 0.5 * pred_tem_wh 44 | pred_box_bd = pred_box_xy + 0.5 * pred_tem_wh 45 | 46 | true_tem_wh = tf.pow(true_box_wh, 2) * np.reshape([GRID_W, GRID_H], [1, 1, 1, 1, 2]) 47 | true_box_area = true_tem_wh[:, :, :, :, 0] * true_tem_wh[:, :, :, :, 1] 48 | true_box_ul = true_box_xy - 0.5 * true_tem_wh 49 | true_box_bd = true_box_xy + 0.5 * true_tem_wh 50 | 51 | intersect_ul = tf.maximum(pred_box_ul, true_box_ul) 52 | intersect_br = tf.minimum(pred_box_bd, true_box_bd) 53 | intersect_wh = intersect_br - intersect_ul 54 | intersect_wh = tf.maximum(intersect_wh, 0.0) 55 | intersect_area = intersect_wh[:, :, :, :, 0] * intersect_wh[:, :, :, :, 1] 56 | 57 | iou = tf.truediv(intersect_area, true_box_area + pred_box_area - intersect_area) 58 | best_box = tf.equal(iou, tf.reduce_max(iou, [3], True)) 59 | best_box = tf.to_float(best_box) 60 | true_box_conf = tf.expand_dims(best_box * y_true[:, :, :, :, 4], -1) 61 | 62 | # adjust confidence 63 | true_box_prob = y_true[:, :, :, :, 5:] 64 | 65 | y_true = tf.concat([true_box_xy, true_box_wh, true_box_conf, true_box_prob], 4) 66 | # y_true = tf.Print(y_true, [true_box_wh], message='DEBUG', summarize=30000) 67 | 68 | ### Compute the weights 69 | weight_coor = tf.concat(4 * [true_box_conf], 4) 70 | weight_coor = SCALE_COOR * weight_coor 71 | 72 | weight_conf = SCALE_NOOB * (1. 
- true_box_conf) + SCALE_CONF * true_box_conf 73 | 74 | weight_prob = tf.concat(CLASS * [true_box_conf], 4) 75 | weight_prob = SCALE_PROB * weight_prob 76 | 77 | weight = tf.concat([weight_coor, weight_conf, weight_prob], 4) 78 | 79 | ### Finalize the loss 80 | loss = tf.pow(y_pred - y_true, 2) 81 | loss = loss * weight 82 | loss = tf.reshape(loss, [-1, GRID_W * GRID_H * BOX * (4 + 1 + CLASS)]) 83 | loss = tf.reduce_sum(loss, 1) 84 | loss = .5 * tf.reduce_mean(loss) 85 | 86 | return loss 87 | 88 | 89 | layer = model.layers[-3] # the last convolutional layer 90 | weights = layer.get_weights() 91 | 92 | new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W) 93 | new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W) 94 | 95 | layer.set_weights([new_kernel, new_bias]) 96 | 97 | try: 98 | model.load_weights("deepcoco_weights3.hdf5") 99 | print("Previous data") 100 | except: 101 | print("New data") 102 | 103 | 104 | sgd = SGD(lr=0.001, decay=0.0005, momentum=0.9) 105 | early_stop = EarlyStopping(monitor='loss', min_delta=0.001, patience=7, mode='min', verbose=1) 106 | checkpoint = ModelCheckpoint('deepcoco_weights3.hdf5', monitor='loss', verbose=1, save_best_only=True, mode='min', period=1) 107 | 108 | #sgd = SGD(lr=0.001, decay=0.0005, momentum=0.9) 109 | 110 | model.compile(loss=custom_loss, optimizer=sgd) 111 | model.fit_generator(generator=data_gen(all_img, BATCH_SIZE), 112 | steps_per_epoch=int(len(all_img)/BATCH_SIZE), 113 | epochs = 100, 114 | verbose = 1, 115 | callbacks = [early_stop, checkpoint], 116 | max_queue_size = 3) 117 | 118 | -------------------------------------------------------------------------------- /Raspberrypi_control/Control.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import Adafruit_PCA9685 3 | import RPi.GPIO as GPIO 4 | import socket, threading, time 5 | 6 | ip = '192.168.43.160' 7 | 8 | Encode = {'A' : 370, 'B' : 370, 'C' : 280, 'D' : 280, 'E' : 30} 9 | # car / person / red / green / stop_sign 10 | pwm = Adafruit_PCA9685.PCA9685() 11 | pwm.set_pwm_freq(50) 12 | 13 | front_left = 12 14 | front_right = 13 15 | back_left = 15 16 | back_right = 11 17 | 18 | GPIO.setmode(GPIO.BOARD) 19 | GPIO.setup(front_left, GPIO.OUT) 20 | GPIO.setup(front_right, GPIO.OUT) 21 | GPIO.setup(back_left, GPIO.OUT) 22 | GPIO.setup(back_right, GPIO.OUT) 23 | #390 24 | CAR_CENTER = 405 25 | CENTER = 307 26 | center = CENTER 27 | SPEED = 1300 28 | 29 | CUR_SPEED = SPEED 30 | pwm.set_pwm(0, 0, CENTER) 31 | 32 | def straight(pwm=pwm, GPIO=GPIO, speed=CUR_SPEED, center=CENTER): 33 | pwm.set_pwm(0, 0, center) 34 | pwm.set_pwm(4, 0, speed) 35 | pwm.set_pwm(5, 0, speed) 36 | GPIO.output(front_left, GPIO.HIGH) 37 | GPIO.output(front_right, GPIO.HIGH) 38 | GPIO.output(back_left, GPIO.LOW) 39 | GPIO.output(back_right, GPIO.LOW) 40 | 41 | def stop(pwm=pwm, GPIO=GPIO, speed=CUR_SPEED): 42 | pwm.set_pwm(4, 0, speed) 43 | pwm.set_pwm(5, 0, speed) 44 | GPIO.output(front_left, GPIO.HIGH) 45 | GPIO.output(front_right, GPIO.HIGH) 46 | GPIO.output(back_left, GPIO.LOW) 47 | GPIO.output(back_right, GPIO.LOW) 48 | 49 | def left(pwm=pwm, GPIO=GPIO, speed=CUR_SPEED, center=CENTER, turn=100): 50 | pwm.set_pwm(0, 0, center + turn) # 410 51 | pwm.set_pwm(4, 0, speed) 52 | pwm.set_pwm(5, 0, speed) 53 | GPIO.output(front_left, GPIO.HIGH) 54 | GPIO.output(front_right, GPIO.HIGH) 55 | GPIO.output(back_left, GPIO.LOW) 56 | GPIO.output(back_right, GPIO.LOW) 57 | 58 | def right(pwm=pwm, GPIO=GPIO, speed=CUR_SPEED, 
center=CENTER, turn=100): 59 | pwm.set_pwm(0, 0, center - turn) # 210 60 | pwm.set_pwm(4, 0, speed) 61 | pwm.set_pwm(5, 0, speed) 62 | GPIO.output(front_left, GPIO.HIGH) 63 | GPIO.output(front_right, GPIO.HIGH) 64 | GPIO.output(back_left, GPIO.LOW) 65 | GPIO.output(back_right, GPIO.LOW) 66 | 67 | Object = False 68 | Bus_stop = False 69 | Obstacle = False 70 | red_light = False 71 | 72 | def from_line(): 73 | global Object, Bus_stop, Obstacle, red_light 74 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 75 | server_address = (ip, 3442) 76 | print("line socket listening...") 77 | sock.bind(server_address) 78 | sock.listen(1) 79 | 80 | try: 81 | client, address = sock.accept() 82 | print("Line Connected") 83 | while True: 84 | data = client.recv(4) 85 | if Object or Bus_stop or Obstacle or red_light: 86 | #print("stop") 87 | stop(speed=0) 88 | continue 89 | try: 90 | x_point = int(data) 91 | except: 92 | continue 93 | if x_point >= CAR_CENTER - 50 and x_point <= CAR_CENTER + 50: 94 | #print("straight : ", x_point) 95 | straight() 96 | elif CAR_CENTER >= x_point: 97 | k = int((CAR_CENTER - x_point) / 4) 98 | #print("left : ", x_point," -> ",k + CENTER) 99 | right(speed=CUR_SPEED + 100, turn=k) 100 | elif CAR_CENTER < x_point: 101 | k = int((x_point - CAR_CENTER) / 4) 102 | #print("right : ", x_point, " -> ", k + CENTER) 103 | left(speed=CUR_SPEED + 100, turn=k) 104 | except: 105 | print("close line") 106 | GPIO.cleanup() 107 | exit(0) 108 | 109 | 110 | def from_YOLO(): 111 | global Object, Bus_stop, Obstacle, CUR_SPEED, red_light 112 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 113 | server_address = (ip, 3443) 114 | print("YOLO socket listening...") 115 | sock.bind(server_address) 116 | sock.listen(1) 117 | try: 118 | client, address = sock.accept() 119 | print("YOLO connected") 120 | while True: 121 | data = client.recv(4) 122 | if data == 'gogo': 123 | if not Bus_stop and not Obstacle: 124 | print("Object : GO") 125 | stop(speed=SPEED) 126 | Object = False 127 | red_light = False 128 | continue 129 | 130 | Type, ymax = data[0], int(data[1:]) 131 | if Type == 0 or Type == None or Type == '0': 132 | continue 133 | 134 | if Encode[Type] < ymax: 135 | stop(speed=0) 136 | if Type == 'C': 137 | red_light = True 138 | elif Type == 'D': 139 | red_light = False 140 | else: 141 | Object = True 142 | print("Object : Stop!! 
",Type) 143 | elif Encode[Type] - 80 >= ymax: 144 | CUR_SPEED = SPEED 145 | if Type == 'C': 146 | red_light = False 147 | else: 148 | Object = False 149 | print("Object : enough distance") 150 | else: 151 | CUR_SPEED = SPEED-200 152 | if Type == 'C': 153 | red_light = False 154 | else: 155 | Object = False 156 | #CUR_SPEED = Encode[Type] - ymax + 1200 #1100 is min-speed 157 | print("Object : Slow Down ",CUR_SPEED) 158 | 159 | except: 160 | print("close yolo") 161 | sock.close() 162 | GPIO.cleanup() 163 | exit(1) 164 | 165 | def from_RFID(): 166 | global Bus_stop, Obstacle 167 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 168 | server_address = (ip, 3444) 169 | print("Sensor socket listening...") 170 | sock.bind(server_address) 171 | sock.listen(1) 172 | 173 | try: 174 | client, address = sock.accept() 175 | while True: 176 | data = client.recv(4) 177 | if data == "gogo" and not Obstacle: 178 | stop(speed=SPEED) 179 | Bus_stop = False 180 | elif data == "stop": 181 | stop(speed=0) 182 | Bus_stop = True 183 | except: 184 | print("close RFID") 185 | sock.close() 186 | GPIO.cleanup() 187 | exit(1) 188 | 189 | def from_Ultra(): 190 | global Obstacle, Bus_stop 191 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 192 | server_address = (ip, 3445) 193 | print("Sensor socket listening...") 194 | sock.bind(server_address) 195 | sock.listen(1) 196 | 197 | try: 198 | client, address = sock.accept() 199 | while True: 200 | data = client.recv(4) 201 | if data == "gogo" and not Bus_stop: 202 | stop(speed=SPEED) 203 | Obstacle = False 204 | elif data == "stop": 205 | stop(speed=0) 206 | Obstacle = True 207 | except: 208 | print("close ultra") 209 | sock.close() 210 | GPIO.cleanup() 211 | exit(1) 212 | 213 | LINE = threading.Thread(target=from_line) 214 | DETECT = threading.Thread(target=from_YOLO) 215 | RFID = threading.Thread(target=from_RFID) 216 | ULTRA = threading.Thread(target=from_Ultra) 217 | 218 | LINE.start() 219 | DETECT.start() 220 | RFID.start() 221 | ULTRA.start() 222 | 223 | LINE.join() 224 | DETECT.join() 225 | RFID.join() 226 | ULTRA.join() 227 | 228 | pwm.set_pwm(0, 0, 0) 229 | GPIO.cleanup() 230 | 231 | -------------------------------------------------------------------------------- /YOLO/line_object.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import time 4 | 5 | global lx1, lx2, ly1, ly2, gx1, gx2, gy1, gy2, warped_eq1 6 | lx1, lx2, ly1, ly2, gx1, gy1, gx2, gy2 = int(0), int(0), int(0), int(0),int(0), int(0), int(0), int(0) 7 | 8 | 9 | # Frame width & Height 10 | w = 800 11 | h = 600 12 | 13 | def ROI(img, vertices, color3=(255, 255, 255), color1=255): 14 | mask = np.zeros_like(img) 15 | 16 | if len(img.shape) > 2: 17 | color = color3 18 | else: 19 | color = color1 20 | 21 | cv2.fillPoly(mask, vertices, color) 22 | ROI_image = cv2.bitwise_and(img, mask) 23 | 24 | return ROI_image 25 | 26 | def get_fitline_left(img, f_lines): 27 | global lx1, lx2, ly1, ly2 28 | lines = f_lines.reshape(f_lines.shape[0] * 2, 2) 29 | vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01) 30 | 31 | if lines.shape[0] != 0: 32 | x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1 33 | x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100) 34 | lx1, ly1, lx2, ly2 = x1, y1, x2, y2 35 | 36 | else: 37 | if lx1 == 0 and lx2 == 0 and ly1 == 0 and ly2 == 0: 38 | x1, y1 = img.shape[1] / 2, img.shape[0] / 2 39 | x2, y2 = img.shape[1] / 2, img.shape[0] / 2 
40 | else: 41 | x1, y1, x2, y2 = lx1, ly1, lx2, ly2 42 | 43 | result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2] 44 | return result 45 | 46 | def get_fitline_right(img, f_lines): 47 | global gx1, gx2, gy1, gy2 48 | lines = f_lines.reshape(f_lines.shape[0] * 2, 2) 49 | vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01) 50 | 51 | if lines.shape[0] != 0: 52 | x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1 53 | x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100) 54 | gx1, gy1, gx2, gy2 = x1, y1, x2, y2 55 | 56 | else: 57 | if gx1 == 0 and gx2 == 0 and gy1 == 0 and gy2 == 0: 58 | x1, y1 = img.shape[1] / 2, img.shape[0] / 2 59 | x2, y2 = img.shape[1] / 2, img.shape[0] / 2 60 | else: 61 | x1, y1, x2, y2 = gx1, gy1, gx2, gy2 62 | 63 | result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2] 64 | return result 65 | 66 | def draw(img, lines): 67 | cv2.line(img, (lines[0], lines[1]), (lines[2], lines[3]), (0, 0, 255), 6) 68 | 69 | def get_lane(img): 70 | 71 | global line 72 | height, weight = img.shape[:2] 73 | 74 | #canny_img = cv2.Canny(img, 550, 600)#black, white 75 | canny_img = cv2.Canny(img, 250, 300) # black, white 76 | 77 | #cv2.imshow("canny", canny_img) 78 | 79 | #vertices = np.array([[(0, height), (270, height / 2), (weight - 270, height / 2), (weight, height)]], 80 | # dtype=np.int32) 81 | vertices = np.array([[(0, height), (0, height * 2 / 3), (weight, height * 2 / 3), (weight, height)]], 82 | dtype=np.int32) 83 | 84 | ROI_img = ROI(canny_img, vertices) 85 | #cv2.imshow('roi', ROI_img) 86 | #img = cv2.polylines(img, vertices, True, (255, 0 ,0), 5) 87 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 88 | 89 | if type(line_arr).__name__ == 'NoneType': 90 | line_arr = line 91 | elif line_arr.shape[0] != 1: 92 | line_arr = np.squeeze(line_arr) 93 | line = line_arr 94 | elif line_arr.shape[0] == 1: 95 | line_arr = np.squeeze(line_arr, axis=1) 96 | line = line_arr 97 | 98 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 99 | 100 | line_arr1,line_arr2,slope_degree1,slope_degree2 = line_arr,line_arr,slope_degree,slope_degree 101 | line_arr1 = line_arr1[np.abs(slope_degree1) < 180] 102 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 180] 103 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 104 | slope_degree1 = slope_degree1[np.abs(slope_degree1) >90] 105 | 106 | line_arr2 = line_arr2[np.abs(slope_degree2) > 0] 107 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 0] 108 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 109 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 110 | 111 | if line_arr1.shape[0]!=0 and line_arr2.shape[0]!=0: 112 | line_arr = np.concatenate((line_arr1,line_arr1),axis=0) 113 | slope_degree = np.concatenate((slope_degree2,slope_degree1),axis=0) 114 | 115 | elif line_arr1.shape[0]!=0 and line_arr2.shape[0]==0: 116 | 117 | line_arr = line_arr1 118 | slope_degree = slope_degree1 119 | elif line_arr1.shape[0]==0 and line_arr2.shape[0]!=0: 120 | line_arr = line_arr2 121 | slope_degree = slope_degree2 122 | 123 | else: 124 | 125 | line_arr = line_arr[np.abs(slope_degree) < 160] 126 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 127 | line_arr = line_arr[np.abs(slope_degree) >30] 128 | slope_degree = slope_degree[np.abs(slope_degree)>30] 129 | 130 | L_lines, R_lines = line_arr[(slope_degree > 0), :], line_arr[(slope_degree < 0), :] 131 | 132 | if 
L_lines.shape[0]!=0 and R_lines.shape[0]==0: 133 | vertices = np.array([[(0, height), (0, height * 4 / 5), (weight/2, height * 4 / 5), (weight/2, height)]], 134 | dtype=np.int32) 135 | ROI_img = ROI(canny_img, vertices) 136 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 137 | if type(line_arr).__name__ == 'NoneType': 138 | line_arr = line 139 | elif line_arr.shape[0] != 1: 140 | line_arr = np.squeeze(line_arr) 141 | line = line_arr 142 | elif line_arr.shape[0] == 1: 143 | line_arr = np.squeeze(line_arr, axis=1) 144 | line = line_arr 145 | 146 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 147 | 148 | line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree 149 | 150 | line_arr1 = line_arr1[np.abs(slope_degree1) < 160] 151 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160] 152 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 153 | slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90] 154 | 155 | line_arr2 = line_arr2[np.abs(slope_degree2) > 30] 156 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30] 157 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 158 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 159 | 160 | if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0: 161 | line_arr = np.concatenate((line_arr1, line_arr1), axis=0) 162 | slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0) 163 | 164 | elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0: 165 | line_arr = line_arr1 166 | slope_degree = slope_degree1 167 | elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0: 168 | line_arr = line_arr2 169 | slope_degree = slope_degree2 170 | else: 171 | line_arr = line_arr[np.abs(slope_degree) < 160] 172 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 173 | line_arr = line_arr[np.abs(slope_degree) > 30] 174 | slope_degree = slope_degree[np.abs(slope_degree) > 30] 175 | 176 | L_lines, R_lines = line_arr[(slope_degree > 0), :], np.array([[weight,height, weight, height*0.7]]) 177 | 178 | elif L_lines.shape[0]==0 and R_lines.shape[0]!=0: 179 | vertices = np.array([[(weight/2, height), (weight/2, height * 4 / 5), (weight, height * 4 / 5), (weight, height)]], 180 | dtype=np.int32) 181 | ROI_img = ROI(canny_img, vertices) 182 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 183 | if type(line_arr).__name__ == 'NoneType': 184 | line_arr = line 185 | elif line_arr.shape[0] != 1: 186 | line_arr = np.squeeze(line_arr) 187 | line = line_arr 188 | elif line_arr.shape[0] == 1: 189 | line_arr = np.squeeze(line_arr, axis=1) 190 | line = line_arr 191 | 192 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 193 | 194 | line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree 195 | 196 | line_arr1 = line_arr1[np.abs(slope_degree1) < 160] 197 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160] 198 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 199 | slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90] 200 | 201 | line_arr2 = line_arr2[np.abs(slope_degree2) > 30] 202 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30] 203 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 204 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 205 | 206 | if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0: 207 | line_arr = 
np.concatenate((line_arr1, line_arr1), axis=0) 208 | slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0) 209 | 210 | elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0: 211 | line_arr = line_arr1 212 | slope_degree = slope_degree1 213 | elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0: 214 | line_arr = line_arr2 215 | slope_degree = slope_degree2 216 | else: 217 | line_arr = line_arr[np.abs(slope_degree) < 160] 218 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 219 | line_arr = line_arr[np.abs(slope_degree) > 30] 220 | slope_degree = slope_degree[np.abs(slope_degree) > 30] 221 | 222 | L_lines, R_lines = np.array([[0, height, 0, height*0.7]]), line_arr[(slope_degree < 0), :] 223 | 224 | 225 | temp = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) 226 | left_fit_line = get_fitline_left(img, L_lines) 227 | right_fit_line = get_fitline_right(img, R_lines) 228 | 229 | draw(img, right_fit_line[:4]) 230 | draw(img, left_fit_line[:4]) 231 | 232 | result = cv2.addWeighted(img, 1, temp, 1, 0) 233 | 234 | return result, left_fit_line[:4], right_fit_line[:4] 235 | -------------------------------------------------------------------------------- /Lane Detection/line.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import time 4 | 5 | global lx1, lx2, ly1, ly2, gx1, gx2, gy1, gy2, warped_eq1 6 | lx1, lx2, ly1, ly2, gx1, gy1, gx2, gy2 = int(0), int(0), int(0), int(0),int(0), int(0), int(0), int(0) 7 | 8 | 9 | # Frame width & Height 10 | w = 800 11 | h = 600 12 | 13 | def ROI(img, vertices, color3=(255, 255, 255), color1=255): 14 | mask = np.zeros_like(img) 15 | 16 | if len(img.shape) > 2: 17 | color = color3 18 | else: 19 | color = color1 20 | 21 | cv2.fillPoly(mask, vertices, color) 22 | ROI_image = cv2.bitwise_and(img, mask) 23 | 24 | return ROI_image 25 | 26 | def get_fitline_left(img, f_lines): 27 | global lx1, lx2, ly1, ly2 28 | lines = f_lines.reshape(f_lines.shape[0] * 2, 2) 29 | vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01) 30 | 31 | if lines.shape[0] != 0: 32 | x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1 33 | x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100) 34 | lx1, ly1, lx2, ly2 = x1, y1, x2, y2 35 | 36 | else: 37 | if lx1 == 0 and lx2 == 0 and ly1 == 0 and ly2 == 0: 38 | x1, y1 = img.shape[1] / 2, img.shape[0] / 2 39 | x2, y2 = img.shape[1] / 2, img.shape[0] / 2 40 | else: 41 | x1, y1, x2, y2 = lx1, ly1, lx2, ly2 42 | 43 | result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2] 44 | return result 45 | 46 | def get_fitline_right(img, f_lines): 47 | global gx1, gx2, gy1, gy2 48 | lines = f_lines.reshape(f_lines.shape[0] * 2, 2) 49 | vx, vy, x, y = cv2.fitLine(lines, cv2.DIST_L2, 0, 0.01, 0.01) 50 | 51 | if lines.shape[0] != 0: 52 | x1, y1 = int(((img.shape[0] - 1) - y) / vy * vx + x), img.shape[0] - 1 53 | x2, y2 = int(((img.shape[0] / 2 + 100) - y) / vy * vx + x), int(img.shape[0] / 2 + 100) 54 | gx1, gy1, gx2, gy2 = x1, y1, x2, y2 55 | 56 | else: 57 | if gx1 == 0 and gx2 == 0 and gy1 == 0 and gy2 == 0: 58 | x1, y1 = img.shape[1] / 2, img.shape[0] / 2 59 | x2, y2 = img.shape[1] / 2, img.shape[0] / 2 60 | else: 61 | x1, y1, x2, y2 = gx1, gy1, gx2, gy2 62 | 63 | result = [x1, y1, x2, y2, (x1 + x2) / 2, (y1 + y2) / 2] 64 | return result 65 | 66 | def draw(img, lines): 67 | cv2.line(img, (lines[0], lines[1]), (lines[2], lines[3]), (0, 0, 255), 10) 68 | 69 | 70 | count=0 71 | def 
get_lane(img): 72 | 73 | #readRefImages() 74 | global line 75 | 76 | height, weight = img.shape[:2] 77 | 78 | #canny_img = cv2.Canny(Gray, 50, 200) # black, white 79 | canny_img = cv2.Canny(img, 150, 200)#black, white 80 | 81 | 82 | #cv2.imshow("canny", canny_img) 83 | 84 | vertices = np.array([[(0, height), (0, height / 2), (weight, height / 2), (weight, height)]], 85 | dtype=np.int32) 86 | 87 | #vertices = np.array([[(0, height), (100, height / 2), (weight-100, height / 2), (weight, height)]], 88 | # dtype=np.int32) 89 | 90 | ROI_img = ROI(canny_img, vertices) 91 | cv2.imshow('roi', ROI_img) 92 | img = cv2.polylines(img, vertices, True, (255, 0 ,0), 5) 93 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 94 | 95 | if type(line_arr).__name__ == 'NoneType': 96 | line_arr = line 97 | elif line_arr.shape[0] != 1: 98 | line_arr = np.squeeze(line_arr) 99 | line = line_arr 100 | elif line_arr.shape[0] == 1: 101 | line_arr = np.squeeze(line_arr, axis=1) 102 | line = line_arr 103 | 104 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 105 | 106 | line_arr1,line_arr2,slope_degree1,slope_degree2 = line_arr,line_arr,slope_degree,slope_degree 107 | line_arr1 = line_arr1[np.abs(slope_degree1) < 180] 108 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 180] 109 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 110 | slope_degree1 = slope_degree1[np.abs(slope_degree1) >90] 111 | 112 | line_arr2 = line_arr2[np.abs(slope_degree2) > 0] 113 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 0] 114 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 115 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 116 | 117 | if line_arr1.shape[0]!=0 and line_arr2.shape[0]!=0: 118 | line_arr = np.concatenate((line_arr1,line_arr1),axis=0) 119 | slope_degree = np.concatenate((slope_degree2,slope_degree1),axis=0) 120 | 121 | elif line_arr1.shape[0]!=0 and line_arr2.shape[0]==0: 122 | 123 | line_arr = line_arr1 124 | slope_degree = slope_degree1 125 | elif line_arr1.shape[0]==0 and line_arr2.shape[0]!=0: 126 | line_arr = line_arr2 127 | slope_degree = slope_degree2 128 | 129 | else: 130 | 131 | line_arr = line_arr[np.abs(slope_degree) < 160] 132 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 133 | line_arr = line_arr[np.abs(slope_degree) >30] 134 | slope_degree = slope_degree[np.abs(slope_degree)>30] 135 | 136 | L_lines, R_lines = line_arr[(slope_degree > 0), :], line_arr[(slope_degree < 0), :] 137 | 138 | if L_lines.shape[0]!=0 and R_lines.shape[0]==0: 139 | #vertices = np.array([[(0, height), (0, height * 2 / 3), (weight/2, height * 2 / 3), (weight/2, height)]], 140 | # dtype=np.int32) 141 | vertices = np.array([[(0, height), (0, height / 2), (weight / 2, height / 2), (weight / 2, height)]], 142 | dtype=np.int32) 143 | ROI_img = ROI(canny_img, vertices) 144 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 145 | if type(line_arr).__name__ == 'NoneType': 146 | line_arr = line 147 | elif line_arr.shape[0] != 1: 148 | line_arr = np.squeeze(line_arr) 149 | line = line_arr 150 | elif line_arr.shape[0] == 1: 151 | line_arr = np.squeeze(line_arr, axis=1) 152 | line = line_arr 153 | 154 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 155 | 156 | line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree 157 | 158 | line_arr1 = 
line_arr1[np.abs(slope_degree1) < 160] 159 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160] 160 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 161 | slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90] 162 | 163 | line_arr2 = line_arr2[np.abs(slope_degree2) > 30] 164 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30] 165 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 166 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 167 | 168 | if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0: 169 | line_arr = np.concatenate((line_arr1, line_arr1), axis=0) 170 | slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0) 171 | 172 | elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0: 173 | line_arr = line_arr1 174 | slope_degree = slope_degree1 175 | elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0: 176 | line_arr = line_arr2 177 | slope_degree = slope_degree2 178 | else: 179 | line_arr = line_arr[np.abs(slope_degree) < 160] 180 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 181 | line_arr = line_arr[np.abs(slope_degree) > 30] 182 | slope_degree = slope_degree[np.abs(slope_degree) > 30] 183 | 184 | L_lines, R_lines = line_arr[(slope_degree > 0), :], np.array([[weight,height, weight, height*0.7]]) 185 | 186 | elif L_lines.shape[0]==0 and R_lines.shape[0]!=0: 187 | #vertices = np.array([[(weight/2, height * 2 / 3), (weight/2, height), (weight, height * 2 / 3), (weight, height)]], 188 | # dtype=np.int32) 189 | vertices = np.array([[(0, height), (0, height / 2), (weight / 2, height / 2), (weight / 2, height)]], 190 | dtype=np.int32) 191 | 192 | ROI_img = ROI(canny_img, vertices) 193 | line_arr = cv2.HoughLinesP(ROI_img, 1, np.pi / 180, 50, minLineLength=10, maxLineGap=50) 194 | if type(line_arr).__name__ == 'NoneType': 195 | line_arr = line 196 | elif line_arr.shape[0] != 1: 197 | line_arr = np.squeeze(line_arr) 198 | line = line_arr 199 | elif line_arr.shape[0] == 1: 200 | line_arr = np.squeeze(line_arr, axis=1) 201 | line = line_arr 202 | 203 | slope_degree = (np.arctan2(line_arr[:, 1] - line_arr[:, 3], line_arr[:, 0] - line_arr[:, 2]) * 180) / np.pi 204 | 205 | line_arr1, line_arr2, slope_degree1, slope_degree2 = line_arr, line_arr, slope_degree, slope_degree 206 | 207 | line_arr1 = line_arr1[np.abs(slope_degree1) < 160] 208 | slope_degree1 = slope_degree1[np.abs(slope_degree1) < 160] 209 | line_arr1 = line_arr1[np.abs(slope_degree1) > 90] 210 | slope_degree1 = slope_degree1[np.abs(slope_degree1) > 90] 211 | 212 | line_arr2 = line_arr2[np.abs(slope_degree2) > 30] 213 | slope_degree2 = slope_degree2[np.abs(slope_degree2) > 30] 214 | line_arr2 = line_arr2[np.abs(slope_degree2) < 90] 215 | slope_degree2 = slope_degree2[np.abs(slope_degree2) < 90] 216 | 217 | if line_arr1.shape[0] != 0 and line_arr2.shape[0] != 0: 218 | line_arr = np.concatenate((line_arr1, line_arr1), axis=0) 219 | slope_degree = np.concatenate((slope_degree2, slope_degree1), axis=0) 220 | 221 | elif line_arr1.shape[0] != 0 and line_arr2.shape[0] == 0: 222 | line_arr = line_arr1 223 | slope_degree = slope_degree1 224 | elif line_arr1.shape[0] == 0 and line_arr2.shape[0] != 0: 225 | line_arr = line_arr2 226 | slope_degree = slope_degree2 227 | else: 228 | line_arr = line_arr[np.abs(slope_degree) < 160] 229 | slope_degree = slope_degree[np.abs(slope_degree) < 160] 230 | line_arr = line_arr[np.abs(slope_degree) > 30] 231 | slope_degree = slope_degree[np.abs(slope_degree) > 30] 232 | 233 | L_lines, R_lines = np.array([[0, height, 0, height*0.7]]), 
line_arr[(slope_degree < 0), :]
234 | 
235 | 
236 |     #temp = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
237 | 
238 |     left_fit_line = get_fitline_left(img, L_lines)
239 |     right_fit_line = get_fitline_right(img, R_lines)
240 | 
241 |     draw(img, right_fit_line[:4])
242 |     draw(img, left_fit_line[:4])
243 |     center_x_point = int((left_fit_line[4] + right_fit_line[4]) / 2)
244 |     center_y_point = int((left_fit_line[5] + right_fit_line[5]) / 2)
245 |     result = cv2.circle(img, (center_x_point, center_y_point), 5, (0, 0, 255), -1)
246 | 
247 |     #result = cv2.addWeighted(img, 1, temp, 1, 0)
248 | 
249 |     # zero-pad to four characters so the receiver can frame each message with recv(4)
250 |     center_x_point = str(center_x_point).zfill(4)
251 | 
252 |     return result, center_x_point
--------------------------------------------------------------------------------
/YOLO/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import copy
3 | import cv2
4 | from parameter import *
5 | 
6 | class BoundBox:
7 |     def __init__(self, class_num):
8 |         self.x, self.y, self.w, self.h, self.c = 0., 0., 0., 0., 0.
9 |         self.probs = np.zeros((class_num,))
10 | 
11 |     def iou(self, box):
12 |         intersection = self.intersect(box)
13 |         union = self.w * self.h + box.w * box.h - intersection
14 |         return intersection / union
15 | 
16 |     def intersect(self, box):
17 |         width = self.__overlap([self.x - self.w / 2, self.x + self.w / 2], [box.x - box.w / 2, box.x + box.w / 2])
18 |         height = self.__overlap([self.y - self.h / 2, self.y + self.h / 2], [box.y - box.h / 2, box.y + box.h / 2])
19 |         return width * height
20 | 
21 |     def __overlap(self, interval_a, interval_b):
22 |         x1, x2 = interval_a
23 |         x3, x4 = interval_b
24 |         if x3 < x1:
25 |             if x4 < x1:
26 |                 return 0
27 |             else:
28 |                 return min(x2, x4) - x1
29 |         else:
30 |             if x2 < x3:
31 |                 return 0
32 |             else:
33 |                 return min(x2, x4) - x3  # the overlap starts at x3 here, not x1
34 | 
35 | 
36 | class WeightReader:
37 |     def __init__(self, weight_file):
38 |         self.offset = 4
39 |         self.all_weights = np.fromfile(weight_file, dtype='float32')
40 | 
41 |     def read_bytes(self, size):
42 |         self.offset = self.offset + size
43 |         return self.all_weights[self.offset - size:self.offset]
44 | 
45 |     def reset(self):
46 |         self.offset = 4
47 | 
48 | 
49 | def interpret_netout(image, netout):
50 |     boxes = []
51 |     # interpret the output by the network
52 |     for row in range(GRID_H):
53 |         for col in range(GRID_W):
54 |             for b in range(BOX):
55 |                 box = BoundBox(CLASS)
56 | 
57 |                 # first 5 weights for x, y, w, h and confidence
58 |                 box.x, box.y, box.w, box.h, box.c = netout[row, col, b, :5]
59 | 
60 |                 box.x = (col + sigmoid(box.x)) / GRID_W
61 |                 box.y = (row + sigmoid(box.y)) / GRID_H
62 |                 box.w = ANCHORS[2 * b + 0] * np.exp(box.w) / GRID_W
63 |                 box.h = ANCHORS[2 * b + 1] * np.exp(box.h) / GRID_H
64 |                 box.c = sigmoid(box.c)
65 | 
66 |                 # remaining weights are the class likelihoods
67 |                 classes = netout[row, col, b, 5:]
68 |                 box.probs = softmax(classes) * box.c
69 |                 box.probs *= box.probs > THRESHOLD
70 | 
71 |                 boxes.append(box)
72 | 
73 |     # suppress non-maximal boxes
74 |     for c in range(CLASS):
75 |         sorted_indices = list(reversed(np.argsort([box.probs[c] for box in boxes])))
76 | 
77 |         for i in range(len(sorted_indices)):
78 |             index_i = sorted_indices[i]
79 | 
80 |             if boxes[index_i].probs[c] == 0:
81 |                 continue
82 |             else:
83 |                 for j in 
range(i + 1, len(sorted_indices)): 84 | index_j = sorted_indices[j] 85 | 86 | if boxes[index_i].iou(boxes[index_j]) >= 0.4: 87 | boxes[index_j].probs[c] = 0 88 | 89 | # draw the boxes using a threshold 90 | mark = [] 91 | for box in boxes: 92 | max_indx = np.argmax(box.probs) 93 | max_prob = box.probs[max_indx] 94 | thresh = THRESHOLD 95 | #if LABELS[max_indx] == 'traffic_light': 96 | # thresh = 0.6 97 | 98 | if max_prob > thresh: 99 | xmin = int((box.x - box.w / 2) * image.shape[1]) 100 | xmax = int((box.x + box.w / 2) * image.shape[1]) 101 | ymin = int((box.y - box.h / 2) * image.shape[0]) 102 | ymax = int((box.y + box.h / 2) * image.shape[0]) 103 | 104 | #if LABELS[max_indx] == 'traffic_light': 105 | # ymin = int(ymax * 0.7) 106 | cv2.rectangle(image, (xmin, ymin), (xmax, ymax), COLORS[max_indx], 5) 107 | cv2.putText(image, LABELS[max_indx]+" "+str(round(max_prob,2)), (xmin, ymin - 12), 0, 1e-3 * image.shape[0], (0, 255, 0), 2) 108 | mark.append({'label' : LABELS[max_indx], 'prob' : max_prob, 'xmin' : xmin, 109 | 'ymin' : ymin, 'xmax' : xmax, 'ymax' : ymax}) 110 | return image, mark 111 | 112 | 113 | def parse_annotation(ann_dir): 114 | f = open(ann_dir, 'r') 115 | _f = f.read() 116 | f_content = _f.split('\n') 117 | 118 | all_img = [] 119 | current = "" 120 | 121 | for ann in f_content: 122 | img_data = ann.split(' ') 123 | if img_data == ['']: 124 | break 125 | 126 | file_name, width, height, xmin, ymin, xmax, ymax, label = img_data 127 | 128 | if not current == file_name: 129 | img = {'height': float(width), 'width': float(height), 'object': [], 'filename': file_name} 130 | current = file_name 131 | all_img.append(img) 132 | 133 | img['object'].append({'xmin': float(xmin), 'ymin': float(ymin), 134 | 'name': label, 'xmax': float(xmax), 135 | 'ymax': float(ymax)}) 136 | 137 | return all_img 138 | 139 | 140 | def aug_img(train_instance): 141 | path = train_instance['filename'] 142 | all_obj = copy.deepcopy(train_instance['object'][:]) 143 | img = cv2.imread(img_dir + path) 144 | h, w, c = img.shape 145 | 146 | # scale the image 147 | scale = np.random.uniform() / 10. + 1. 148 | img = cv2.resize(img, (0, 0), fx=scale, fy=scale) 149 | 150 | # translate the image 151 | max_offx = (scale - 1.) * w 152 | max_offy = (scale - 1.) * h 153 | offx = int(np.random.uniform() * max_offx) 154 | offy = int(np.random.uniform() * max_offy) 155 | img = img[offy: (offy + h), offx: (offx + w)] 156 | 157 | # flip the image 158 | flip = np.random.binomial(1, .5) 159 | if flip > 0.5: img = cv2.flip(img, 1) 160 | 161 | # re-color 162 | t = [np.random.uniform()] 163 | t += [np.random.uniform()] 164 | t += [np.random.uniform()] 165 | t = np.array(t) 166 | 167 | img = img * (1 + t) 168 | img = img / (255. * 2.) 
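    # (each channel was scaled by a random factor in [1, 2) just above, so
    # dividing by 255*2 keeps pixel values roughly within [0, 1])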
169 | 170 | # resize the image to standard size 171 | img = cv2.resize(img, (NORM_H, NORM_W)) 172 | img = img[:, :, ::-1] 173 | 174 | # fix object's position and size 175 | for obj in all_obj: 176 | for attr in ['xmin', 'xmax']: 177 | obj[attr] = int(obj[attr] * scale - offx) 178 | obj[attr] = int(obj[attr] * float(NORM_W) / w) 179 | obj[attr] = max(min(obj[attr], NORM_W), 0) 180 | 181 | for attr in ['ymin', 'ymax']: 182 | obj[attr] = int(obj[attr] * scale - offy) 183 | obj[attr] = int(obj[attr] * float(NORM_H) / h) 184 | obj[attr] = max(min(obj[attr], NORM_H), 0) 185 | 186 | if flip > 0.5: 187 | xmin = obj['xmin'] 188 | obj['xmin'] = NORM_W - obj['xmax'] 189 | obj['xmax'] = NORM_W - xmin 190 | 191 | return img, all_obj 192 | 193 | 194 | def data_gen(all_img, batch_size): 195 | num_img = len(all_img) 196 | shuffled_indices = np.random.permutation(np.arange(num_img)) 197 | l_bound = 0 198 | r_bound = batch_size if batch_size < num_img else num_img 199 | 200 | while True: 201 | if l_bound == r_bound: 202 | l_bound = 0 203 | r_bound = batch_size if batch_size < num_img else num_img 204 | shuffled_indices = np.random.permutation(np.arange(num_img)) 205 | 206 | batch_size = r_bound - l_bound 207 | currt_inst = 0 208 | x_batch = np.zeros((batch_size, NORM_W, NORM_H, 3)) 209 | y_batch = np.zeros((batch_size, GRID_W, GRID_H, BOX, 5 + CLASS)) 210 | 211 | for index in shuffled_indices[l_bound:r_bound]: 212 | train_instance = all_img[index] 213 | 214 | # augment input image and fix object's position and size 215 | img, all_obj = aug_img(train_instance) 216 | # for obj in all_obj: 217 | # cv2.rectangle(img[:,:,::-1], (obj['xmin'],obj['ymin']), (obj['xmax'],obj['ymax']), (1,1,0), 3) 218 | # plt.imshow(img); plt.show() 219 | 220 | # construct output from object's position and size 221 | for obj in all_obj: 222 | box = [] 223 | center_x = .5 * (obj['xmin'] + obj['xmax']) # xmin, xmax 224 | center_x = center_x / (float(NORM_W) / GRID_W) 225 | center_y = .5 * (obj['ymin'] + obj['ymax']) # ymin, ymax 226 | center_y = center_y / (float(NORM_H) / GRID_H) 227 | 228 | grid_x = int(np.floor(center_x)) 229 | grid_y = int(np.floor(center_y)) 230 | 231 | if grid_x < GRID_W and grid_y < GRID_H: 232 | obj_indx = LABELS.index(obj['name']) 233 | box = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']] 234 | 235 | y_batch[currt_inst, grid_y, grid_x, :, 0:4] = BOX * [box] 236 | y_batch[currt_inst, grid_y, grid_x, :, 4] = BOX * [1.] 237 | y_batch[currt_inst, grid_y, grid_x, :, 5:] = BOX * [[0.] * CLASS] 238 | y_batch[currt_inst, grid_y, grid_x, :, 5 + obj_indx] = 1.0 239 | 240 | # concatenate batch input from the image 241 | x_batch[currt_inst] = img 242 | currt_inst += 1 243 | 244 | del img, all_obj 245 | 246 | yield x_batch, y_batch 247 | 248 | l_bound = r_bound 249 | r_bound = r_bound + batch_size 250 | if r_bound > num_img: r_bound = num_img 251 | 252 | 253 | def sigmoid(x): 254 | return 1. / (1. 
+ np.exp(-x))
255 | 
256 | 
257 | def softmax(x):
258 |     return np.exp(x) / np.sum(np.exp(x), axis=0)
259 | 
260 | def Rotate(src, degrees):
261 |     if degrees == 90:
262 |         dst = cv2.transpose(src)
263 |         dst = cv2.flip(dst, 1)
264 | 
265 |     elif degrees == 180:
266 |         dst = cv2.flip(src, -1)
267 | 
268 |     elif degrees == 270:
269 |         dst = cv2.transpose(src)
270 |         dst = cv2.flip(dst, 0)
271 |     else:
272 |         dst = src  # pass unsupported angles through unchanged instead of raising
273 |     return dst
274 | 
275 | 
276 | def get_Object(image, mark, Check):
277 |     label, prob, xmin, ymin, xmax, ymax = mark['label'], mark['prob'], mark['xmin'], \
278 |                                           mark['ymin'], mark['xmax'], mark['ymax']
279 |     #print("ymax : ",ymax)
280 |     #print("label : ", label, "prob : ", prob, "xmin : ", xmin, "ymin : ", ymin, "xmax : ", xmax, "ymax : ", ymax)
281 |     try:
282 |         if label == 'traffic_light':
283 |             Object = image[ymin:ymax, xmin:xmax, :]
284 |             b, g, r = 0, 0, 0
285 | 
286 |             # sum the channels over the crop; the hue returned by rgb2hsv
287 |             # depends only on the channel ratios, so the raw sums suffice
288 |             for y in range(ymax - ymin):
289 |                 for x in range(xmax - xmin):
290 |                     try:
291 |                         b += Object[y, x, 0]
292 |                         g += Object[y, x, 1]
293 |                         r += Object[y, x, 2]
294 |                     except:
295 |                         continue
296 |             h, s, v = rgb2hsv(r,g,b)
297 |             if h < 120:
298 |                 label = "red"
299 |                 print("red ", h)
300 |             elif h >= 120:
301 |                 label = "green"
302 |                 print("green ", h)
303 | 
304 |         # object detected: bump the hit count and clear the miss count
305 |         Check[label][2] = True
306 |         Check[label][0] += 1
307 |         Check[label][1] = 0
308 |     except:
309 |         print("error in color extracting")
310 |         return None, 0, 0, 0, 0
311 | 
312 |     return label, int(xmin), int(xmax), int(ymin), int(ymax)
313 | 
314 | def ccw(line, p2):  # counterclockwise (orientation) test
315 |     p0 = [line[0], line[1]]
316 |     p1 = [line[2], line[3]]
317 | 
318 |     dx1 = p1[0] - p0[0]
319 |     dy1 = p1[1] - p0[1]
320 |     dx2 = p2[0] - p0[0]
321 |     dy2 = p2[1] - p0[1]
322 | 
323 |     if (dx1 * dy2 > dy1 * dx2):
324 |         return 1   # p2 is to the right of the directed line
325 |     if (dx1 * dy2 < dy1 * dx2):
326 |         return -1  # p2 is to the left
327 | 
328 |     return 0       # collinear
329 | 
330 | 
331 | def rgb2hsv(r, g, b):
332 |     r, g, b = r/255.0, g/255.0, b/255.0
333 |     mx = max(r, g, b)
334 |     mn = min(r, g, b)
335 |     df = mx-mn
336 |     if mx == mn:
337 |         h = 0
338 |     elif mx == r:
339 |         h = (60 * ((g-b)/df) + 360) % 360
340 |     elif mx == g:
341 |         h = (60 * ((b-r)/df) + 120) % 360
342 |     elif mx == b:
343 |         h = (60 * ((r-g)/df) + 240) % 360
344 |     if mx == 0:
345 |         s = 0
346 |     else:
347 |         s = df/mx
348 |     v = mx
349 |     return h, s, v
--------------------------------------------------------------------------------
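For reference, Streaming.py treats a detection as in-lane when ccw puts the box's bottom-right corner to the right of the fitted left lane line and its bottom-left corner to the left of the right line. A tiny worked example of that gate (coordinates invented for illustration):

from utils import ccw

left_line  = [100, 600, 300, 400]   # x1, y1, x2, y2 of the fitted left lane line
right_line = [700, 600, 500, 400]   # x1, y1, x2, y2 of the fitted right lane line

# Box bottom corners, as used in Streaming.py's gate.
assert ccw(left_line,  [350, 550]) == 1    # right of the left line
assert ccw(right_line, [450, 550]) == -1   # left of the right line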