├── yolov8_kolay_yol.py
├── Yolov8_ile_IHA_algilama_ve_izleme_ornegi
│   ├── hareketli_ucak3.mp4
│   ├── yolov8_hareketli_ucak_izleme_1.py
│   └── yolov8_hareketli_ucak_izleme_2.py
├── yolov8-segmentasyon
│   ├── yolov8_ile_segmentasyon.py
│   ├── yolov8_ile_segmentasyon-1.py
│   ├── yolov8_ile_segmentasyon-2.py
│   ├── yolov8_ile_segmentasyon-3.py
│   └── yolov8_ile_segmentasyon-4.py
├── Yolov8-ile-kitap-tanima
│   ├── train_offline.py
│   ├── kitap_projesi.ipynb
│   ├── main.py
│   ├── splitdata.py
│   └── veri_seti_olusturma.py
├── yolov8-byteTrack-supervision
│   ├── yolov8_bytetrack.py
│   ├── yolov8_bytetrack-1.py
│   ├── yolov8_bytetrack-3.py
│   ├── yolov8_bytetrack-2.py
│   └── yolov8_bytetrack-4.py
├── yolov8-opencv-hiz-tespiti
│   ├── deneme.py
│   ├── hiz_tespit11.py
│   ├── hiz_tespit.py
│   ├── hiz_tespit2.py
│   ├── hiz_tespit3.py
│   ├── hiz_tespit4.py
│   ├── hiz_tespit5.py
│   ├── hiz_tespit6.py
│   ├── hiz_tespit10.py
│   ├── hiz_tespit9.py
│   ├── hiz_tespit8.py
│   └── hiz_tespit7.py
├── yoloV8_predict.py
├── ip-kamera-webcam-yolov8
│   ├── phnecam2.py
│   ├── phnecam3.py
│   └── phnecam4.py
├── yolov8_ile_nesne_izleme-6.py
├── yolov8_webcam_supervision_3.py
├── yolov8 ve supervision ile webcam görünütüsü içinde özel bir bölgedeki nesneleri saydırma.txt
├── yolov8_opencv_kod.py
├── yolov8_nesne_bulaniklastirma.py
├── yolov8_nesne_izleme_4.py
└── yolov8_nesne_sayma_4.py

/yolov8_kolay_yol.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

sonuc = model.predict(source="ucus1.mp4", show=True, save=True)
--------------------------------------------------------------------------------
/Yolov8_ile_IHA_algilama_ve_izleme_ornegi/hareketli_ucak3.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bulentsezen/YoloV8/HEAD/Yolov8_ile_IHA_algilama_ve_izleme_ornegi/hareketli_ucak3.mp4
--------------------------------------------------------------------------------
/yolov8-segmentasyon/yolov8_ile_segmentasyon.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO

def main():
    model = YOLO("yolov8n-seg.pt")
    result = model.track(source=0, show=True, conf=0.5)


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/Yolov8-ile-kitap-tanima/train_offline.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

def main():
    model.train(data='Dataset/SplitData/data.yaml', epochs=200, patience=500)


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/yolov8-byteTrack-supervision/yolov8_bytetrack.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

sonuc = model.track(source=0, show=True, tracker="bytetrack.yaml")
# sonuc = model.track(source="cctv_trafik.mp4", show=True, tracker="bytetrack.yaml")
--------------------------------------------------------------------------------
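
Note: if you also need the IDs that ByteTrack assigns, model.track can be consumed frame by frame instead of as one blocking call. A minimal sketch, reusing the stream=True / result.boxes.id pattern that yolov8_ile_nesne_izleme-6.py in this repo uses:

from ultralytics import YOLO

model = YOLO('yolov8n.pt')

# stream=True yields one result per frame instead of accumulating a list
for result in model.track(source=0, stream=True, tracker="bytetrack.yaml"):
    if result.boxes.id is None:  # the tracker may not have assigned IDs yet
        continue
    ids = result.boxes.id.cpu().numpy().astype(int)
    print("tracked IDs in this frame:", ids)
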
/yolov8-opencv-hiz-tespiti/deneme.py:
--------------------------------------------------------------------------------
import numpy as np

points = np.array([[1, 2], [4, 5]])
print("points :", points)

reshaped_points = points.reshape(-1, 1, 2).astype(np.float32)
print("3-d reshaped points:", reshaped_points)

transformed_points = reshaped_points.reshape(-1, 2)
print("2-d points:", transformed_points)
--------------------------------------------------------------------------------
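
Note: the (-1, 1, 2) float32 shape rehearsed above is exactly what cv2.perspectiveTransform expects; hiz_tespit6.py below relies on it. A small round-trip sketch with made-up corner coordinates:

import numpy as np
import cv2

# map a 100x100 square onto itself shifted by (10, 20) -- illustrative values only
src = np.array([[0, 0], [100, 0], [100, 100], [0, 100]], dtype=np.float32)
dst = src + np.array([10, 20], dtype=np.float32)
m = cv2.getPerspectiveTransform(src, dst)

points = np.array([[1, 2], [4, 5]], dtype=np.float32)
transformed = cv2.perspectiveTransform(points.reshape(-1, 1, 2), m)
print(transformed.reshape(-1, 2))  # every point shifted by (10, 20)
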
/yolov8-segmentasyon/yolov8_ile_segmentasyon-1.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2

def main():
    model = YOLO("yolov8n-seg.pt")

    for result in model.track(source=0, show=True, stream=True):
        frame = result.orig_img

        cv2.imshow("yolov8", frame)

        if (cv2.waitKey(30) == 27):
            break

if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/yoloV8_predict.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
from PIL import Image

# load the model
model = YOLO('yolov8n.pt')  # load a pretrained model

# run object prediction on video or webcam footage with the model
#sonuc = model.predict(source="0")  # 0 is the webcam
#sonuc = model.predict(source="Video.mp4", show=True)

# object detection on an image file
im1 = Image.open("Elon_Musk.jpg")
sonuc = model.predict(source=im1, save=True)  # save=True writes the annotated image to disk
--------------------------------------------------------------------------------
/ip-kamera-webcam-yolov8/phnecam2.py:
--------------------------------------------------------------------------------
import cv2

#Starting the video capture

cap = cv2.VideoCapture("http://192.168.1.22:8080//video")

while(cap.isOpened()):
    ret, img = cap.read()

    #Controlling the algorithm with keys
    try:
        img = cv2.resize(img, (640, 480))
        cv2.imshow('img', img)
        a = cv2.waitKey(1)
        if a == ord('q'):
            break
    except cv2.error:
        print("stream ended")
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit11.py:
--------------------------------------------------------------------------------
import supervision as sv
import cv2
from ultralytics import YOLO

if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="cctv_trafik.mp4")
    model = YOLO("yolov8n.pt")

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=4)

    frame_generator = sv.get_video_frames_generator(source_path="cctv_trafik.mp4")

    for frame in frame_generator:
        result = model(frame)[0]
        detections = sv.Detections.from_ultralytics(result)

        annotated_frame = frame.copy()
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections
        )

        outfile = 'resim_1.jpg'
        cv2.imwrite(outfile, annotated_frame)  # overwritten every frame; only the last frame survives

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/ip-kamera-webcam-yolov8/phnecam3.py:
--------------------------------------------------------------------------------
# object detection with yolov8 / ultralytics

import cv2
from ultralytics import YOLO
#Starting the video capture
model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("http://192.168.1.22:8080//video")


while(cap.isOpened()):
    ret, img = cap.read()
    img = cv2.resize(img, (640, 480))
    results = model(img, stream=True)

    #Controlling the algorithm with keys
    try:
        for r in results:
            boxes = r.boxes
            for box in boxes:
                # Bounding Box
                x1, y1, x2, y2 = box.xyxy[0]
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3)


        cv2.imshow('img', img)
        a = cv2.waitKey(1)
        if a == ord('q'):
            break
    except cv2.error:
        print("stream ended")
        break

cap.release()
cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit.py:
--------------------------------------------------------------------------------
# pip install ultralytics
# pip install supervision
# uninstall opencv-python and opencv-python-headless (then install opencv version 4.5.5.62)
import supervision as sv
import cv2
from ultralytics import YOLO

if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=4)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    for frame in frame_generator:
        result = model(frame)[0]
        detections = sv.Detections.from_ultralytics(result)

        annotated_frame = frame.copy()
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections
        )

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit2.py:
--------------------------------------------------------------------------------
## shrink frames that do not fit on the screen by a factor of 0.3 with resize:
## half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
import supervision as sv
import cv2
from ultralytics import YOLO

if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=4)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)

        annotated_frame = half_frame.copy()
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections
        )

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
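
Note: the detections above live in the resized coordinate space. If full-resolution pixel coordinates are needed, the boxes can be scaled back; a sketch, assuming the same 0.3 factor as in the resize call:

scale = 0.3  # must match the fx/fy used in cv2.resize above

# xyxy boxes from the resized frame, mapped back to full-resolution pixels
for x1, y1, x2, y2 in detections.xyxy / scale:
    print(int(x1), int(y1), int(x2), int(y2))
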
/yolov8-segmentasyon/yolov8_ile_segmentasyon-2.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2

def main():
    model = YOLO("yolov8n-seg.pt")

    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break

        print('frame_shape:', frame.shape)

        # Run YOLOv8 inference on the frame
        results = model(frame, classes=0, verbose=False)

        # skip the frame if no person mask was found
        if results[0].masks is None:
            continue

        # get the box object
        box = results[0].boxes[0].xyxy[0]
        box = box.numpy().astype(int)

        # background subtraction
        mask = (results[0].masks.data[0].numpy() * 255).astype('uint8')

        cv2.imshow('Background remove', mask)
        #cv2.imshow("Orijinal", frame)
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break



if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/yolov8_ile_nesne_izleme-6.py:
--------------------------------------------------------------------------------
import cv2
from ultralytics import YOLO
import supervision as sv

def main():
    box_annotator = sv.BoxAnnotator(
        thickness=2,
        text_thickness=1,
        text_scale=0.5)

    model = YOLO("yolov8n.pt")
    for result in model.track(source=0, show=True, stream=True):
        frame = result.orig_img

        detections = sv.Detections.from_yolov8(result)

        if result.boxes.id is not None:
            detections.tracker_id = result.boxes.id.cpu().numpy().astype(int)

        detections = detections[(detections.class_id != 0)]  # drop the person class (id 0)

        labels = [
            f"{tracker_id} {model.model.names[class_id]} {confidence:0.2f}"
            for _, confidence, class_id, tracker_id
            in detections
        ]

        frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)

        cv2.imshow("yolov8", frame)

        if (cv2.waitKey(30) == 27):
            break


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/Yolov8-ile-kitap-tanima/kitap_projesi.ipynb:
--------------------------------------------------------------------------------
{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "fPIov35Lvuck"
      },
      "outputs": [],
      "source": [
        "!nvidia-smi"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "!pip install ultralytics"
      ],
      "metadata": {
        "id": "xcChrPQowJ-y"
      },
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "from ultralytics import YOLO"
      ],
      "metadata": {
        "id": "WRdMVZ5Iv_ZA"
      },
      "execution_count": 3,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "!yolo task=detect mode=train model=yolov8n.pt data=../content/Data/data.yaml epochs=200 imgsz=640 patience=100"
      ],
      "metadata": {
        "id": "oEYldpLty9E9"
      },
      "execution_count": null,
      "outputs": []
    }
  ]
}
--------------------------------------------------------------------------------
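
Note: after the CLI training cell above finishes, Ultralytics writes the weights into its run directory, and main.py in this folder then loads best.pt. A sketch of picking the trained weights up (runs/detect/train is the usual default output location; the actual run may be train2, train3, ..., and the test image name is made up):

from ultralytics import YOLO

model = YOLO("runs/detect/train/weights/best.pt")
results = model.predict(source="kitap_test.jpg", save=True)  # hypothetical test image
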
import YOLO 13 | 14 | if __name__ == "__main__": 15 | 16 | video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4") 17 | model = YOLO("yolov8n.pt") 18 | 19 | thickness = 3 20 | text_scale = 1 21 | 22 | bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness) 23 | label_annotator = sv.LabelAnnotator( 24 | text_scale=text_scale, 25 | text_thickness=thickness, 26 | text_position=sv.Position.BOTTOM_CENTER) 27 | 28 | frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4") 29 | 30 | for frame in frame_generator: 31 | half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3) 32 | result = model(half_frame)[0] 33 | detections = sv.Detections.from_ultralytics(result) 34 | 35 | annotated_frame = half_frame.copy() 36 | annotated_frame = bounding_box_annotator.annotate( 37 | scene=annotated_frame, detections=detections) 38 | 39 | annotated_frame = label_annotator.annotate( 40 | scene=annotated_frame, detections=detections) 41 | 42 | 43 | cv2.imshow("frame", annotated_frame) 44 | if cv2.waitKey(1) & 0xFF == ord("q"): 45 | break 46 | cv2.destroyAllWindows() -------------------------------------------------------------------------------- /yolov8-segmentasyon/yolov8_ile_segmentasyon-3.py: -------------------------------------------------------------------------------- 1 | from ultralytics import YOLO 2 | import cv2 3 | import numpy as np 4 | 5 | def main(): 6 | model = YOLO("yolov8n-seg.pt") 7 | 8 | cap = cv2.VideoCapture(0) 9 | 10 | while cap.isOpened(): 11 | success, frame = cap.read() 12 | if not success: 13 | break 14 | 15 | print('frame_shape:', frame.shape) 16 | 17 | # Run YOLOv8 inference on the frame 18 | results = model(frame, classes=0, verbose=False) 19 | 20 | # if not exist person 21 | if results[0].masks is None: 22 | continue 23 | 24 | # get box object 25 | box = results[0].boxes[0].xyxy[0] 26 | box = box.numpy().astype(int) 27 | 28 | # background subtraction 29 | mask = (results[0].masks.data[0].numpy() * 255).astype('uint8') 30 | 31 | beyaz_pixel_sayisi = np.sum(mask == 255) # sadece beyaz pixelleri sayar 32 | sihay_pixel_sayisi = np.sum(mask == 0) # sadece siyah pixelleri sayar 33 | toplam_pixel_sayisi = beyaz_pixel_sayisi + sihay_pixel_sayisi 34 | beyaz_yuzde = int(beyaz_pixel_sayisi / toplam_pixel_sayisi * 100) 35 | print(beyaz_yuzde) 36 | 37 | cv2.putText(mask, f'Doluluk: %{str(beyaz_yuzde)}', (20, 50), cv2.FONT_HERSHEY_PLAIN, 2, 38 | (255, 0, 0), 3) 39 | 40 | cv2.imshow('Background remove', mask) 41 | cv2.imshow("Orijinal", frame) 42 | # Break the loop if 'q' is pressed 43 | if cv2.waitKey(1) & 0xFF == ord("q"): 44 | break 45 | 46 | 47 | 48 | if __name__ == "__main__": 49 | main() -------------------------------------------------------------------------------- /yolov8-segmentasyon/yolov8_ile_segmentasyon-4.py: -------------------------------------------------------------------------------- 1 | from ultralytics import YOLO 2 | import cv2 3 | import numpy as np 4 | 5 | def main(): 6 | model = YOLO("yolov8n-seg.pt") # önceden eğitilmiş ağırlıklar 7 | model.classes = ['cell phone'] 8 | 9 | cap = cv2.VideoCapture(0) 10 | 11 | while cap.isOpened(): 12 | success, frame = cap.read() 13 | if not success: 14 | break 15 | 16 | print('frame_shape:', frame.shape) 17 | 18 | # Run YOLOv8 inference on the frame 19 | results = model(frame, verbose=False) # classes=0 silindi 20 | 21 | # if not exist person 22 | if results[0].masks is None: 23 | continue 24 | 25 | # get box object 26 | box = results[0].boxes[0].xyxy[0] 27 | box = box.numpy().astype(int) 28 | 29 
/yolov8-segmentasyon/yolov8_ile_segmentasyon-4.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import numpy as np

def main():
    model = YOLO("yolov8n-seg.pt")  # pretrained weights
    model.classes = ['cell phone']

    cap = cv2.VideoCapture(0)

    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break

        print('frame_shape:', frame.shape)

        # Run YOLOv8 inference on the frame
        results = model(frame, verbose=False)  # classes=0 removed

        # skip the frame if no mask was found
        if results[0].masks is None:
            continue

        # get the box object
        box = results[0].boxes[0].xyxy[0]
        box = box.numpy().astype(int)

        # background subtraction
        mask = (results[0].masks.data[0].numpy() * 255).astype('uint8')

        beyaz_pixel_sayisi = np.sum(mask == 255)  # counts only the white pixels
        siyah_pixel_sayisi = np.sum(mask == 0)  # counts only the black pixels
        toplam_pixel_sayisi = beyaz_pixel_sayisi + siyah_pixel_sayisi
        beyaz_yuzde = int(beyaz_pixel_sayisi / toplam_pixel_sayisi * 100)
        print(beyaz_yuzde)

        cv2.putText(mask, f'Doluluk: %{str(beyaz_yuzde)}', (20, 50), cv2.FONT_HERSHEY_PLAIN, 2,
                    (255, 0, 0), 3)

        cv2.imshow('Background remove', mask)
        cv2.imshow("Orijinal", frame)
        # Break the loop if 'q' is pressed
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break



if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/Yolov8-ile-kitap-tanima/main.py:
--------------------------------------------------------------------------------
import math
import time

import cv2
import cvzone
from ultralytics import YOLO

confidence = 0.5

cap = cv2.VideoCapture(0)  # For Webcam
cap.set(3, 640)
cap.set(4, 480)
# cap = cv2.VideoCapture("../Videos/motorbikes.mp4")  # For Video


model = YOLO("best.pt")
# model = YOLO("last.pt")

classNames = ["Canakkale", "Tesla"]

prev_frame_time = 0
new_frame_time = 0

while True:
    new_frame_time = time.time()
    success, img = cap.read()
    results = model(img, stream=True, verbose=False)
    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
            w, h = x2 - x1, y2 - y1
            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])
            if conf > confidence:
                print("yakalandı")
                # note: 'acik' is not in classNames above, so this check always falls through to red
                if classNames[cls] == 'acik':
                    color = (0, 255, 0)
                else:
                    color = (0, 0, 255)

                cvzone.cornerRect(img, (x1, y1, w, h), colorC=color, colorR=color)
                cvzone.putTextRect(img, f'{classNames[cls].upper()} {int(conf*100)}%',
                                   (max(0, x1), max(35, y1)), scale=2, thickness=4, colorR=color,
                                   colorB=color)


    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    # print(fps)

    cv2.imshow("Image", img)
    cv2.waitKey(1)
--------------------------------------------------------------------------------
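
Note: classNames here must match the order of names in the data.yaml that splitdata.py generates, because box.cls is just an index into that list. For reference, the file that splitdata.py (further below) writes comes out as:

path: ../Data
train: ../train/images
val: ../val/images
test: ../test/images

nc: 2
names: ['Canakkale', 'Tesla']
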
/yolov8-opencv-hiz-tespiti/hiz_tespit4.py:
--------------------------------------------------------------------------------
## enable the byte_track (supervision) tracker for vehicle tracking and show the IDs on screen.
## byte_track = sv.ByteTrack(frame_rate=video_info.fps)
## detections = byte_track.update_with_detections(detections=detections)

# labels = []
# for tracker_id in np.array(detections.tracker_id):
#     labels.append(f"#{tracker_id}")

import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np

if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = byte_track.update_with_detections(detections=detections)

        labels = []
        for tracker_id in np.array(detections.tracker_id):
            labels.append(f"#{tracker_id}")

        annotated_frame = half_frame.copy()
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
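
Note: once every vehicle carries a stable tracker ID, counting traffic reduces to counting distinct IDs. A small sketch on top of the loop above (variable names assumed from that loop):

seen_ids = set()

# inside the frame loop, after update_with_detections:
for tracker_id in detections.tracker_id:
    seen_ids.add(int(tracker_id))
print("vehicles seen so far:", len(seen_ids))
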
"__main__": 52 | processor = VideoProcessor( 53 | source_video_path="cctv_trafik.mp4", 54 | ) 55 | processor.process_video() 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /yolov8_webcam_supervision_3.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | from ultralytics import YOLO 3 | import supervision as sv 4 | import numpy as np 5 | import argparse 6 | 7 | ZONE_POLYGON = np.array([ 8 | [0, 0], 9 | [0.5, 0], 10 | [0.5, 1], 11 | [0, 1] 12 | ]) 13 | 14 | def parse_arguments() -> argparse.Namespace: 15 | parser = argparse.ArgumentParser(description="YOLOv8 live") 16 | parser.add_argument( 17 | "--webcam-resolution", 18 | default=[1280, 720], 19 | nargs=2, 20 | type=int 21 | ) 22 | args = parser.parse_args() 23 | return args 24 | 25 | def main(): 26 | args = parse_arguments() 27 | frame_width, frame_height = args.webcam_resolution 28 | 29 | cap = cv2.VideoCapture(0) 30 | cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width) 31 | cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height) 32 | 33 | model = YOLO("yolov8n.pt") 34 | 35 | box_annotator = sv.BoxAnnotator( 36 | thickness=2, 37 | text_thickness=2, 38 | text_scale=1 39 | ) 40 | 41 | zone_polygon = (ZONE_POLYGON * np.array(args.webcam_resolution)).astype(int) 42 | zone = sv.PolygonZone(polygon=zone_polygon, frame_resolution_wh=tuple(args.webcam_resolution)) 43 | zone_annotator = sv.PolygonZoneAnnotator( 44 | zone=zone, 45 | color=sv.Color.blue(), 46 | thickness=7, 47 | text_thickness=7, 48 | text_scale=2 49 | ) 50 | 51 | while True: 52 | ret, frame = cap.read() 53 | 54 | result = model(frame, agnostic_nms=True)[0] 55 | 56 | detections = sv.Detections.from_yolov8(result) 57 | 58 | labels = [ 59 | f"{model.model.names[class_id]} {confidence:0.2f}" 60 | for _, confidence, class_id, _ 61 | in detections 62 | ] 63 | frame = box_annotator.annotate( 64 | scene=frame, 65 | detections=detections, 66 | labels=labels 67 | ) 68 | 69 | zone.trigger(detections=detections) 70 | frame = zone_annotator.annotate(scene=frame) 71 | 72 | cv2.imshow("yolov8", frame) 73 | 74 | if (cv2.waitKey(30) == 27): 75 | break 76 | 77 | 78 | if __name__ == "__main__": 79 | main() -------------------------------------------------------------------------------- /ip-kamera-webcam-yolov8/phnecam4.py: -------------------------------------------------------------------------------- 1 | # sadece insan yada sadece araba algılatma 2 | 3 | import cv2 4 | from ultralytics import YOLO 5 | #Starting the video capture 6 | model = YOLO("yolov8n.pt") 7 | cap = cv2.VideoCapture("http://192.168.1.22:8080//video") 8 | 9 | classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", 10 | "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", 11 | "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", 12 | "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", 13 | "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", 14 | "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", 15 | "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", 16 | "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", 17 | "microwave", "oven", "toaster", "sink", "refrigerator", "book", 
"clock", "vase", "scissors", 18 | "teddy bear", "hair drier", "toothbrush" 19 | ] 20 | 21 | 22 | while(cap.isOpened()): 23 | ret,img = cap.read() 24 | img = cv2.resize(img, (640, 480)) 25 | results = model(img, stream=True) 26 | 27 | #Controlling the algorithm with keys 28 | try: 29 | for r in results: 30 | boxes = r.boxes 31 | for box in boxes: 32 | # Bounding Box 33 | x1, y1, x2, y2 = box.xyxy[0] 34 | x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) 35 | 36 | 37 | # Class Name 38 | cls = int(box.cls[0]) 39 | print(cls) 40 | 41 | # 0 insan 2 araba 42 | if cls == 0: 43 | cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 255), 3) 44 | 45 | 46 | cv2.imshow('img',img) 47 | a = cv2.waitKey(1) 48 | if a == ord('q'): 49 | break 50 | except cv2.error: 51 | print("stream ended") 52 | break 53 | 54 | cap.release() 55 | cv2.destroyAllWindows() 56 | -------------------------------------------------------------------------------- /yolov8 ve supervision ile webcam görünütüsü içinde özel bir bölgedeki nesneleri saydırma.txt: -------------------------------------------------------------------------------- 1 | Yolov8 ve supervision ile webcam görünütüsü içinde özel bir bölgedeki nesneleri saydırma 2 | 3 | 1-nesne algılama 4 | 5 | from ultralytics import YOLO 6 | 7 | model = YOLO("yolov8n.pt") 8 | 9 | result = model(frame) 10 | result = model(frame, show=True) # ekranda nesneyi göstermek için show=True 11 | 12 | 2-supervision ile nesneleri gösterme 13 | 14 | import supervision as sv 15 | 16 | box_annotator = sv.BoxAnnotator( 17 | thickness=2, 18 | text_thickness=2, 19 | text_scale=1 20 | ) 21 | 22 | result = model(frame, agnostic_nms=True)[0] 23 | 24 | detections = sv.Detections.from_yolov8(result) 25 | labels = [ 26 | f"{model.model.names[class_id]} {confidence:0.2f}" 27 | for _, confidence, class_id, _ 28 | in detections 29 | ] 30 | frame = box_annotator.annotate( 31 | scene=frame, 32 | detections=detections, 33 | labels=labels 34 | ) 35 | 36 | 3- bölge (zone) oluşturma ve o bölgedeki nesneleri saydırma 37 | 38 | import numpy as np 39 | import argparse 40 | 41 | ZONE_POLYGON = np.array([ 42 | [0, 0], 43 | [0.5, 0], 44 | [0.5, 1], 45 | [0, 1] 46 | ]) 47 | 48 | 49 | def parse_arguments() -> argparse.Namespace: 50 | parser = argparse.ArgumentParser(description="YOLOv8 live") 51 | parser.add_argument( 52 | "--webcam-resolution", 53 | default=[1280, 720], 54 | nargs=2, 55 | type=int 56 | ) 57 | args = parser.parse_args() 58 | return args 59 | 60 | 61 | args = parse_arguments() 62 | frame_width, frame_height = args.webcam_resolution 63 | 64 | cap.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width) 65 | cap.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height) 66 | 67 | 68 | zone_polygon = (ZONE_POLYGON * np.array(args.webcam_resolution)).astype(int) 69 | zone = sv.PolygonZone(polygon=zone_polygon, frame_resolution_wh=tuple(args.webcam_resolution)) 70 | zone_annotator = sv.PolygonZoneAnnotator( 71 | zone=zone, 72 | color=sv.Color.blue(), 73 | thickness=2, 74 | text_thickness=4, 75 | text_scale=2 76 | ) 77 | 78 | zone.trigger(detections=detections) 79 | frame = zone_annotator.annotate(scene=frame) 80 | 81 | -------------------------------------------------------------------------------- /yolov8-byteTrack-supervision/yolov8_bytetrack-3.py: -------------------------------------------------------------------------------- 1 | # webcam ile nesne takibi 2 | # process video fonksiyonu revize edildi 3 | 4 | from ultralytics import YOLO 5 | import numpy as np 6 | import cv2 7 | import supervision as sv 8 | # from tqdm import tqdm 9 | 10 
/yolov8-byteTrack-supervision/yolov8_bytetrack-3.py:
--------------------------------------------------------------------------------
# object tracking with the webcam
# the process_video function was revised

from ultralytics import YOLO
import numpy as np
import cv2
import supervision as sv
# from tqdm import tqdm

COLORS = sv.ColorPalette.default()

class VideoProcessor:
    def __init__(
        self,
        # source_video_path: str,

    ) -> None:
        self.conf_threshold = 0.5
        self.iou_threshold = 0.5
        # self.source_video_path = "cctv_trafik.mp4"

        self.model = YOLO('yolov8n.pt')
        self.tracker = sv.ByteTrack()

        # self.video_info = sv.VideoInfo.from_video_path(source_video_path)

        self.bounding_box_annotator = sv.BoundingBoxAnnotator(color=COLORS)
        self.label_annotator = sv.LabelAnnotator(
            color=COLORS, text_color=sv.Color.blue()
        )

    def process_video(self):
        cap = cv2.VideoCapture(0)

        while True:
            success, img = cap.read()

            annotated_frame = self.process_frame(img)
            cv2.imshow("Processed Video", annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        cv2.destroyAllWindows()

    def process_frame(self, frame: np.ndarray) -> np.ndarray:
        results = self.model(
            frame, verbose=False, conf=self.conf_threshold, iou=self.iou_threshold
        )[0]
        detections = sv.Detections.from_ultralytics(results)
        detections.class_id = np.zeros(len(detections))  # treat every detection as a single class (id 0)
        detections = self.tracker.update_with_detections(detections)

        return self.annotate_frame(frame, detections)

    def annotate_frame(
        self, frame: np.ndarray, detections: sv.Detections
    ) -> np.ndarray:
        annotated_frame = frame.copy()

        labels = [f"#{tracker_id}" for tracker_id in detections.tracker_id]
        annotated_frame = self.bounding_box_annotator.annotate(
            annotated_frame, detections
        )
        annotated_frame = self.label_annotator.annotate(
            annotated_frame, detections, labels
        )
        return annotated_frame


if __name__ == "__main__":
    processor = VideoProcessor(
        # source_video_path="cctv_trafik.mp4",
    )
    processor.process_video()
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit5.py:
--------------------------------------------------------------------------------
## define a polygon zone and detect/show only the vehicles inside that region
# SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)],
#                    [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]])

## polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)
## detections = detections[polygon_zone.trigger(detections)]
## annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())

import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np

SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)],
                   [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]])

if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = detections[polygon_zone.trigger(detections)]
        detections = byte_track.update_with_detections(detections=detections)

        labels = []
        for tracker_id in np.array(detections.tracker_id):
            labels.append(f"#{tracker_id}")

        annotated_frame = half_frame.copy()
        annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
/yolov8-byteTrack-supervision/yolov8_bytetrack-2.py:
--------------------------------------------------------------------------------
# supervision bytetrack object tracking enabled

from ultralytics import YOLO
import numpy as np
import cv2
import supervision as sv
from tqdm import tqdm

COLORS = sv.ColorPalette.default()

class VideoProcessor:
    def __init__(
        self,
        source_video_path: str,
    ) -> None:
        self.conf_threshold = 0.5
        self.iou_threshold = 0.5
        self.source_video_path = "cctv_trafik.mp4"

        self.model = YOLO('yolov8n.pt')
        self.tracker = sv.ByteTrack()

        self.video_info = sv.VideoInfo.from_video_path(source_video_path)

        self.bounding_box_annotator = sv.BoundingBoxAnnotator(color=COLORS)
        self.label_annotator = sv.LabelAnnotator(
            color=COLORS, text_color=sv.Color.blue()
        )

    def process_video(self):
        frame_generator = sv.get_video_frames_generator(
            source_path=self.source_video_path
        )

        for frame in tqdm(frame_generator, total=self.video_info.total_frames):
            annotated_frame = self.process_frame(frame)
            cv2.imshow("Processed Video", annotated_frame)
            if cv2.waitKey(1) & 0xFF == ord("q"):
                break
        cv2.destroyAllWindows()

    def process_frame(self, frame: np.ndarray) -> np.ndarray:
        results = self.model(
            frame, verbose=False, conf=self.conf_threshold, iou=self.iou_threshold
        )[0]
        detections = sv.Detections.from_ultralytics(results)
        detections.class_id = np.zeros(len(detections))  # treat every detection as a single class (id 0)
        detections = self.tracker.update_with_detections(detections)

        return self.annotate_frame(frame, detections)

    def annotate_frame(
        self, frame: np.ndarray, detections: sv.Detections
    ) -> np.ndarray:
        annotated_frame = frame.copy()

        labels = [f"#{tracker_id}" for tracker_id in detections.tracker_id]

        annotated_frame = self.bounding_box_annotator.annotate(
            annotated_frame, detections
        )
        annotated_frame = self.label_annotator.annotate(
            annotated_frame, detections, labels
        )
        return annotated_frame


if __name__ == "__main__":
    processor = VideoProcessor(
        source_video_path="cctv_trafik.mp4",
    )
    processor.process_video()
--------------------------------------------------------------------------------
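
Note: tqdm only reports progress; nothing above writes the annotated video to disk. supervision's VideoSink can do that inside process_video; a sketch (the output path is arbitrary):

# inside process_video, where self.video_info is available:
with sv.VideoSink(target_path="cikti.mp4", video_info=self.video_info) as sink:
    for frame in frame_generator:
        annotated_frame = self.process_frame(frame)
        sink.write_frame(frame=annotated_frame)
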
/Yolov8-ile-kitap-tanima/splitdata.py:
--------------------------------------------------------------------------------
import os
import random
import shutil
from itertools import islice

outputFolderPath = "Dataset/SplitData"
inputFolderPath = "Dataset/all"
splitRatio = {"train": 0.7, "val": 0.2, "test": 0.1}
classes = ["Canakkale", "Tesla"]

try:
    shutil.rmtree(outputFolderPath)
except OSError as e:
    os.mkdir(outputFolderPath)

# -------- Directories to Create -----------
os.makedirs(f"{outputFolderPath}/train/images", exist_ok=True)
os.makedirs(f"{outputFolderPath}/train/labels", exist_ok=True)
os.makedirs(f"{outputFolderPath}/val/images", exist_ok=True)
os.makedirs(f"{outputFolderPath}/val/labels", exist_ok=True)
os.makedirs(f"{outputFolderPath}/test/images", exist_ok=True)
os.makedirs(f"{outputFolderPath}/test/labels", exist_ok=True)

# -------- Get the Names -----------
listNames = os.listdir(inputFolderPath)

uniqueNames = []
for name in listNames:
    uniqueNames.append(name.split('.')[0])
uniqueNames = list(set(uniqueNames))

# -------- Shuffle -----------
random.shuffle(uniqueNames)

# -------- Find the number of images for each folder -----------
lenData = len(uniqueNames)
lenTrain = int(lenData * splitRatio['train'])
lenVal = int(lenData * splitRatio['val'])
lenTest = int(lenData * splitRatio['test'])

# -------- Put remaining images in Training -----------
if lenData != lenTrain + lenTest + lenVal:
    remaining = lenData - (lenTrain + lenTest + lenVal)
    lenTrain += remaining

# -------- Split the list -----------
lengthToSplit = [lenTrain, lenVal, lenTest]
Input = iter(uniqueNames)
Output = [list(islice(Input, elem)) for elem in lengthToSplit]
print(f'Total Images:{lenData} \nSplit: {len(Output[0])} {len(Output[1])} {len(Output[2])}')

# -------- Copy the files -----------

sequence = ['train', 'val', 'test']
for i, out in enumerate(Output):
    for fileName in out:
        shutil.copy(f'{inputFolderPath}/{fileName}.jpg', f'{outputFolderPath}/{sequence[i]}/images/{fileName}.jpg')
        shutil.copy(f'{inputFolderPath}/{fileName}.txt', f'{outputFolderPath}/{sequence[i]}/labels/{fileName}.txt')

print("Split Process Completed...")

# -------- Creating Data.yaml file -----------

dataYaml = f'path: ../Data\n\
train: ../train/images\n\
val: ../val/images\n\
test: ../test/images\n\
\n\
nc: {len(classes)}\n\
names: {classes}'


f = open(f"{outputFolderPath}/data.yaml", 'a')
f.write(dataYaml)
f.close()

print("Data.yaml file Created...")
--------------------------------------------------------------------------------
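
Note: shutil.copy raises FileNotFoundError if an image has no matching .txt label, which aborts the whole split. A defensive variant of the copy loop above, as a sketch reusing the script's variables:

for i, out in enumerate(Output):
    for fileName in out:
        label = f'{inputFolderPath}/{fileName}.txt'
        if not os.path.exists(label):
            print(f'warning: no label for {fileName}, skipped')
            continue
        shutil.copy(f'{inputFolderPath}/{fileName}.jpg', f'{outputFolderPath}/{sequence[i]}/images/{fileName}.jpg')
        shutil.copy(label, f'{outputFolderPath}/{sequence[i]}/labels/{fileName}.txt')
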
"sports ball", "kite", "baseball bat", 11 | "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", 12 | "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", 13 | "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", 14 | "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", 15 | "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", 16 | "teddy bear", "hair drier", "toothbrush" 17 | ] 18 | 19 | prev_frame_time = 0 20 | new_frame_time = 0 21 | 22 | # cap = cv2.VideoCapture(1) # For Webcam 23 | # cap.set(3, 1280) 24 | # cap.set(4, 720) 25 | 26 | cap = cv2.VideoCapture("video_gece_Trim.mp4") 27 | 28 | # video kayıt için fourcc ve VideoWriter tanımlama 29 | cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v') 30 | success, img = cap.read() 31 | size = list(img.shape) 32 | del size[2] 33 | size.reverse() 34 | video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size) #output video name, fourcc, fps, size 35 | 36 | model = YOLO("yolov8n.pt") 37 | 38 | while True: 39 | new_frame_time = time.time() 40 | success, img = cap.read() 41 | results = model(img, stream=True) 42 | for r in results: 43 | boxes = r.boxes 44 | for box in boxes: 45 | # Bounding Box 46 | x1, y1, x2, y2 = box.xyxy[0] 47 | x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) 48 | # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3) 49 | w, h = x2 - x1, y2 - y1 50 | cvzone.cornerRect(img, (x1, y1, w, h)) 51 | # Confidence 52 | conf = math.ceil((box.conf[0] * 100)) / 100 53 | # Class Name 54 | cls = int(box.cls[0]) 55 | 56 | cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1) 57 | 58 | # video kayıt 59 | video.write(img) 60 | 61 | fps = 1 / (new_frame_time - prev_frame_time) 62 | prev_frame_time = new_frame_time 63 | print("fps: ", fps) 64 | 65 | cv2.imshow("Image", img) 66 | cv2.waitKey(1) 67 | 68 | video.release() -------------------------------------------------------------------------------- /yolov8-byteTrack-supervision/yolov8_bytetrack-4.py: -------------------------------------------------------------------------------- 1 | # trace-annotator aktifleştirildi 2 | 3 | from ultralytics import YOLO 4 | import numpy as np 5 | import cv2 6 | import supervision as sv 7 | from tqdm import tqdm 8 | 9 | COLORS = sv.ColorPalette.default() 10 | 11 | class VideoProcessor: 12 | def __init__( 13 | self, 14 | source_video_path: str, 15 | ) -> None: 16 | self.conf_threshold = 0.5 17 | self.iou_threshold = 0.5 18 | self.source_video_path = "cctv_trafik.mp4" 19 | 20 | self.model = YOLO('yolov8n.pt') 21 | self.tracker = sv.ByteTrack() 22 | 23 | self.video_info = sv.VideoInfo.from_video_path(source_video_path) 24 | 25 | self.bounding_box_annotator = sv.BoundingBoxAnnotator(color=COLORS) 26 | self.label_annotator = sv.LabelAnnotator( 27 | color=COLORS, text_color=sv.Color.blue() 28 | ) 29 | 30 | self.trace_annotator = sv.TraceAnnotator( 31 | color=COLORS, position=sv.Position.CENTER, trace_length=100, thickness=2 32 | ) 33 | 34 | def process_video(self): 35 | frame_generator = sv.get_video_frames_generator( 36 | source_path=self.source_video_path 37 | ) 38 | 39 | for frame in tqdm(frame_generator, total=self.video_info.total_frames): 40 | annotated_frame = self.process_frame(frame) 41 | cv2.imshow("Processed Video", annotated_frame) 42 | if cv2.waitKey(1) & 0xFF == ord("q"): 43 | break 44 | cv2.destroyAllWindows() 45 | 
46 | def process_frame(self, frame: np.ndarray) -> np.ndarray: 47 | results = self.model( 48 | frame, verbose=False, conf=self.conf_threshold, iou=self.iou_threshold 49 | )[0] 50 | detections = sv.Detections.from_ultralytics(results) 51 | detections.class_id = np.zeros(len(detections)) 52 | detections = self.tracker.update_with_detections(detections) 53 | 54 | return self.annotate_frame(frame, detections) 55 | 56 | def annotate_frame( 57 | self, frame: np.ndarray, detections: sv.Detections 58 | ) -> np.ndarray: 59 | annotated_frame = frame.copy() 60 | 61 | labels = [f"#{tracker_id}" for tracker_id in detections.tracker_id] 62 | 63 | annotated_frame = self.trace_annotator.annotate(annotated_frame, detections) 64 | 65 | annotated_frame = self.bounding_box_annotator.annotate( 66 | annotated_frame, detections 67 | ) 68 | annotated_frame = self.label_annotator.annotate( 69 | annotated_frame, detections, labels 70 | ) 71 | return annotated_frame 72 | 73 | 74 | if __name__ == "__main__": 75 | processor = VideoProcessor( 76 | source_video_path="cctv_trafik.mp4", 77 | ) 78 | processor.process_video() 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /yolov8_nesne_bulaniklastirma.py: -------------------------------------------------------------------------------- 1 | from ultralytics import YOLO 2 | import cv2 3 | import cvzone 4 | import math 5 | import time 6 | 7 | classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat", 8 | "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", 9 | "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", 10 | "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", 11 | "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", 12 | "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", 13 | "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed", 14 | "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone", 15 | "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", 16 | "teddy bear", "hair drier", "toothbrush" 17 | ] 18 | 19 | prev_frame_time = 0 20 | new_frame_time = 0 21 | 22 | # cap = cv2.VideoCapture(1) # For Webcam 23 | # cap.set(3, 1280) 24 | # cap.set(4, 720) 25 | 26 | cap = cv2.VideoCapture(0) 27 | 28 | # video kayıt için fourcc ve VideoWriter tanımlama 29 | cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v') 30 | success, img = cap.read() 31 | print(img.shape) 32 | cv2.imwrite("ornek_resim.jpg", img) 33 | size = list(img.shape) 34 | del size[2] 35 | size.reverse() 36 | video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size) #output video name, fourcc, fps, size 37 | 38 | model = YOLO("yolov8n.pt") 39 | 40 | while True: 41 | new_frame_time = time.time() 42 | success, img = cap.read() 43 | results = model(img, stream=True) 44 | for r in results: 45 | boxes = r.boxes 46 | for box in boxes: 47 | # Bounding Box 48 | x1, y1, x2, y2 = box.xyxy[0] 49 | x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) 50 | # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3) 51 | w, h = x2 - x1, y2 - y1 52 | cvzone.cornerRect(img, (x1, y1, w, h)) 53 | 54 | # bulanıklastirma 55 | imgCrop = img[y1:y1 + h, x1:x1 + w] 56 | imgBlur = cv2.blur(imgCrop, (35, 
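
Note: with the COCO weights, everything the model knows (people, traffic lights, ...) gets a box and a trace. If only vehicles matter, Ultralytics can filter at inference time; a sketch of the one-line change in process_frame (2, 3, 5, 7 are the indices of car, motorbike, bus and truck in the classNames list used throughout this repo):

results = self.model(
    frame, verbose=False, conf=self.conf_threshold, iou=self.iou_threshold,
    classes=[2, 3, 5, 7],  # car, motorbike, bus, truck
)[0]
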
/yolov8_nesne_bulaniklastirma.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import cvzone
import math
import time

classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

prev_frame_time = 0
new_frame_time = 0

# cap = cv2.VideoCapture(1) # For Webcam
# cap.set(3, 1280)
# cap.set(4, 720)

cap = cv2.VideoCapture(0)

# define the fourcc and VideoWriter for video recording
cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
success, img = cap.read()
print(img.shape)
cv2.imwrite("ornek_resim.jpg", img)
size = list(img.shape)
del size[2]
size.reverse()
video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size)  # output video name, fourcc, fps, size

model = YOLO("yolov8n.pt")

while True:
    new_frame_time = time.time()
    success, img = cap.read()
    results = model(img, stream=True)
    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
            w, h = x2 - x1, y2 - y1
            cvzone.cornerRect(img, (x1, y1, w, h))

            # blur the detected region
            imgCrop = img[y1:y1 + h, x1:x1 + w]
            imgBlur = cv2.blur(imgCrop, (35, 35))
            img[y1:y1 + h, x1:x1 + w] = imgBlur

            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])

            cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1)

    # record the video
    video.write(img)

    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    print("fps: ", fps)

    cv2.imshow("Image", img)
    cv2.waitKey(1)

video.release()
--------------------------------------------------------------------------------
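
Note: cv2.blur is a simple box filter. Two common variants for anonymization, as a sketch: a Gaussian blur (kernel sizes must be odd) and pixelation by scaling down and back up:

import cv2

def gaussian_blur_region(img, x1, y1, w, h):
    img[y1:y1 + h, x1:x1 + w] = cv2.GaussianBlur(img[y1:y1 + h, x1:x1 + w], (35, 35), 0)
    return img

def pixelate_region(img, x1, y1, w, h, blocks=10):
    crop = img[y1:y1 + h, x1:x1 + w]
    small = cv2.resize(crop, (blocks, blocks), interpolation=cv2.INTER_LINEAR)
    img[y1:y1 + h, x1:x1 + w] = cv2.resize(small, (w, h), interpolation=cv2.INTER_NEAREST)
    return img
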
/Yolov8_ile_IHA_algilama_ve_izleme_ornegi/yolov8_hareketli_ucak_izleme_1.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import cvzone
import math
import time

classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

prev_frame_time = 0
new_frame_time = 0

# cap = cv2.VideoCapture(1) # For Webcam
# cap.set(3, 640)
# cap.set(4, 480)


cap = cv2.VideoCapture("hareketli_ucak3.mp4")


# define the fourcc and VideoWriter for video recording
cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
success, img = cap.read()
print(img.shape)
yukseklik = img.shape[0]
genislik = img.shape[1]

cv2.imwrite("ornek_resim.jpg", img)
size = list(img.shape)
del size[2]
size.reverse()
video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size)  # output video name, fourcc, fps, size

model = YOLO("yolov8n.pt")

while True:
    new_frame_time = time.time()
    success, img = cap.read()

    img = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_AREA)

    results = model(img, stream=True)
    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
            w, h = x2 - x1, y2 - y1
            cvzone.cornerRect(img, (x1, y1, w, h))
            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])

            cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1)

            cx, cy = x1 + w // 2, y1 + h // 2
            cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)

            cx2, cy2 = 1280 // 2, 720 // 2
            cv2.circle(img, (cx2, cy2), 5, (255, 0, 255), cv2.FILLED)

            cv2.rectangle(img, (120, 120), (1280-120, 720-120), (255, 0, 0), 2)

            cv2.line(img, (cx2, cy2), (cx, cy), (255, 0, 0), 1)

    # record the video
    video.write(img)

    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    print("fps: ", fps)

    cv2.imshow("Image", img)
    cv2.waitKey(1)

video.release()
--------------------------------------------------------------------------------
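
Note: the line drawn between the frame center (cx2, cy2) and the target center (cx, cy) is the tracking error. For steering a camera gimbal or drone toward the target, it would typically be turned into an offset vector; a sketch (the sign convention is an assumption):

# positive dx: target is right of center; positive dy: target is below center
dx = cx - cx2
dy = cy - cy2
print(f"offset from center: dx={dx}, dy={dy}")
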
- prev_frame_time) 86 | prev_frame_time = new_frame_time 87 | print("fps: ", fps) 88 | 89 | cv2.imshow("Image", img) 90 | cv2.waitKey(1) 91 | 92 | video.release() -------------------------------------------------------------------------------- /yolov8-opencv-hiz-tespiti/hiz_tespit6.py: -------------------------------------------------------------------------------- 1 | ## perspektif görünümü opencv getPerspectiveTransform ile doğrultulmuş düz görünümlü matrise dönüştürme 2 | ## bu dönüştürülmüş matrisi kullanarak araçların yaklaşık x,y koordinatlarını hesaplama 3 | 4 | ## view_transformer = ViewTransformer(source=SOURCE, target=TARGET) 5 | # points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER) 6 | # points = view_transformer.transform_points(points=points).astype(int) 7 | 8 | # labels = [f"x: {x}, y: {y}" 9 | # for [x,y] in points] 10 | 11 | 12 | import supervision as sv 13 | import cv2 14 | from ultralytics import YOLO 15 | import numpy as np 16 | 17 | SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)], 18 | [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]]) 19 | 20 | TARGET_WIDTH = 25 21 | TARGET_HEIGHT = 250 22 | 23 | TARGET = np.array( 24 | [ 25 | [0, 0], 26 | [TARGET_WIDTH - 1, 0], 27 | [TARGET_WIDTH - 1, TARGET_HEIGHT - 1], 28 | [0, TARGET_HEIGHT - 1], 29 | ] 30 | ) 31 | 32 | class ViewTransformer: 33 | def __init__(self, source: np.ndarray, target: np.ndarray) -> None: 34 | source = source.astype(np.float32) 35 | target = target.astype(np.float32) 36 | self.m = cv2.getPerspectiveTransform(source, target) 37 | 38 | def transform_points(self, points: np.ndarray) -> np.ndarray: 39 | reshaped_points = points.reshape(-1, 1, 2).astype(np.float32) 40 | transformed_points = cv2.perspectiveTransform(reshaped_points, self.m) 41 | return transformed_points.reshape(-1, 2) 42 | 43 | 44 | if __name__ == "__main__": 45 | 46 | video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4") 47 | model = YOLO("yolov8n.pt") 48 | 49 | byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5) 50 | 51 | thickness = 3 52 | text_scale = 1 53 | 54 | bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness) 55 | label_annotator = sv.LabelAnnotator( 56 | text_scale=text_scale, 57 | text_thickness=thickness, 58 | text_position=sv.Position.BOTTOM_CENTER) 59 | 60 | frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4") 61 | 62 | polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh) 63 | 64 | view_transformer = ViewTransformer(source=SOURCE, target=TARGET) 65 | 66 | for frame in frame_generator: 67 | half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3) 68 | result = model(half_frame)[0] 69 | detections = sv.Detections.from_ultralytics(result) 70 | detections = detections[polygon_zone.trigger(detections)] 71 | detections = byte_track.update_with_detections(detections=detections) 72 | 73 | points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER) 74 | points = view_transformer.transform_points(points=points).astype(int) 75 | 76 | labels = [ 77 | f"x: {x}, y: {y}" 78 | for [x,y] in points] 79 | 80 | # for tracker_id in np.array(detections.tracker_id): 81 | # labels.append(f"#{tracker_id}") 82 | 83 | annotated_frame = half_frame.copy() 84 | annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red()) 85 | annotated_frame = bounding_box_annotator.annotate( 86 | scene=annotated_frame, detections=detections) 87 | 88 
--------------------------------------------------------------------------------
/yolov8_nesne_izleme_4.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import cvzone
import math
import time
import numpy as np  # np is used below; relying on sort's star import to provide it is fragile
from sort import *

classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

mask = cv2.imread("maske.jpg")

# Tracking
tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

prev_frame_time = 0
new_frame_time = 0

# cap = cv2.VideoCapture(1)  # For Webcam
# cap.set(3, 1280)
# cap.set(4, 720)

cap = cv2.VideoCapture("arabalar.mp4")

# define the fourcc code and VideoWriter for recording the output video
cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
success, img = cap.read()
# print(img.shape)
# cv2.imwrite("ornek_resim.jpg", img)
size = list(img.shape)
del size[2]     # drop the channel count
size.reverse()  # (height, width) -> (width, height)
video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size)  # output video name, fourcc, fps, size

model = YOLO("yolov8n.pt")

while True:
    new_frame_time = time.time()
    success, img = cap.read()
    if not success:  # end of the video; leave the loop so the writer gets released
        break

    imgRegion = cv2.bitwise_and(img, mask)

    results = model(imgRegion, stream=True)

    detections = np.empty((0, 5))

    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
            w, h = x2 - x1, y2 - y1
            cvzone.cornerRect(img, (x1, y1, w, h))
            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])

            # cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1)

            currentArray = np.array([x1, y1, x2, y2, conf])
            detections = np.vstack((detections, currentArray))

    resultsTracker = tracker.update(detections)

    for result in resultsTracker:
        x1, y1, x2, y2, id = result
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        print(result)
        w, h = x2 - x1, y2 - y1
        cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=2, colorR=(255, 0, 255))
        cvzone.putTextRect(img, f' {int(id)}', (max(0, x1), max(35, y1)),
                           scale=2, thickness=3, offset=10)

    # write the annotated frame to the output video
    video.write(img)

    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    print("fps: ", fps)

    cv2.imshow("Image", img)
    # cv2.imshow("ImageRegion", imgRegion)
    cv2.waitKey(1)

video.release()
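tracker.update() above is fed all of a frame's detections stacked into a single N x 5 NumPy array of [x1, y1, x2, y2, conf] rows, and returns rows extended with a track id. A minimal sketch of that contract with made-up boxes, assuming sort.py from the SORT repository is importable (the script above pulls it in with a star import):

import numpy as np
from sort import Sort

tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

# two made-up detections: [x1, y1, x2, y2, conf]
detections = np.array([
    [100, 200, 180, 260, 0.91],
    [400, 220, 470, 280, 0.72],
])

tracks = tracker.update(detections)  # rows come back as [x1, y1, x2, y2, track_id]
for x1, y1, x2, y2, track_id in tracks:
    print(int(track_id), int(x1), int(y1), int(x2), int(y2))

# on frames with no detections, pass an empty (0, 5) array:
tracks = tracker.update(np.empty((0, 5)))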
--------------------------------------------------------------------------------
/yolov8_nesne_sayma_4.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import cvzone
import math
import time
import numpy as np  # np is used below; relying on sort's star import to provide it is fragile
from sort import *

classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

mask = cv2.imread("maske2.jpg")

# Tracking
tracker = Sort(max_age=20, min_hits=3, iou_threshold=0.3)

prev_frame_time = 0
new_frame_time = 0

# cap = cv2.VideoCapture(1)  # For Webcam
# cap.set(3, 1280)
# cap.set(4, 720)

cap = cv2.VideoCapture("cctv_trafik.mp4")

# define the fourcc code and VideoWriter for recording the output video
cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
success, img = cap.read()
# print(img.shape)
# cv2.imwrite("ornek_resim.jpg", img)
size = list(img.shape)
del size[2]     # drop the channel count
size.reverse()  # (height, width) -> (width, height)
video = cv2.VideoWriter("kaydedilen_video.mp4", cv2_fourcc, 24, size)  # output video name, fourcc, fps, size

model = YOLO("yolov8n.pt")

limits = [200, 397, 473, 397]  # counting line: x1, y1, x2, y2
totalCount = []

while True:
    new_frame_time = time.time()
    success, img = cap.read()
    if not success:  # end of the video; leave the loop so the writer gets released
        break

    imgRegion = cv2.bitwise_and(img, mask)

    imgGraphics = cv2.imread("graphics3.png", cv2.IMREAD_UNCHANGED)
    img = cvzone.overlayPNG(img, imgGraphics, (0, 0))

    results = model(imgRegion, stream=True)

    detections = np.empty((0, 5))

    for r in results:
        boxes = r.boxes
        for box in boxes:
            # Bounding Box
            x1, y1, x2, y2 = box.xyxy[0]
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
            # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
            w, h = x2 - x1, y2 - y1
            # cvzone.cornerRect(img, (x1, y1, w, h))
            # Confidence
            conf = math.ceil((box.conf[0] * 100)) / 100
            # Class Name
            cls = int(box.cls[0])

            # cvzone.putTextRect(img, f'{classNames[cls]} {conf}', (max(0, x1), max(35, y1)), scale=1, thickness=1)

            currentArray = np.array([x1, y1, x2, y2, conf])
            detections = np.vstack((detections, currentArray))

    resultsTracker = tracker.update(detections)

    cv2.line(img, (limits[0], limits[1]), (limits[2], limits[3]), (0, 0, 255), 5)

    for result in resultsTracker:
        x1, y1, x2, y2, id = result
        x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
        print(result)
        w, h = x2 - x1, y2 - y1
        cvzone.cornerRect(img, (x1, y1, w, h), l=9, rt=2, colorR=(255, 0, 255))
        cvzone.putTextRect(img, f' {int(id)}', (max(0, x1), max(35, y1)),
                           scale=2, thickness=3, offset=10)

        cx, cy = x1 + w // 2, y1 + h // 2
        cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)

        # count the track once when its center enters a 15-pixel band around the line
        if limits[0] < cx < limits[2] and limits[1] - 15 < cy < limits[1] + 15:
            if totalCount.count(id) == 0:
                totalCount.append(id)
                cv2.line(img, (limits[0], limits[1]), (limits[2], limits[3]), (0, 255, 0), 5)

    cv2.putText(img, str(len(totalCount)), (255, 100), cv2.FONT_HERSHEY_PLAIN, 5, (50, 50, 255), 8)

    # write the annotated frame to the output video
    video.write(img)

    fps = 1 / (new_frame_time - prev_frame_time)
    prev_frame_time = new_frame_time
    print("fps: ", fps)

    cv2.imshow("Image", img)
    # cv2.imshow("ImageRegion", imgRegion)
    cv2.waitKey(1)

video.release()
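The counting test above fires when a track's center falls between the line's x endpoints and within a 15-pixel vertical band around the line's y, and each tracker id is counted only once. The same logic as a standalone sketch (crossed_line is a hypothetical helper, not part of the repo):

# a minimal restatement of the counting test above
def crossed_line(cx, cy, limits, tol=15):
    """True if (cx, cy) lies on the counting line, within a +/- tol pixel band."""
    x1, y1, x2, _ = limits
    return x1 < cx < x2 and y1 - tol < cy < y1 + tol

limits = [200, 397, 473, 397]
totalCount = []

for track_id, (cx, cy) in [(1, (300, 390)), (1, (305, 395)), (2, (100, 400))]:
    if crossed_line(cx, cy, limits) and totalCount.count(track_id) == 0:
        totalCount.append(track_id)

print(len(totalCount))  # 1 -- id 1 is counted once; id 2 never enters the band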
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit10.py:
--------------------------------------------------------------------------------
# speed detection on a different video source
# video_path="cctv_trafik.mp4"
# generate resim_1.jpg from the new video (hiz_tespit11.py) and pick the region coordinates from it
# SOURCE = np.array([[310, 190], [630, 190], [740, 720], [-70, 720]])
# TARGET_WIDTH = 20
# TARGET_HEIGHT = 30

import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np
from collections import defaultdict, deque

SOURCE = np.array([[310, 190], [630, 190], [740, 720], [-70, 720]])

TARGET_WIDTH = 20
TARGET_HEIGHT = 30

TARGET = np.array(
    [
        [0, 0],
        [TARGET_WIDTH - 1, 0],
        [TARGET_WIDTH - 1, TARGET_HEIGHT - 1],
        [0, TARGET_HEIGHT - 1],
    ]
)

class ViewTransformer:
    def __init__(self, source: np.ndarray, target: np.ndarray) -> None:
        source = source.astype(np.float32)
        target = target.astype(np.float32)
        self.m = cv2.getPerspectiveTransform(source, target)

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        print("points :", points)
        if len(points) != 0:
            reshaped_points = points.reshape(-1, 1, 2).astype(np.float32)
            transformed_points = cv2.perspectiveTransform(reshaped_points, self.m)
            return transformed_points.reshape(-1, 2)
        else:
            # temporary placeholder so the code after this call does not crash on empty detections
            hata_giderici_gecici_deger = np.array([[1, 2], [5, 8]], dtype='float')
            return hata_giderici_gecici_deger


if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="cctv_trafik.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    trace_annotator = sv.TraceAnnotator(
        thickness=thickness,
        trace_length=video_info.fps * 2,
        position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    frame_generator = sv.get_video_frames_generator(source_path="cctv_trafik.mp4")

    polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)

    view_transformer = ViewTransformer(source=SOURCE, target=TARGET)

    coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))

    for frame in frame_generator:
        result = model(frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = detections[polygon_zone.trigger(detections)]
        detections = byte_track.update_with_detections(detections=detections)

        points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER)
        points = view_transformer.transform_points(points=points).astype(int)

        for tracker_id, [_, y] in zip(detections.tracker_id, points):
            coordinates[tracker_id].append(y)

        labels = []
        for tracker_id in detections.tracker_id:
            if len(coordinates[tracker_id]) < video_info.fps / 2:
                labels.append(f"#{tracker_id}")
            else:
                coordinate_start = coordinates[tracker_id][-1]
                coordinate_end = coordinates[tracker_id][0]
                distance = abs(coordinate_start - coordinate_end)
                time = len(coordinates[tracker_id]) / video_info.fps
                speed = distance / time * 3.6  # m/s -> km/h
                labels.append(f"#{tracker_id} {int(speed)} km/h")

        annotated_frame = frame.copy()
        annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())
        annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
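The speed label above is computed from the buffered y coordinates in the transformed, roughly metric space: the distance covered over the time the buffer spans, times 3.6 to convert m/s to km/h. A worked sketch with made-up values:

from collections import deque

fps = 25
# one second worth of buffer; the y values below are invented, in meters
ys = deque([110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122], maxlen=fps)

# the same arithmetic as the labels loop above
distance = abs(ys[-1] - ys[0])   # 12 m covered...
time = len(ys) / fps             # ...across 13 frames = 0.52 s
speed = distance / time * 3.6    # m/s -> km/h
print(int(speed))                # 83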
--------------------------------------------------------------------------------
/Yolov8-ile-kitap-tanima/veri_seti_olusturma.py:
--------------------------------------------------------------------------------
from ultralytics import YOLO
import cv2
import cvzone
import math
from time import time

#############################
classID = 1  # 0 Canakkale, 1 Tesla
save = True
outputFolderPath = 'Dataset/DataCollect'
blurThreshold = 35  # larger means more in focus
floatingPoint = 6
camWidth, camHeight = 640, 480
debug = True

#############################

classNames = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", "boat",
              "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat",
              "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella",
              "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat",
              "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup",
              "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli",
              "carrot", "hot dog", "pizza", "donut", "cake", "chair", "sofa", "pottedplant", "bed",
              "diningtable", "toilet", "tvmonitor", "laptop", "mouse", "remote", "keyboard", "cell phone",
              "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
              "teddy bear", "hair drier", "toothbrush"
              ]

cap = cv2.VideoCapture(0)  # For Webcam
cap.set(3, camWidth)
cap.set(4, camHeight)

model = YOLO("yolov8n.pt")

while True:
    success, img = cap.read()
    if not success:  # camera frame not available; stop instead of crashing on img.copy()
        break
    imgOut = img.copy()
    results = model(img, stream=True)

    listBlur = []  # True/False values indicating whether each detected book is sharp or blurred
    listInfo = []  # the normalized values and the class id for the YOLO label txt file

    for r in results:
        boxes = r.boxes
        for box in boxes:
            if int(box.cls[0]) == 73:  # 73 = "book" in the COCO class list
                # print(int(box.cls[0]))
                # Bounding Box
                x1, y1, x2, y2 = box.xyxy[0]
                x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
                # cv2.rectangle(img,(x1,y1),(x2,y2),(255,0,255),3)
                w, h = x2 - x1, y2 - y1

                cvzone.cornerRect(img, (x1, y1, w, h))
                # Confidence
                conf = math.ceil((box.conf[0] * 100)) / 100

                # ------ Find Blurriness --------
                imgFace = img[y1:y1 + h, x1:x1 + w]  # crop of the detected book (name kept from the original face-dataset script)
                # cv2.imshow("Face", imgFace)
                blurValue = int(cv2.Laplacian(imgFace, cv2.CV_64F).var())
                if blurValue > blurThreshold:
                    listBlur.append(True)
                else:
                    listBlur.append(False)

                # ------ Normalize Values --------
                ih, iw, _ = img.shape
                xc, yc = x1 + w / 2, y1 + h / 2

                xcn, ycn = round(xc / iw, floatingPoint), round(yc / ih, floatingPoint)
                wn, hn = round(w / iw, floatingPoint), round(h / ih, floatingPoint)
                # print(xcn, ycn, wn, hn)

                # ------ To avoid values above 1 --------
                if xcn > 1: xcn = 1
                if ycn > 1: ycn = 1
                if wn > 1: wn = 1
                if hn > 1: hn = 1

                listInfo.append(f"{classID} {xcn} {ycn} {wn} {hn}\n")

                # Class Name
                cls = int(box.cls[0])

                cvzone.putTextRect(img, f'{classNames[cls]} {conf} {blurValue}', (max(0, x1), max(35, y1)), scale=1, thickness=1)

                if debug:
                    cv2.rectangle(img, (x1, y1, w, h), (255, 0, 0), 3)
                    cvzone.putTextRect(img, f'% Blur: {blurValue}', (x1, y1 - 0),
                                       scale=2, thickness=3)

    # ------ To Save --------
    if save:
        # ------ Save Image --------
        timeNow = time()
        timeNow = str(timeNow).split('.')
        timeNow = timeNow[0] + timeNow[1]
        cv2.imwrite(f"{outputFolderPath}/{timeNow}.jpg", imgOut)
        # ------ Save Label Text File --------
        # a label file is only written for frames that contained at least one book
        if listInfo:
            with open(f"{outputFolderPath}/{timeNow}.txt", 'a') as f:
                f.writelines(listInfo)

    cv2.imshow("Image", img)
    cv2.waitKey(1)
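The normalization step above produces standard YOLO label lines: a class id followed by the box center and size, each divided by the image dimensions and clamped to at most 1. A compact restatement as a hypothetical helper (to_yolo_label is not part of the repo):

def to_yolo_label(class_id, x1, y1, w, h, img_w, img_h, ndigits=6):
    """One YOLO label line: class id plus center/size normalized to [0, 1]."""
    xc, yc = x1 + w / 2, y1 + h / 2
    xcn, ycn = round(xc / img_w, ndigits), round(yc / img_h, ndigits)
    wn, hn = round(w / img_w, ndigits), round(h / img_h, ndigits)
    # clamp, as the script does, so values never exceed 1
    xcn, ycn, wn, hn = (min(v, 1) for v in (xcn, ycn, wn, hn))
    return f"{class_id} {xcn} {ycn} {wn} {hn}"

# a 320x240 box whose top-left corner is at (160, 120) in a 640x480 image
print(to_yolo_label(1, 160, 120, 320, 240, 640, 480))  # "1 0.5 0.5 0.5 0.5"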
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit9.py:
--------------------------------------------------------------------------------
# saving the annotated video as an output file


import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np
from collections import defaultdict, deque

SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)],
                   [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]])

TARGET_WIDTH = 25
TARGET_HEIGHT = 250

TARGET = np.array(
    [
        [0, 0],
        [TARGET_WIDTH - 1, 0],
        [TARGET_WIDTH - 1, TARGET_HEIGHT - 1],
        [0, TARGET_HEIGHT - 1],
    ]
)

class ViewTransformer:
    def __init__(self, source: np.ndarray, target: np.ndarray) -> None:
        source = source.astype(np.float32)
        target = target.astype(np.float32)
        self.m = cv2.getPerspectiveTransform(source, target)

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        print("points :", points)
        if len(points) != 0:
            reshaped_points = points.reshape(-1, 1, 2).astype(np.float32)
            transformed_points = cv2.perspectiveTransform(reshaped_points, self.m)
            return transformed_points.reshape(-1, 2)
        else:
            # temporary placeholder so the code after this call does not crash on empty detections
            hata_giderici_gecici_deger = np.array([[1, 2], [5, 8]], dtype='float')
            return hata_giderici_gecici_deger


if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    trace_annotator = sv.TraceAnnotator(
        thickness=thickness,
        trace_length=video_info.fps * 2,
        position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)

    view_transformer = ViewTransformer(source=SOURCE, target=TARGET)

    coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))

    # define the fourcc code and VideoWriter for recording the output video
    cv2_fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    size = list([648, 1152, 3])  # shape of the 0.3x-resized frame: [height, width, channels]
    del size[2]
    size.reverse()
    video = cv2.VideoWriter("cikti_videosu.mp4", cv2_fourcc, 24, size)  # output video name, fourcc, fps, size

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = detections[polygon_zone.trigger(detections)]
        detections = byte_track.update_with_detections(detections=detections)

        points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER)
        points = view_transformer.transform_points(points=points).astype(int)

        for tracker_id, [_, y] in zip(detections.tracker_id, points):
            coordinates[tracker_id].append(y)

        labels = []
        for tracker_id in detections.tracker_id:
            if len(coordinates[tracker_id]) < video_info.fps / 2:
                labels.append(f"#{tracker_id}")
            else:
                coordinate_start = coordinates[tracker_id][-1]
                coordinate_end = coordinates[tracker_id][0]
                distance = abs(coordinate_start - coordinate_end)
                time = len(coordinates[tracker_id]) / video_info.fps
                speed = distance / time * 3.6  # m/s -> km/h
                labels.append(f"#{tracker_id} {int(speed)} km/h")

        annotated_frame = half_frame.copy()
        annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())
        annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        # write the annotated frame to the output video
        video.write(annotated_frame)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    video.release()  # finalize the output file
    cv2.destroyAllWindows()
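One gotcha with the hard-coded size above: cv2.VideoWriter silently drops frames whose dimensions do not match the size it was opened with. A sketch of deriving the size from an actual resized frame instead, under the same 0.3x scaling assumption (not the repo's code):

import cv2
import supervision as sv

frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

# derive the writer size from a real resized frame instead of hard-coding it
first_frame = cv2.resize(next(frame_generator), (0, 0), fx=0.3, fy=0.3)
h, w = first_frame.shape[:2]

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter("cikti_videosu.mp4", fourcc, 24, (w, h))  # size is (width, height)
video.write(first_frame)  # frames whose size differs from (w, h) would be dropped silently
video.release()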
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit8.py:
--------------------------------------------------------------------------------
# draw a trail behind each vehicle (with trace_annotator)
# handle the error raised for empty points values

# trace_annotator = sv.TraceAnnotator(
#     thickness=thickness,
#     trace_length=video_info.fps * 2,
#     position=sv.Position.BOTTOM_CENTER)

# annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)

# the fix: if len(points) != 0:

import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np
from collections import defaultdict, deque

SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)],
                   [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]])

TARGET_WIDTH = 25
TARGET_HEIGHT = 250

TARGET = np.array(
    [
        [0, 0],
        [TARGET_WIDTH - 1, 0],
        [TARGET_WIDTH - 1, TARGET_HEIGHT - 1],
        [0, TARGET_HEIGHT - 1],
    ]
)

class ViewTransformer:
    def __init__(self, source: np.ndarray, target: np.ndarray) -> None:
        source = source.astype(np.float32)
        target = target.astype(np.float32)
        self.m = cv2.getPerspectiveTransform(source, target)

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        print("points :", points)
        if len(points) != 0:
            reshaped_points = points.reshape(-1, 1, 2).astype(np.float32)
            transformed_points = cv2.perspectiveTransform(reshaped_points, self.m)
            return transformed_points.reshape(-1, 2)
        else:
            # temporary placeholder so the code after this call does not crash on empty detections
            hata_giderici_gecici_deger = np.array([[1, 2], [5, 8]], dtype='float')
            return hata_giderici_gecici_deger


if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    trace_annotator = sv.TraceAnnotator(
        thickness=thickness,
        trace_length=video_info.fps * 2,
        position=sv.Position.BOTTOM_CENTER, color_lookup=sv.ColorLookup.TRACK)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)

    view_transformer = ViewTransformer(source=SOURCE, target=TARGET)

    coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = detections[polygon_zone.trigger(detections)]
        detections = byte_track.update_with_detections(detections=detections)

        points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER)
        points = view_transformer.transform_points(points=points).astype(int)

        for tracker_id, [_, y] in zip(detections.tracker_id, points):
            coordinates[tracker_id].append(y)

        labels = []
        for tracker_id in detections.tracker_id:
            if len(coordinates[tracker_id]) < video_info.fps / 2:
                labels.append(f"#{tracker_id}")
            else:
                coordinate_start = coordinates[tracker_id][-1]
                coordinate_end = coordinates[tracker_id][0]
                distance = abs(coordinate_start - coordinate_end)
                time = len(coordinates[tracker_id]) / video_info.fps
                speed = distance / time * 3.6  # m/s -> km/h
                labels.append(f"#{tracker_id} {int(speed)} km/h")

        annotated_frame = half_frame.copy()
        annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())
        annotated_frame = trace_annotator.annotate(scene=annotated_frame, detections=detections)
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
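The placeholder array returned above works because, when there are no detections, the loop that consumes the points zips against an empty tracker_id list and never touches the dummy values. An arguably cleaner alternative, sketched here rather than taken from the repo, is to return an empty (0, 2) array:

import cv2
import numpy as np

def transform_points_safe(points: np.ndarray, m: np.ndarray) -> np.ndarray:
    """Alternative to the placeholder-array trick: return an empty (0, 2) array."""
    if len(points) == 0:
        # zip(detections.tracker_id, points) then simply iterates zero times
        return np.empty((0, 2), dtype=np.float32)
    reshaped = points.reshape(-1, 1, 2).astype(np.float32)
    return cv2.perspectiveTransform(reshaped, m).reshape(-1, 2)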
--------------------------------------------------------------------------------
/yolov8-opencv-hiz-tespiti/hiz_tespit7.py:
--------------------------------------------------------------------------------
## store the computed coordinates per vehicle in a dictionary
## to avoid errors caused by very short distance differences,
# only compute a speed for tracks whose stored coordinate count exceeds half the video fps

# coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))
# for tracker_id, [_, y] in zip(detections.tracker_id, points):
#     coordinates[tracker_id].append(y)

# labels = []
# for tracker_id in detections.tracker_id:
#     if len(coordinates[tracker_id]) < video_info.fps / 2:
#         labels.append(f"#{tracker_id}")
#     else:
#         coordinate_start = coordinates[tracker_id][-1]
#         coordinate_end = coordinates[tracker_id][0]
#         distance = abs(coordinate_start - coordinate_end)
#         time = len(coordinates[tracker_id]) / video_info.fps
#         speed = distance / time * 3.6
#         labels.append(f"#{tracker_id} {int(speed)} km/h")

import supervision as sv
import cv2
from ultralytics import YOLO
import numpy as np
from collections import defaultdict, deque

SOURCE = np.array([[int(1252*0.3), int(787*0.3)], [int(2298*0.3), int(803*0.3)],
                   [int(5039*0.3), int(2159*0.3)], [int(-550*0.3), int(2159*0.3)]])

TARGET_WIDTH = 25
TARGET_HEIGHT = 250

TARGET = np.array(
    [
        [0, 0],
        [TARGET_WIDTH - 1, 0],
        [TARGET_WIDTH - 1, TARGET_HEIGHT - 1],
        [0, TARGET_HEIGHT - 1],
    ]
)

class ViewTransformer:
    def __init__(self, source: np.ndarray, target: np.ndarray) -> None:
        source = source.astype(np.float32)
        target = target.astype(np.float32)
        self.m = cv2.getPerspectiveTransform(source, target)

    def transform_points(self, points: np.ndarray) -> np.ndarray:
        reshaped_points = points.reshape(-1, 1, 2).astype(np.float32)
        transformed_points = cv2.perspectiveTransform(reshaped_points, self.m)
        return transformed_points.reshape(-1, 2)


if __name__ == "__main__":

    video_info = sv.VideoInfo.from_video_path(video_path="vehicles.mp4")
    model = YOLO("yolov8n.pt")

    byte_track = sv.ByteTrack(frame_rate=video_info.fps, track_thresh=0.5)

    thickness = 3
    text_scale = 1

    bounding_box_annotator = sv.BoundingBoxAnnotator(thickness=thickness)
    label_annotator = sv.LabelAnnotator(
        text_scale=text_scale,
        text_thickness=thickness,
        text_position=sv.Position.BOTTOM_CENTER)

    frame_generator = sv.get_video_frames_generator(source_path="vehicles.mp4")

    polygon_zone = sv.PolygonZone(polygon=SOURCE, frame_resolution_wh=video_info.resolution_wh)

    view_transformer = ViewTransformer(source=SOURCE, target=TARGET)

    coordinates = defaultdict(lambda: deque(maxlen=video_info.fps))

    for frame in frame_generator:
        half_frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
        result = model(half_frame)[0]
        detections = sv.Detections.from_ultralytics(result)
        detections = detections[polygon_zone.trigger(detections)]
        detections = byte_track.update_with_detections(detections=detections)

        points = detections.get_anchors_coordinates(anchor=sv.Position.BOTTOM_CENTER)
        points = view_transformer.transform_points(points=points).astype(int)

        for tracker_id, [_, y] in zip(detections.tracker_id, points):
            coordinates[tracker_id].append(y)

        labels = []
        for tracker_id in detections.tracker_id:
            if len(coordinates[tracker_id]) < video_info.fps / 2:
                labels.append(f"#{tracker_id}")
            else:
                coordinate_start = coordinates[tracker_id][-1]
                coordinate_end = coordinates[tracker_id][0]
                distance = abs(coordinate_start - coordinate_end)
                time = len(coordinates[tracker_id]) / video_info.fps
                speed = distance / time * 3.6  # m/s -> km/h
                labels.append(f"#{tracker_id} {int(speed)} km/h")

        annotated_frame = half_frame.copy()
        annotated_frame = sv.draw_polygon(annotated_frame, polygon=SOURCE, color=sv.Color.red())
        annotated_frame = bounding_box_annotator.annotate(
            scene=annotated_frame, detections=detections)

        annotated_frame = label_annotator.annotate(
            scene=annotated_frame, detections=detections, labels=labels)

        cv2.imshow("frame", annotated_frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    cv2.destroyAllWindows()
--------------------------------------------------------------------------------
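The defaultdict of deques used in hiz_tespit7.py through hiz_tespit10.py keeps, per tracker id, at most one second of y coordinates; once maxlen is reached, each append silently evicts the oldest sample. A minimal sketch of that buffering behavior:

from collections import defaultdict, deque

fps = 25
coordinates = defaultdict(lambda: deque(maxlen=fps))

# feed two seconds' worth of made-up y values for track id 7
for frame_idx in range(2 * fps):
    coordinates[7].append(frame_idx)

print(len(coordinates[7]))  # 25 -- the deque never grows past one second of samples
print(coordinates[7][0])    # 25 -- the oldest kept sample; earlier ones were evicted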