├── .github
│   └── workflows
│       └── discord-notify.yml
├── .nojekyll
├── CAMFPV.py
├── CAM_Driverless_Taxi.py
├── Example.png
├── ONNX.py
├── README.md
├── RKNN.py
├── Schematic_VisionPilot_2025-06-26.png
├── anchor_tracking.py
├── docs
│   ├── images
│   │   ├── demo1.jpg
│   │   ├── demo2.jpg
│   │   └── logo.png
│   ├── index.html
│   └── style.css
├── image
│   ├── VideoRecorder.py
│   ├── frames.py
│   └── resize.py
├── pi5-23.png
├── uartTOuart.py
└── uartTOuartV2.py
/.nojekyll:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/images/demo2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/docs/images/demo2.jpg
--------------------------------------------------------------------------------
/Example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/Example.png
--------------------------------------------------------------------------------
/pi5-23.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/pi5-23.png
--------------------------------------------------------------------------------
/docs/images/demo1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/docs/images/demo1.jpg
--------------------------------------------------------------------------------
/docs/images/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/docs/images/logo.png
--------------------------------------------------------------------------------
/ONNX.py:
--------------------------------------------------------------------------------
1 | from ultralytics import YOLO
2 |
3 | # Load the trained segmentation weights
4 | model = YOLO("Y.pt")
5 | model.export(format="onnx", task="segment")  # Export the model to ONNX format
--------------------------------------------------------------------------------
/Schematic_VisionPilot_2025-06-26.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/VohminV/VisionPilot/HEAD/Schematic_VisionPilot_2025-06-26.png
--------------------------------------------------------------------------------
/RKNN.py:
--------------------------------------------------------------------------------
1 | from ultralytics import YOLO
2 |
3 | # Load the YOLO11 model
4 | model = YOLO("object.pt")
5 |
6 | # Export the model to RKNN format
7 | # 'name' can be one of rk3588, rk3576, rk3566, rk3568, rk3562, rv1103, rv1106, rv1103b, rv1106b, rk2118
8 | model.export(format="rknn", name="rk3588") #
--------------------------------------------------------------------------------
/docs/style.css:
--------------------------------------------------------------------------------
1 | body {
2 | font-family: Arial, sans-serif;
3 | background: #f4f4f4;
4 | color: #333;
5 | margin: 0;
6 | padding: 0;
7 | }
8 | header {
9 | background: #004080;
10 | color: white;
11 | padding: 2rem;
12 | text-align: center;
13 | }
14 | .logo {
15 | width: 100px;
16 | }
17 | main {
18 | padding: 2rem;
19 | }
20 | section {
21 | margin-bottom: 2rem;
22 | }
23 | .screenshot {
24 | width: 100%;
25 | max-width: 600px;
26 | display: block;
27 | margin-bottom: 1rem;
28 | }
29 | footer {
30 | background: #ddd;
31 | text-align: center;
32 | padding: 1rem;
33 | }
34 |
--------------------------------------------------------------------------------
/image/resize.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 |
4 | def resize_images_recursive(folder_path, filename_substring="_frame_", target_size=(320, 320)):
5 | for root, _, files in os.walk(folder_path):
6 | for file in files:
7 | if filename_substring in file and file.endswith(".jpg"):
8 | file_path = os.path.join(root, file)
9 | image = cv2.imread(file_path)
10 | if image is None:
11 | print(f"❌ Ошибка: Не удалось загрузить {file_path}")
12 | continue
13 |
14 | resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)
15 | cv2.imwrite(file_path, resized_image)
16 | print(f"✅ Файл {file_path} сжат до {target_size}")
17 |
18 | folder_to_search = "./frames"  # Folder containing the extracted frames
19 | resize_images_recursive(folder_to_search)
20 |
--------------------------------------------------------------------------------
/.github/workflows/discord-notify.yml:
--------------------------------------------------------------------------------
1 | name: Notify Discord on Push
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | notify:
10 | runs-on: ubuntu-latest
11 | steps:
12 |       - name: Checkout repository
13 | uses: actions/checkout@v3
14 |
15 |       - name: Get information about the latest commit
16 | id: gitinfo
17 | run: |
18 | COMMIT_SHA=$(git rev-parse HEAD)
19 |           # Strip newline characters from the commit message
20 | COMMIT_MESSAGE=$(git log -1 --format=%B | tr '\n' ' ')
21 | COMMIT_AUTHOR=$(git log -1 --format="%an")
22 | echo "message=$COMMIT_MESSAGE" >> $GITHUB_OUTPUT
23 | echo "author=$COMMIT_AUTHOR" >> $GITHUB_OUTPUT
24 |
25 |       - name: Send a notification to Discord
26 | env:
27 | DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
28 | run: |
29 | curl -H "Content-Type: application/json" \
30 | -X POST \
31 | -d "{
32 | \"content\": \"📦 Новый пуш от ${{ github.actor }} в репозиторий ${{ github.repository }}!\n👤 Автор коммита: ${{ steps.gitinfo.outputs.author }}\n📝 Сообщение: ${{ steps.gitinfo.outputs.message }}\"
33 | }" \
34 | $DISCORD_WEBHOOK
35 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 | VisionPilot
7 |
8 |
9 |
10 |
15 |
16 |
17 |
18 | 🚀 Features
19 |
20 | - Object detection with YOLOv8
21 | - CRSF drone control protocol (UART)
22 | - RKNN Toolkit integration
23 | - MAVLink flight controller integration
24 |
25 |
26 |
27 |
28 | ⚙️ Installation
29 |
30 | git clone https://github.com/VohminV/VisionPilot.git
31 | cd VisionPilot
32 | pip install -r requirements.txt
33 | python CAMFPV.py
34 | python uartTOuart.py
35 |
36 |
37 |
38 |
39 | 📷 Screenshots
40 |
41 |
42 |
43 |
48 |
49 |
50 |
53 |
54 |
55 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # VisionPilot
2 |
3 | 
4 | 
5 | 
6 |
7 | The VisionPilot project is an autopilot system for the Orange Pi single-board computer that uses computer vision and the CRSF protocol to drive an RC control link. Its core tasks are capturing and processing the video stream, detecting objects with a YOLO model, and sending control commands over UART.
8 |
9 | ---
10 |
11 | ## Key features
12 |
13 | - Video capture from a camera using OpenCV.
14 | - Image processing and object detection with a YOLO model.
15 | - Control data transmission over UART using the CRSF protocol (frame layout sketched below).
16 | - Threaded processing for autonomous control.
17 | - RKNN Toolkit integration for optimized execution on the Orange Pi platform.
18 |
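19 | A CRSF RC frame, as handled by `uartTOuart.py`, is laid out as: sync byte `0xC8`, a length byte, the frame type (`0x16` for RC channels), a 22-byte payload packing 16 channels at 11 bits each, and a CRC8 over the type and payload. Below is a minimal sketch of assembling such a frame with the `pack_channels` and `crc8` helpers (illustrative only; importing `uartTOuart` also opens its I2C bus at import time, so run this on the target board or copy the two helpers):
20 |
21 | ```python
22 | from uartTOuart import pack_channels, crc8
23 |
24 | channels = [992] * 16               # 16 channels at the CRSF center position (992 ticks)
25 | payload = pack_channels(channels)   # 22 bytes, 11 bits per channel
26 | frame = [0xC8, 24, 0x16] + payload  # sync, length (type + payload + CRC), type
27 | frame.append(crc8(frame[2:]))       # CRC8 over type + payload
28 | assert len(frame) == 26             # complete RC-channels frame
29 | ```
30 |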
31 | ---
32 |
33 | ## Project structure
34 |
35 | - `CAMFPV.py`: video capture and object detection module.
36 | - `uartTOuartV2.py`: UART forwarding module with CRSF protocol support.
37 | - `RKNN.py`: module for working with the RKNN Toolkit (optimizing and running the neural network models).
38 | - `offsets.json`: file used to exchange offset and angle parameters between the modules (see the reading sketch below).
39 |
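40 | The offsets file is written atomically (to a temp file, then `os.replace`) by the vision scripts and polled in a loop by the UART forwarders. A minimal sketch of reading it; the values shown are illustrative:
41 |
42 | ```python
43 | import json
44 |
45 | with open('offsets.json') as f:
46 |     offsets = json.load(f)  # e.g. {"x": 12, "y": -5, "angle": 1.8}
47 |
48 | # Pixel offset of the target from the frame center; the 'angle' key (degrees)
49 | # is only written by anchor_tracking.py, hence the default.
50 | dx = offsets.get('x', 0)
51 | dy = offsets.get('y', 0)
52 | angle = offsets.get('angle', 0)
53 | ```
54 |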
55 | ---
56 |
57 | ## Installation and launch
58 |
59 | 1. Clone the repository:
60 |
61 | ```bash
62 | git clone https://github.com/VohminV/VisionPilot.git
63 | cd VisionPilot
64 | ```
65 |
66 | 2. Start video capture and processing:
67 |
68 | ```bash
69 | python CAMFPV.py
70 | ```
71 |
72 | 3. Start the UART forwarding module:
73 |
74 | ```bash
75 | python uartTOuart.py
76 | ```
77 |
78 | ---
79 |
80 | ## Requirements
81 |
82 | - Orange Pi 5 or a similar single-board computer with RKNN support.
83 | - A camera compatible with OpenCV.
84 | - Python 3.7+.
85 | - Packages: `opencv-python`, `numpy`, `pyserial`, `ultralytics` (`threading` is part of the Python standard library).
86 |
87 | ---
88 |
89 | ## Possible improvements
90 |
91 | - Optimizing the models for higher performance.
92 | - Better error handling and logging.
93 | - Support for additional control protocols.
94 | - A GUI for convenient monitoring and configuration.
95 |
96 | ---
97 |
98 | ## License
99 |
100 | The project is distributed under the MIT license.
101 |
102 | ---
103 |
104 | ## Contacts
105 |
106 | - For questions and suggestions, open an issue or email vohmin3@yandex.ru
107 | - Discord channel: https://discord.gg/V2quvqz9S6
108 | ---
109 |
110 | Thank you for your interest in the VisionPilot project!
111 |
--------------------------------------------------------------------------------
/image/frames.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | import random
5 |
6 | def rotate_image(image, angle):
7 | (h, w) = image.shape[:2]
8 | center = (w // 2, h // 2)
9 | matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
10 | return cv2.warpAffine(image, matrix, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
11 |
12 | def random_transform(frame):
13 | h, w = frame.shape[:2]
14 |
15 | if random.random() < 0.8:
16 | frame = rotate_image(frame, random.uniform(-10, 10))
17 | if random.random() < 0.6:
18 | tx, ty = random.uniform(-0.05, 0.05) * w, random.uniform(-0.05, 0.05) * h
19 | M_translate = np.float32([[1, 0, tx], [0, 1, ty]])
20 | frame = cv2.warpAffine(frame, M_translate, (w, h), borderMode=cv2.BORDER_REFLECT)
21 | if random.random() < 0.6:
22 | scale = random.uniform(0.95, 1.05)
23 | M_scale = cv2.getRotationMatrix2D((w // 2, h // 2), 0, scale)
24 | frame = cv2.warpAffine(frame, M_scale, (w, h), borderMode=cv2.BORDER_REFLECT)
25 | if random.random() < 0.3:
26 | frame = cv2.flip(frame, 1)
27 |
28 | return frame
29 |
30 | def extract_frames(video_path, output_folder, target_fps=20, prefix="video"):
31 | os.makedirs(output_folder, exist_ok=True)
32 | cap = cv2.VideoCapture(video_path)
33 |
34 | if not cap.isOpened():
35 | print(f"❌ Ошибка при открытии: {video_path}")
36 | return
37 |
38 | original_fps = cap.get(cv2.CAP_PROP_FPS)
39 |     frame_interval = max(1, int(round(original_fps / target_fps)))  # avoid a zero interval when the source FPS <= target FPS
40 | saved_count, frame_idx = 0, 0
41 |
42 | while True:
43 | ret, frame = cap.read()
44 | if not ret:
45 | break
46 |
47 | if frame_idx % frame_interval == 0:
48 | frame = random_transform(frame)
49 |
50 | filename = os.path.join(output_folder, f"{prefix}_frame_{saved_count:06d}.jpg")
51 | cv2.imwrite(filename, frame)
52 | saved_count += 1
53 |
54 | frame_idx += 1
55 |
56 | cap.release()
57 | print(f"✅ {os.path.basename(video_path)} → {saved_count} кадров сохранено.")
58 |
59 | # ==== Entry point ====
60 | video_folder = "videos"    # Folder with the source videos
61 | output_folder = "frames"   # All frames are written here
62 | target_fps = 30
63 |
64 | for filename in os.listdir(video_folder):
65 | if filename.lower().endswith((".mp4", ".avi", ".mov", ".mkv")):
66 | video_path = os.path.join(video_folder, filename)
67 |         video_prefix = os.path.splitext(filename)[0]  # Strip the extension
68 | extract_frames(video_path, output_folder, target_fps=target_fps, prefix=video_prefix)
69 |
--------------------------------------------------------------------------------
/image/VideoRecorder.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import cv2
3 | import time
4 | import datetime
5 | from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel, QVBoxLayout, QMessageBox
6 | from PyQt5.QtGui import QImage, QPixmap
7 | from PyQt5.QtCore import QTimer
8 |
9 | class VideoRecorderApp(QWidget):
10 | def __init__(self):
11 | super().__init__()
12 |         self.setWindowTitle("Camera recording (PyQt5)")
13 | self.setGeometry(100, 100, 800, 600)
14 |
15 | self.cap = cv2.VideoCapture(0)
16 |
17 | if not self.cap.isOpened():
18 |             raise Exception("Camera not found")
19 |
20 | self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
21 | self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
22 | fps = self.cap.get(cv2.CAP_PROP_FPS)
23 | print(f"Камера запущена с разрешением: {self.width}x{self.height}, FPS: {fps:.2f}")
24 |
25 | self.recording = False
26 |
27 | self.image_label = QLabel(self)
28 |         self.start_button = QPushButton("Start recording", self)
29 |         self.stop_button = QPushButton("Stop recording", self)
30 | self.stop_button.setEnabled(False)
31 |
32 | layout = QVBoxLayout()
33 | layout.addWidget(self.image_label)
34 | layout.addWidget(self.start_button)
35 | layout.addWidget(self.stop_button)
36 | self.setLayout(layout)
37 |
38 | self.timer = QTimer()
39 | self.timer.timeout.connect(self.update_frame)
40 | self.timer.start(30)
41 |
42 | self.start_button.clicked.connect(self.start_recording)
43 | self.stop_button.clicked.connect(self.stop_recording)
44 |
45 | def update_frame(self):
46 | ret, frame = self.cap.read()
47 | if ret:
48 | self.last_frame = frame
49 | h, w, _ = frame.shape
50 | mean_color = frame.mean(axis=(0, 1)).astype(int)
51 | print(f"Кадр: {w}x{h}, Средний цвет: {mean_color}")
52 |
53 | #rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
54 | qt_img = QImage(frame.data, w, h, 3 * w, QImage.Format_RGB888)
55 | self.image_label.setPixmap(QPixmap.fromImage(qt_img))
56 |
57 | # Если запись активна, записываем кадр в файл
58 | if self.recording:
59 | self.out.write(frame)
60 |
61 | def start_recording(self):
62 | if not self.recording:
63 | self.recording = True
64 | self.start_button.setEnabled(False)
65 | self.stop_button.setEnabled(True)
66 |
67 |             # Video writer settings
68 |             fourcc = cv2.VideoWriter_fourcc(*'XVID')
69 |             filename = "input.avi"
70 |             self.out = cv2.VideoWriter(filename, fourcc, 30.0, (self.width, self.height))
71 |
72 |             self.record_start_time = time.time()  # Remember when the recording started
73 |
74 |             # Stop automatically after 60 seconds or on manual stop
75 |             self.timer.timeout.connect(self.check_recording_time)
76 |
77 |             print(f"Recording started: {filename}")
78 |
79 | def check_recording_time(self):
80 |         # Stop the recording after 60 seconds
81 |         if self.recording and time.time() - self.record_start_time > 60:
82 | self.stop_recording()
83 |
84 | def stop_recording(self):
85 | if self.recording:
86 | self.recording = False
87 | self.start_button.setEnabled(True)
88 | self.stop_button.setEnabled(False)
89 |
90 |             # Finish the recording and save the file
91 |             if hasattr(self, 'out'):
92 |                 self.out.release()
93 |
94 |             print("Recording finished.")
95 |             QMessageBox.information(self, "Recording finished", "The video has been saved.")
96 |
97 |             # Quit the application once the recording is finished
98 | QApplication.quit()
99 |
100 | def closeEvent(self, event):
101 | self.stop_recording()
102 | if self.cap.isOpened():
103 | self.cap.release()
104 | event.accept()
105 |
106 |
107 | if __name__ == "__main__":
108 | app = QApplication(sys.argv)
109 | window = VideoRecorderApp()
110 | window.show()
111 | sys.exit(app.exec_())
112 |
--------------------------------------------------------------------------------
/CAMFPV.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import time
4 | import json
5 | import os
6 | from ultralytics import YOLO
7 | from collections import defaultdict, deque
8 | import logging
9 |
10 | logging.basicConfig(filename='detections.log', level=logging.INFO, format='%(asctime)s - %(message)s')
11 |
12 | model = YOLO('/home/orangepi/Documents/YOLO/best_rknn_model')
13 |
14 | screen_width = 720
15 | screen_height = 576
16 | cv2.namedWindow("Detection", cv2.WND_PROP_FULLSCREEN)
17 | cv2.setWindowProperty("Detection", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
18 |
19 | track_history = defaultdict(list)
20 |
21 | offset_buffer = deque(maxlen=20)
22 |
23 | def save_offset(avg_x, avg_y):
24 | data = {'x': avg_x, 'y': avg_y}
25 | tmp_filename = 'offsets_tmp.json'
26 | final_filename = 'offsets.json'
27 | try:
28 | with open(tmp_filename, 'w') as f:
29 | json.dump(data, f)
30 | os.replace(tmp_filename, final_filename)
31 | except Exception as e:
32 | logging.error(f"Error saving offsets: {e}")
33 |
34 | def crop_frame(frame, center_x, center_y, size):
35 | crop_x1 = max(center_x - size // 2, 0)
36 | crop_x2 = min(center_x + size // 2, frame.shape[1])
37 | crop_y1 = max(center_y - size // 2, 0)
38 | crop_y2 = min(center_y + size // 2, frame.shape[0])
39 | return frame[crop_y1:crop_y2, crop_x1:crop_x2].copy()
40 |
41 | def display_frame(frame_resized, cropped_frame, fps, center_x, center_y, avg_x, avg_y):
42 | detection_preview_size = 200
43 | cropped_resized = cv2.resize(cropped_frame, (detection_preview_size, detection_preview_size))
44 | y_offset = screen_height - detection_preview_size
45 | x_offset = screen_width - detection_preview_size
46 | frame_resized[y_offset:y_offset + detection_preview_size, x_offset:x_offset + detection_preview_size] = cropped_resized
47 |
48 | cv2.putText(frame_resized, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
49 | cv2.circle(frame_resized, (center_x, center_y), 5, (0, 0, 255), -1)
50 |
51 | detection_point_x = center_x + avg_x
52 | detection_point_y = center_y + avg_y
53 | cv2.circle(frame_resized, (detection_point_x, detection_point_y), 5, (0, 255, 0), -1)
54 |
55 | cv2.imshow("Detection", frame_resized)
56 |
57 | def main(model):
58 | cap = cv2.VideoCapture(0)
59 | size = 320
60 | prev_time = time.time()
61 |
62 | global offset_buffer
63 |
64 | while True:
65 | ret, frame = cap.read()
66 | if not ret:
67 | logging.error("Failed to grab frame from camera.")
68 | break
69 |
70 | current_time = time.time()
71 | fps = 1 / (current_time - prev_time) if current_time != prev_time else 0
72 | prev_time = current_time
73 |
74 | center_x, center_y = frame.shape[1] // 2, frame.shape[0] // 2
75 | cropped_frame = crop_frame(frame, center_x, center_y, size)
76 |
77 | results = model(cropped_frame, imgsz=320, conf=0.6)
78 | offset_x, offset_y = 0, 0
79 |
80 | if results and hasattr(results[0], 'boxes') and results[0].boxes is not None:
81 | boxes = results[0].boxes.xyxy.cpu().numpy() # x1, y1, x2, y2
82 | classes = results[0].boxes.cls.int().cpu().tolist()
83 | confidences = results[0].boxes.conf.cpu().tolist()
84 |
85 | for (x1, y1, x2, y2), obj_class, confidence in zip(boxes, classes, confidences):
86 |                 # Box center
87 | x = int((x1 + x2) / 2)
88 | y = int((y1 + y2) / 2)
89 |
90 | cropped_center_x = cropped_frame.shape[1] // 2
91 | cropped_center_y = cropped_frame.shape[0] // 2
92 | offset_x = x - cropped_center_x
93 | offset_y = y - cropped_center_y
94 |
95 | break
96 |
97 | offset_buffer.append((offset_x, offset_y))
98 |
99 | avg_x = int(np.mean([x for x, _ in offset_buffer]))
100 | avg_y = int(np.mean([y for _, y in offset_buffer]))
101 | save_offset(avg_x, avg_y)
102 |
103 | frame_resized = cv2.resize(frame, (screen_width, screen_height))
104 | display_frame(frame_resized, cropped_frame, fps, center_x, center_y, avg_x, avg_y)
105 |
106 | if cv2.waitKey(1) & 0xFF == ord('q'):
107 | logging.info("Exiting program.")
108 | break
109 |
110 | cap.release()
111 | cv2.destroyAllWindows()
112 |
113 |
114 | if __name__ == "__main__":
115 | main(model)
116 |
--------------------------------------------------------------------------------
/CAM_Driverless_Taxi.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import time
4 | import json
5 | import os
6 | from ultralytics import YOLO
7 | from collections import deque
8 | import logging
9 | import math
10 |
11 | # Logger setup
12 | logging.basicConfig(filename='detections.log', level=logging.INFO, format='%(asctime)s - %(message)s')
13 |
14 | # Initialize the YOLO segmentation model
15 | model = YOLO('Y.onnx', task='segment')
16 |
17 | # Display parameters
18 | screen_width = 720
19 | screen_height = 576
20 | cv2.namedWindow("Detection", cv2.WND_PROP_FULLSCREEN)
21 | cv2.setWindowProperty("Detection", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
22 |
23 | offset_buffer = deque(maxlen=20)
24 |
25 | def save_offset(avg_x, avg_y):
26 | data = {'x': avg_x, 'y': avg_y}
27 | tmp_filename = 'offsets_tmp.json'
28 | final_filename = 'offsets.json'
29 | try:
30 | with open(tmp_filename, 'w') as f:
31 | json.dump(data, f)
32 | os.replace(tmp_filename, final_filename)
33 | except Exception as e:
34 | logging.error(f"Error saving offsets: {e}")
35 |
36 | def crop_frame(frame, center_x, center_y, size):
37 | crop_x1 = max(center_x - size // 2, 0)
38 | crop_x2 = min(center_x + size // 2, frame.shape[1])
39 | crop_y1 = max(center_y - size // 2, 0)
40 | crop_y2 = min(center_y + size // 2, frame.shape[0])
41 | return frame[crop_y1:crop_y2, crop_x1:crop_x2].copy()
42 |
43 | def display_frame(frame_resized, cropped_frame, fps, center_x, center_y, avg_x, avg_y):
44 | detection_preview_size = 200
45 | cropped_resized = cv2.resize(cropped_frame, (detection_preview_size, detection_preview_size))
46 | y_offset = screen_height - detection_preview_size
47 | x_offset = screen_width - detection_preview_size
48 | frame_resized[y_offset:y_offset + detection_preview_size, x_offset:x_offset + detection_preview_size] = cropped_resized
49 |
50 | cv2.putText(frame_resized, f"FPS: {fps:.2f}", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
51 | cv2.circle(frame_resized, (center_x, center_y), 5, (0, 0, 255), -1)
52 |
53 | detection_point_x = center_x + avg_x
54 | detection_point_y = center_y + avg_y
55 | cv2.circle(frame_resized, (detection_point_x, detection_point_y), 5, (0, 255, 0), -1)
56 |
57 | cv2.imshow("Detection", frame_resized)
58 |
59 | def main(model):
60 | cap = cv2.VideoCapture(0)
61 | cap.set(cv2.CAP_PROP_FPS, 60)
62 | size = 320
63 | prev_time = time.time()
64 |
65 | while True:
66 | ret, frame = cap.read()
67 | if not ret:
68 | logging.error("Failed to grab frame from camera.")
69 | break
70 |
71 | current_time = time.time()
72 | fps = 1 / (current_time - prev_time) if current_time != prev_time else 0
73 | prev_time = current_time
74 |
75 | center_x, center_y = frame.shape[1] // 2, frame.shape[0] // 2
76 | cropped_frame = crop_frame(frame, center_x, center_y, size)
77 |
78 | results = model(cropped_frame, imgsz=320, conf=0.8)
79 |
80 | offset_x, offset_y = 0, 0
81 | avg_angle = 0
82 |
83 | if results and results[0].masks is not None and len(results[0].masks.data) > 0:
84 | boxes = results[0].boxes
85 | class_ids = boxes.cls.cpu().numpy()
86 | confidences = boxes.conf.cpu().numpy()
87 |
88 | target_class_id = 0
89 | best_mask_idx = -1
90 | best_conf = 0
91 | for i, (cls_id, conf) in enumerate(zip(class_ids, confidences)):
92 | if cls_id == target_class_id and conf > best_conf:
93 | best_conf = conf
94 | best_mask_idx = i
95 |
96 | if best_mask_idx >= 0:
97 | mask_tensor = results[0].masks.data[best_mask_idx]
98 | mask = (mask_tensor.cpu().numpy() * 255).astype(np.uint8)
99 |
100 |                 # Find the mask contour
101 | contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
102 | if contours:
103 | contour = max(contours, key=cv2.contourArea)
104 | cv2.polylines(cropped_frame, [contour], isClosed=True, color=(0, 255, 255), thickness=2)
105 |
106 |                     # Contour centroid
107 | moments = cv2.moments(contour)
108 | if moments["m00"] != 0:
109 | cX = int(moments["m10"] / moments["m00"])
110 | cY = int(moments["m01"] / moments["m00"])
111 | else:
112 | cX, cY = 0, 0
113 |
114 | offset_x = cX - cropped_frame.shape[1] // 2
115 | offset_y = cY - cropped_frame.shape[0] // 2
116 |
117 |                     # Object angle
118 | rect = cv2.minAreaRect(contour)
119 | (center_x_rect, center_y_rect), (w, h), angle = rect
120 | if w < h:
121 | angle = 90 + angle
122 |
123 |                     # Draw the heading arrow
124 | cv2.circle(cropped_frame, (cX, cY), 5, (0, 0, 255), -1)
125 | length = 50
126 | angle_rad = math.radians(angle)
127 | end_x = int(cX + length * math.cos(angle_rad))
128 | end_y = int(cY + length * math.sin(angle_rad))
129 | cv2.arrowedLine(cropped_frame, (cX, cY), (end_x, end_y), (255, 0, 0), 2)
130 |
131 | logging.info(f"Smoothed angle: {angle:.2f} degrees")
132 |
133 | offset_buffer.append((offset_x, offset_y))
134 | avg_x = int(np.mean([x for x, _ in offset_buffer]))
135 | avg_y = int(np.mean([y for _, y in offset_buffer]))
136 | save_offset(avg_x, avg_y)
137 |
138 | frame_resized = cv2.resize(frame, (screen_width, screen_height))
139 | display_frame(frame_resized, cropped_frame, fps, center_x, center_y, avg_x, avg_y)
140 |
141 | if cv2.waitKey(1) & 0xFF == ord('q'):
142 | logging.info("Exiting program.")
143 | break
144 |
145 | cap.release()
146 | cv2.destroyAllWindows()
147 |
148 | if __name__ == "__main__":
149 | main(model)
--------------------------------------------------------------------------------
/anchor_tracking.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import time
4 | import json
5 | import os
6 | import logging
7 | import math
8 |
9 | logging.basicConfig(filename='anchor_tracking.log', level=logging.INFO,
10 | format='%(asctime)s - %(message)s')
11 |
12 | FLAG_PATH = '/home/orangepi/Documents/YOLO/tracking_enabled.flag'
13 |
14 | def set_tracking(enabled: bool):
15 | tmp_path = FLAG_PATH + '.tmp'
16 | try:
17 | with open(tmp_path, 'w') as f:
18 | f.write('1' if enabled else '0')
19 | os.replace(tmp_path, FLAG_PATH)
20 | except Exception as e:
21 | logging.error(f"Ошибка записи флага трекинга: {e}")
22 |
23 | def is_tracking_enabled():
24 | if not os.path.exists(FLAG_PATH):
25 | return False
26 | try:
27 | with open(FLAG_PATH, 'r') as f:
28 | return f.read().strip() == '1'
29 | except Exception as e:
30 | logging.error(f"Ошибка чтения флага трекинга: {e}")
31 | return False
32 |
33 | def toggle_tracking_flag():
34 | current = is_tracking_enabled()
35 | set_tracking(not current)
36 |
37 | def save_offset(avg_x, avg_y, angle):
38 | data = {
39 | 'x': float(avg_x),
40 | 'y': float(avg_y),
41 | 'angle': float(angle)
42 | }
43 | tmp_filename = '/home/orangepi/Documents/YOLO/offsets_tmp.json'
44 | final_filename = '/home/orangepi/Documents/YOLO/offsets.json'
45 | try:
46 | with open(tmp_filename, 'w') as f:
47 | json.dump(data, f)
48 | os.replace(tmp_filename, final_filename)
49 | except Exception as e:
50 | logging.error(f"Error saving offsets: {e}")
51 |
52 | # Initialize a Kalman filter for [dx, dy, angle]
53 | def init_kalman():
54 |     kf = cv2.KalmanFilter(3, 3)  # 3 state variables, 3 measurements
55 |
56 |     kf.transitionMatrix = np.eye(3, dtype=np.float32)  # identity: the state is unchanged by default
57 |     kf.measurementMatrix = np.eye(3, dtype=np.float32)
58 |
59 |     kf.processNoiseCov = np.eye(3, dtype=np.float32) * 1e-4  # process noise (tunable)
60 |     kf.measurementNoiseCov = np.eye(3, dtype=np.float32) * 1e-2  # measurement noise (tunable)
61 |
62 |     kf.statePre = np.zeros((3, 1), dtype=np.float32)
63 |     kf.statePost = np.zeros((3, 1), dtype=np.float32)
64 |
65 | return kf
66 |
67 | def main():
68 | cap = cv2.VideoCapture(0)
69 | cv2.namedWindow("Anchor Optical Flow Tracker", cv2.WND_PROP_FULLSCREEN)
70 | cv2.setWindowProperty("Anchor Optical Flow Tracker", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
71 | if not cap.isOpened():
72 | print("Ошибка: не удалось открыть камеру")
73 | return
74 |
75 | proc_width, proc_height = 720, 576
76 |
77 | lk_params = dict(winSize=(21, 21),
78 | maxLevel=3,
79 | criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))
80 |
81 | first_gray = None
82 | first_pts = None
83 | tracking_initialized = False
84 | accumulated_offset = np.array([0.0, 0.0])
85 | prev_time = time.time()
86 | prev_angle = 0.0
87 |
88 |     alpha = 0.3  # angle smoothing coefficient
89 |
90 | kalman = init_kalman()
91 |
92 | while True:
93 | ret, frame = cap.read()
94 | if not ret:
95 | logging.error("Не удалось получить кадр с камеры")
96 | break
97 |
98 | tracking_enabled = is_tracking_enabled()
99 |
100 | frame_proc = cv2.resize(frame, (proc_width, proc_height))
101 | gray = cv2.cvtColor(frame_proc, cv2.COLOR_BGR2GRAY)
102 |
103 | if not tracking_enabled:
104 | first_gray = None
105 | first_pts = None
106 | tracking_initialized = False
107 | accumulated_offset = np.array([0.0, 0.0])
108 | prev_angle = 0.0
109 |             kalman = init_kalman()  # reset the Kalman filter
110 |
111 | vis = frame_proc.copy()
112 | cv2.putText(vis, "Tracking disabled (press 't' to enable)", (10, 30),
113 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
114 | cv2.imshow("Anchor Optical Flow Tracker", vis)
115 |
116 | key = cv2.waitKey(1) & 0xFF
117 | if key == ord('q'):
118 | break
119 | elif key == ord('t'):
120 |                 toggle_tracking_flag()
121 | continue
122 |
123 | if not tracking_initialized:
124 | pts = cv2.goodFeaturesToTrack(gray, maxCorners=300, qualityLevel=0.015, minDistance=7)
125 | if pts is not None and len(pts) > 0:
126 | first_gray = gray.copy()
127 | first_pts = pts
128 | tracking_initialized = True
129 | accumulated_offset = np.array([0.0, 0.0])
130 | prev_angle = 0.0
131 | kalman = init_kalman()
132 | else:
133 | vis = frame_proc.copy()
134 | cv2.putText(vis, "No keypoints for initialization", (10, 30),
135 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
136 | cv2.imshow("Anchor Optical Flow Tracker", vis)
137 | key = cv2.waitKey(1) & 0xFF
138 | if key == ord('q'):
139 | break
140 | elif key == ord('t'):
141 |                     toggle_tracking_flag()
142 | continue
143 |
144 | next_pts, status, error = cv2.calcOpticalFlowPyrLK(first_gray, gray, first_pts, None, **lk_params)
145 |
146 | if next_pts is None or status is None:
147 | logging.warning("Оптический поток не найден, сброс трекинга")
148 | first_gray = None
149 | first_pts = None
150 | tracking_initialized = False
151 | accumulated_offset = np.array([0.0, 0.0])
152 | prev_angle = 0.0
153 | kalman = init_kalman()
154 | continue
155 |
156 | status = status.flatten()
157 | good_new = next_pts[status == 1].reshape(-1, 2)
158 | good_old = first_pts[status == 1].reshape(-1, 2)
159 |
160 | if len(good_new) < 10:
161 | logging.warning("Слишком мало отслеживаемых точек, сброс трекинга")
162 | first_gray = None
163 | first_pts = None
164 | tracking_initialized = False
165 | accumulated_offset = np.array([0.0, 0.0])
166 | prev_angle = 0.0
167 | kalman = init_kalman()
168 | continue
169 |
170 | H, inliers = cv2.estimateAffinePartial2D(
171 | good_old.reshape(-1, 1, 2), good_new.reshape(-1, 1, 2),
172 | method=cv2.RANSAC, ransacReprojThreshold=3, maxIters=2000)
173 |
174 | angle = prev_angle
175 | if H is not None and inliers is not None and np.count_nonzero(inliers) > 5:
176 | raw_angle_rad = math.atan2(H[1, 0], H[0, 0])
177 | raw_angle = math.degrees(raw_angle_rad)
178 |
179 | angle_diff = ((raw_angle - prev_angle + 180) % 360) - 180
180 | angle = prev_angle + alpha * angle_diff
181 | if angle > 180:
182 | angle -= 360
183 | elif angle < -180:
184 | angle += 360
185 |
186 | dxs = good_new[:, 0] - good_old[:, 0]
187 | dys = good_new[:, 1] - good_old[:, 1]
188 | avg_dx = np.mean(dxs)
189 | avg_dy = np.mean(dys)
190 |
191 |         # Smooth dx, dy and the angle with the Kalman filter
192 |         measurement = np.array([[np.float32(avg_dx)],
193 |                                 [np.float32(avg_dy)],
194 |                                 [np.float32(angle)]])
195 |         kalman.predict()  # the predict step must run before correct()
196 | corrected = kalman.correct(measurement)
197 | filtered_dx, filtered_dy, filtered_angle = corrected.flatten()
198 |
199 | accumulated_offset = np.array([filtered_dx, filtered_dy])
200 | prev_angle = filtered_angle
201 |
202 | save_offset(accumulated_offset[0], accumulated_offset[1], prev_angle)
203 |
204 | vis = frame_proc.copy()
205 | center = (proc_width // 2, proc_height // 2)
206 | offset_point = (
207 | int(center[0] - accumulated_offset[0] * 1.5),
208 | int(center[1] - accumulated_offset[1] * 1.5)
209 | )
210 | cv2.arrowedLine(vis, center, offset_point, (0, 255, 0), 2, tipLength=0.3)
211 | cv2.circle(vis, center, 5, (0, 0, 255), -1)
212 | cv2.putText(vis, f"Offset X: {accumulated_offset[0]:.2f}", (10, 60),
213 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
214 | cv2.putText(vis, f"Offset Y: {accumulated_offset[1]:.2f}", (10, 90),
215 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
216 | cv2.putText(vis, f"Angle: {prev_angle:.2f}", (10, 120),
217 | cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
218 |
219 | current_time = time.time()
220 | fps = 1.0 / (current_time - prev_time) if current_time != prev_time else 0
221 | prev_time = current_time
222 | cv2.putText(vis, f"FPS: {fps:.1f}", (10, proc_height - 10),
223 | cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
224 |
225 | cv2.imshow("Anchor Optical Flow Tracker", vis)
226 |
227 | key = cv2.waitKey(1) & 0xFF
228 | if key == ord('q'):
229 | break
230 | elif key == ord('r'):
231 | pts = cv2.goodFeaturesToTrack(gray, maxCorners=300, qualityLevel=0.015, minDistance=7)
232 | if pts is not None and len(pts) > 0:
233 | first_pts = pts
234 | first_gray = gray.copy()
235 | accumulated_offset = np.array([0.0, 0.0])
236 | prev_angle = 0.0
237 | tracking_initialized = True
238 | kalman = init_kalman()
239 | elif key == ord('t'):
240 |             toggle_tracking_flag()
241 |
242 | cap.release()
243 | cv2.destroyAllWindows()
244 |
245 | if __name__ == "__main__":
246 | main()
247 |
--------------------------------------------------------------------------------
/uartTOuartV2.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import time
3 | import struct
4 | import os
5 | import random
6 | import json
7 | import logging
8 | import math
9 | import threading
10 | from pymavlink import mavutil
11 |
12 | # Logger setup
13 | logging.basicConfig(
14 |     level=logging.INFO,  # logging level (use DEBUG for more verbose output)
15 |     format='%(asctime)s [%(levelname)s] %(message)s',
16 |     handlers=[
17 |         logging.FileHandler("/home/orangepi/Documents/YOLO/uart_forwarder.log"),  # log to a file
18 |         logging.StreamHandler()  # also print to the console
19 |     ]
20 | )
21 |
22 | FLAG_PATH = '/home/orangepi/Documents/YOLO/tracking_enabled.flag'
23 | def set_tracking(enabled: bool):
24 | tmp_path = FLAG_PATH + '.tmp'
25 | try:
26 | with open(tmp_path, 'w') as f:
27 | f.write('1' if enabled else '0')
28 | os.replace(tmp_path, FLAG_PATH)
29 | except Exception as e:
30 | print(f"Ошибка записи флага: {e}")
31 |
32 | crc8tab = [
33 | 0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54, 0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
34 | 0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06, 0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
35 | 0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0, 0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
36 | 0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2, 0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
37 | 0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9, 0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
38 | 0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B, 0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
39 | 0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D, 0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
40 | 0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F, 0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
41 | 0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB, 0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
42 | 0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9, 0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
43 | 0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F, 0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
44 | 0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D, 0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
45 | 0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26, 0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
46 | 0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74, 0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
47 | 0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82, 0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
48 | 0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0, 0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
49 | ]
50 |
51 | channels_old = None
52 | data_without_crc_old = None
53 | speed_old = None
54 | correction_active = False
55 | # Event used to stop the background thread
56 | stop_event = threading.Event()
57 | # Flag showing whether the background thread is running
58 | is_thread_running = False
59 | # Global variable holding the throttle value at tracking start
60 | throttle_start = None
61 |
62 | def crc8(data):
63 | crc = 0
64 | for byte in data:
65 | crc = crc8tab[crc ^ byte]
66 | return crc
67 |
68 | # Function to pack channels into the CRSF payload format (16 channels, 11 bits each)
69 | def pack_channels(channel_data):
70 | # channel data: array of 16 integers
71 | channel_data = list(reversed(channel_data))
72 | pack_bit = []
73 | for idx, channel in enumerate(channel_data):
74 | pack_bit[idx*11: (idx+1)*11] = "{0:011b}".format(channel)
75 | pack_bit=''.join(pack_bit)
76 | pack_byte = []
77 | for idx in range(22):
78 | current_byte = int(pack_bit[idx*8:(idx+1)*8], 2)
79 | pack_byte.append(current_byte)
80 | pack_byte = list(reversed(pack_byte))
81 | return pack_byte
82 |
83 | # Function to extract channels from the CRSF payload (22 bytes representing 16 channels)
84 | def extract_channels(data):
85 | channels = []
86 | if len(data) != 22: # CRSF packed channel data is 22 bytes
87 | return channels
88 |
89 | # Convert bytes to binary string
90 | bits = ''.join(format(byte, '08b')[::-1] for byte in data)
91 |
92 | # Extract 11-bit channel values
93 | for i in range(16): # CRSF supports up to 16 channels
94 | start = i * 11
95 | end = start + 11
96 | if end <= len(bits):
97 | channel_bits = bits[start:end][::-1]
98 | channel_value = int(channel_bits, 2)
99 | channels.append(channel_value)
100 |
101 | return channels
102 |
103 | def update_rc_channels_in_background(channels_old, uart4, data_without_crc_old):
104 | import json
105 | import logging
106 | import time
107 | import math
108 |
109 |     # CRSF parameters
110 |     CENTER_TICKS = 992
111 |     MIN_TICKS = 172
112 |     MAX_TICKS = 1811
113 |
114 |     # Dead zone: yaw stays centered until |angle| exceeds this value
115 |     DEADZONE_ANGLE = 5  # degrees
116 |
117 | angle = 0
118 |
119 | FRAME_WIDTH = 720
120 | FRAME_HEIGHT = 576
121 |
122 | MAX_OFFSET_X_PX = FRAME_WIDTH // 2 # 360
123 | MAX_OFFSET_Y_PX = FRAME_HEIGHT // 2 # 288
124 |
125 | MAX_DEFLECTION_US = 300
126 | MAX_DEFLECTION_TICKS = int(MAX_DEFLECTION_US * 8 / 5)
127 |
128 | ALT_SCALE = 100
129 | CLIMB_SCALE = 100
130 |
131 | P_int = 4 # 400 / 100
132 | D_int = 1 # 100 / 100
133 | FF_int = 2 # 200 / 100
134 |
135 | def heading_diff(desired, current):
136 | return (desired - current + 540) % 360 - 180
137 |
138 | initial_throttle = channels_old[2]
139 | initial_yaw = channels_old[3]
140 |
141 | while not stop_event.is_set():
142 |
143 | try:
144 | with open('offsets.json', 'r') as f:
145 | offsets = json.load(f)
146 | offset_x = offsets.get('x', 0)
147 | offset_y = offsets.get('y', 0)
148 | angle = offsets.get('angle', 0)
149 |         except Exception:
150 | offset_x = 0
151 | offset_y = 0
152 | angle = 0
153 |
154 |         # Clamp the offsets
155 | offset_x = max(-MAX_OFFSET_X_PX, min(offset_x, MAX_OFFSET_X_PX))
156 | offset_y = max(-MAX_OFFSET_Y_PX, min(offset_y, MAX_OFFSET_Y_PX))
157 |
158 |         # Scale to ticks (separately for X and Y)
159 | def scale_offset_x_to_ticks(offset_px):
160 | return int(offset_px * MAX_DEFLECTION_TICKS / MAX_OFFSET_X_PX)
161 |
162 | def scale_offset_y_to_ticks(offset_px):
163 | return int(offset_px * MAX_DEFLECTION_TICKS / MAX_OFFSET_Y_PX)
164 |
165 | roll_ticks = scale_offset_x_to_ticks(offset_x)
166 | pitch_ticks = scale_offset_y_to_ticks(offset_y)
167 |
168 | channels_old[0] = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + roll_ticks))
169 | channels_old[1] = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + pitch_ticks))
170 |
171 |         if abs(angle) > DEADZONE_ANGLE:  # dead zone so yaw does not react to noise
172 |             yaw_error = angle  # compensate the rotation (a positive angle means yaw right)
173 |
174 |             yaw_error_limited = max(-30, min(30, yaw_error))  # clamp the range
175 |             yaw_normalized = yaw_error_limited / 30.0  # [-1.0 ... 1.0]
176 |
177 | yaw_ticks = int(yaw_normalized * MAX_DEFLECTION_TICKS)
178 | yaw_channel = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + yaw_ticks))
179 |
180 | channels_old[3] = yaw_channel
181 | else:
182 | channels_old[3] = CENTER_TICKS
183 |
184 | packed_channels = pack_channels(channels_old)
185 | data_without_crc_old[3:25] = packed_channels
186 | crc = crc8(data_without_crc_old[2:25])
187 | updated_data = data_without_crc_old + [crc]
188 | uart4.write(bytes(updated_data))
189 |         time.sleep(0.004)  # pace the updates (~250 Hz); without a delay this loop floods the UART
190 |
191 |
192 | global is_thread_running
193 | is_thread_running = False
194 |
195 | # Start the background thread that updates the RC channels
196 | def start_update_rc_channels_thread(channels_old, uart4, data_without_crc_old):
197 |     global is_thread_running  # declared explicitly because we modify the global variable
198 | if not is_thread_running:
199 | stop_event.clear()
200 | update_thread = threading.Thread(
201 | target=update_rc_channels_in_background,
202 | args=(channels_old, uart4, data_without_crc_old)
203 | )
204 |         update_thread.daemon = True  # the thread exits together with the main thread
205 | update_thread.start()
206 |         is_thread_running = True  # set the flag once the thread has started
207 |
208 | # Update the RC channels from an incoming CRSF RC packet
209 | def update_rc_channels(data, uart4):
210 |     global channels_old, data_without_crc_old, is_thread_running, throttle_start
211 |
212 |     if len(data) < 26:
213 |         #print(f"❌ Not enough data: {len(data)} bytes, at least 26 required.")
214 |         return data
215 |
216 |     data_without_crc = data[:-1]  # Drop the trailing CRC byte
217 |     channels = extract_channels(data_without_crc[3:25])
218 |
219 |     if len(channels) < 16:
220 |         #print(f"❌ Not enough channels to update. Found {len(channels)} channels.")
221 |         return
222 |
223 |     #print(f"Channel 11: {channels[11]}")  # Channel activity check
224 |
225 |     # If channel 11 is above 1700 and the worker thread is not running yet, start it
226 |     if channels[11] > 1700:
227 |         if not is_thread_running:
228 |             set_tracking(True)
229 |             channels_old = channels.copy()
230 |             data_without_crc_old = data_without_crc
231 |             throttle_start = channels[2]  # Remember the throttle value at tracking start
232 |             start_update_rc_channels_thread(channels_old, uart4, data_without_crc_old)
233 |
234 |     else:
235 |         stop_event.set()
236 |         set_tracking(False)
237 |
238 |         # Throttle-hold logic: keep the throttle from dropping below its start value
239 |         if throttle_start is not None and throttle_start > channels[2]:
240 |             # Update the channels so the throttle does not fall below the start value
241 |             channels[2] = max(channels[2], throttle_start)
242 |
243 |             # Reassemble the packet
244 |             packed_channels = pack_channels(channels)
245 |             new_data_without_crc = data_without_crc.copy()
246 |             new_data_without_crc[3:25] = packed_channels
247 |             crc = crc8(new_data_without_crc[2:25])
248 |             new_packet = new_data_without_crc + [crc]
249 |
250 |             uart4.write(bytes(new_packet))
251 |         else:
252 |             uart4.write(bytes(data))
253 |         # Reset the cached state
254 |         channels_old = None
255 |         data_without_crc_old = None
256 |         throttle_start = None
257 |
258 | # Forward packets from UART3 to UART4
259 | def uart_forwarder(uart3, uart4):
260 | global is_thread_running
261 | packet_buffer = []
262 |
263 | while True:
264 | try:
265 |             # Read data from uart3
266 |             data = uart3.read(512)
267 |             if not data:
268 |                 continue
269 |
270 |             packet_buffer.extend(data)
271 |             # Process complete packets
272 |             while len(packet_buffer) >= 4:
273 |                 try:
274 |                     # Check the packet start byte
275 |                     if packet_buffer[0] != 0xC8:
276 |                         #print(f"❌ Bad packet start byte: {packet_buffer[0]:02x}")
277 |                         packet_buffer.pop(0)
278 |                         continue
279 |
280 |                     length = packet_buffer[1]  # Packet length from the second byte
281 |                     #print(f"Expected packet length: {length}")
282 |
283 |                     if len(packet_buffer) < length + 2:
284 |                         #print("❌ Incomplete packet, waiting for more data...")
285 |                         break
286 |
287 |                     packet = packet_buffer[:length + 2]
288 |                     packet_buffer = packet_buffer[length + 2:]
289 |
290 |                     #print(f"Received packet: {' '.join(f'{x:02x}' for x in packet)}")
291 |
292 |                     if packet[2] == 0x16:  # RC channels frame type
293 |                         update_rc_channels(packet, uart4)
294 |                     else:
295 |                         if not is_thread_running:
296 |                             uart4.write(bytes(packet))
297 |                         #print(f"Bytes written to UART4: {len(packet)}")
298 |
299 |                 except Exception as e:
300 |                     logging.error(f"❌ Error while processing a packet: {e}")
301 |                     # Clear the buffer to avoid getting stuck
302 |                     packet_buffer.clear()
303 |
304 |         except Exception as e:
305 |             logging.error(f"❌ Error reading data from UART3: {e}")
306 |
307 | # Main entry point
308 | def main():
309 |     logging.info("🚀 Starting UART forwarder...")
310 |     set_tracking(False)
311 |     uart3 = serial.Serial('/dev/ttyS3', 115200, timeout=0)  # adjust the baud rate if needed
312 |     uart4 = serial.Serial('/dev/ttyS4', 420000, timeout=0)  # adjust the baud rate if needed
313 |
314 | uart_forwarder(uart3, uart4)
315 |
316 | uart3.close()
317 | uart4.close()
318 |
319 | if __name__ == "__main__":
320 | main()
--------------------------------------------------------------------------------
/uartTOuart.py:
--------------------------------------------------------------------------------
1 | import serial
2 | import time
3 | import struct
4 | import os
5 | import random
6 | import json
7 | import logging
8 | import math
9 | import threading
10 | from pymavlink import mavutil
11 | # Logger setup
12 | logging.basicConfig(
13 |     level=logging.INFO,  # logging level (use DEBUG for more verbose output)
14 |     format='%(asctime)s [%(levelname)s] %(message)s',
15 |     handlers=[
16 |         logging.FileHandler("/home/orangepi/Documents/YOLO/uart_forwarder.log"),  # log to a file
17 |         logging.StreamHandler()  # also print to the console
18 |     ]
19 | )
20 |
21 | crc8tab = [
22 | 0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54, 0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
23 | 0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06, 0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
24 | 0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0, 0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
25 | 0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2, 0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
26 | 0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9, 0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
27 | 0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B, 0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
28 | 0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D, 0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
29 | 0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F, 0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
30 | 0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB, 0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
31 | 0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9, 0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
32 | 0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F, 0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
33 | 0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D, 0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
34 | 0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26, 0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
35 | 0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74, 0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
36 | 0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82, 0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
37 | 0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0, 0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
38 | ]
39 |
40 | channels_old = None
41 | data_without_crc_old = None
42 | speed_old = None
43 | correction_active = False
44 | # Event used to stop the background thread
45 | stop_event = threading.Event()
46 | # Flag showing whether the background thread is running
47 | is_thread_running = False
48 | is_calibrating = False
49 |
50 | current_altitude = 0.0
51 | climb = 0.0
52 |
53 | from smbus import SMBus
54 | bus = SMBus(5)
55 |
56 | def crc8(data):
57 | crc = 0
58 | for byte in data:
59 | crc = crc8tab[crc ^ byte]
60 | return crc
61 |
62 | def calibrate_qmc5883l(duration_sec=30, delay=0.01):
63 | global bus
64 | QMC5883L_ADDR = 0x0D
65 |
66 | min_x = float('inf')
67 | max_x = float('-inf')
68 | min_y = float('inf')
69 | max_y = float('-inf')
70 | min_z = float('inf')
71 | max_z = float('-inf')
72 |
73 | start_time = time.time()
74 | while time.time() - start_time < duration_sec:
75 | data = bus.read_i2c_block_data(QMC5883L_ADDR, 0x00, 6)
76 | mag_x = (data[1] << 8) | data[0]
77 | mag_y = (data[3] << 8) | data[2]
78 | mag_z = (data[5] << 8) | data[4]
79 |
80 | if mag_x >= 32768: mag_x -= 65536
81 | if mag_y >= 32768: mag_y -= 65536
82 | if mag_z >= 32768: mag_z -= 65536
83 |
84 | min_x = min(min_x, mag_x)
85 | max_x = max(max_x, mag_x)
86 | min_y = min(min_y, mag_y)
87 | max_y = max(max_y, mag_y)
88 | min_z = min(min_z, mag_z)
89 | max_z = max(max_z, mag_z)
90 |
91 | time.sleep(delay)
92 |
93 | offset_x = -((max_x + min_x) // 2)
94 | offset_y = -((max_y + min_y) // 2)
95 | offset_z = -((max_z + min_z) // 2)
96 |
97 | with open('mag_offsets.json', 'w') as f:
98 | json.dump({"x": offset_x, "y": offset_y, "z": offset_z}, f)
99 |
100 | print(f"[✔] Калибровка QMC5883L завершена. Смещения: x={offset_x}, y={offset_y}, z={offset_z}")
101 |
102 | # Function to pack channels into the CRSF payload format (16 channels, 11 bits each)
103 | def pack_channels(channel_data):
104 | # channel data: array of 16 integers
105 | channel_data = list(reversed(channel_data))
106 | pack_bit = []
107 | for idx, channel in enumerate(channel_data):
108 | pack_bit[idx*11: (idx+1)*11] = "{0:011b}".format(channel)
109 | pack_bit=''.join(pack_bit)
110 | pack_byte = []
111 | for idx in range(22):
112 | current_byte = int(pack_bit[idx*8:(idx+1)*8], 2)
113 | pack_byte.append(current_byte)
114 | pack_byte = list(reversed(pack_byte))
115 | return pack_byte
116 |
117 | # Function to extract channels from the CRSF payload (22 bytes representing 16 channels)
118 | def extract_channels(data):
119 | channels = []
120 | if len(data) != 22: # CRSF packed channel data is 22 bytes
121 | return channels
122 |
123 | # Convert bytes to binary string
124 | bits = ''.join(format(byte, '08b')[::-1] for byte in data)
125 |
126 | # Extract 11-bit channel values
127 | for i in range(16): # CRSF supports up to 16 channels
128 | start = i * 11
129 | end = start + 11
130 | if end <= len(bits):
131 | channel_bits = bits[start:end][::-1]
132 | channel_value = int(channel_bits, 2)
133 | channels.append(channel_value)
134 |
135 | return channels
136 |
137 | def update_rc_channels_in_background(channels_old, uart4, data_without_crc_old):
138 | import json
139 | import logging
140 | import time
141 | import math
142 |
143 | global bus
144 |
145 |     # Sensor addresses
146 |     QMC5883L_ADDR = 0x0D
147 |     MAGNETIC_DECLINATION = 8.2667
148 |     # CRSF parameters
149 | CENTER_TICKS = 992
150 | MIN_TICKS = 172
151 | MAX_TICKS = 1811
152 |
153 | FRAME_SIZE = 320
154 | MAX_OFFSET_PX = FRAME_SIZE // 2
155 | MAX_DEFLECTION_US = 300
156 | MAX_DEFLECTION_TICKS = int(MAX_DEFLECTION_US * 8 / 5)
157 |
158 | ALT_SCALE = 100
159 | CLIMB_SCALE = 100
160 |
161 | P_int = 4 # 400 / 100
162 | D_int = 1 # 100 / 100
163 | FF_int = 2 # 200 / 100
164 |
165 | def calculate_heading():
166 |         # Load the calibration offsets
167 | try:
168 | with open('mag_offsets.json', 'r') as f:
169 | offsets = json.load(f)
170 | mag_offset_x = offsets.get('x', 0)
171 | mag_offset_y = offsets.get('y', 0)
172 | except Exception:
173 | mag_offset_x = 0
174 | mag_offset_y = 0
175 |
176 |         # Read 6 bytes of magnetometer data
177 | data = bus.read_i2c_block_data(QMC5883L_ADDR, 0x00, 6)
178 | mag_x = (data[1] << 8) | data[0]
179 | mag_y = (data[3] << 8) | data[2]
180 |
181 |         # Convert to signed int
182 | if mag_x >= 32768: mag_x -= 65536
183 | if mag_y >= 32768: mag_y -= 65536
184 |
185 |         # Apply the calibration
186 | mag_x += mag_offset_x
187 | mag_y += mag_offset_y
188 |
189 |         # Compute the heading in degrees
190 | heading = math.atan2(mag_y, mag_x) * 180 / math.pi
191 | if heading < 0:
192 | heading += 360
193 |
194 |         # Apply the magnetic declination (east is positive)
195 | heading += MAGNETIC_DECLINATION
196 | if heading >= 360:
197 | heading -= 360
198 |
199 | return heading
200 |
201 | def heading_diff(desired, current):
202 | return (desired - current + 540) % 360 - 180
203 |
204 |     # 🌐 Initial values
205 | desired_heading = calculate_heading()
206 |
207 | #initial_throttle = channels_old[2]
208 | initial_yaw = channels_old[3]
209 |
210 | while not stop_event.is_set():
211 |         # Read the sensors
212 | current_heading = calculate_heading()
213 |
214 | try:
215 | with open('offsets.json', 'r') as f:
216 | offsets = json.load(f)
217 | offset_x = offsets.get('x', 0)
218 | offset_y = offsets.get('y', 0)
219 |         except Exception:
220 | offset_x = 0
221 | offset_y = 0
222 |
223 | offset_x = max(-MAX_OFFSET_PX, min(offset_x, MAX_OFFSET_PX))
224 | offset_y = max(-MAX_OFFSET_PX, min(offset_y, MAX_OFFSET_PX))
225 |
226 | def scale_offset_to_ticks(offset_px):
227 | return int(offset_px * MAX_DEFLECTION_TICKS / MAX_OFFSET_PX)
228 |
229 | roll_ticks = scale_offset_to_ticks(offset_x)
230 | pitch_ticks = scale_offset_to_ticks(offset_y)
231 |
232 | channels_old[0] = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + roll_ticks))
233 | channels_old[1] = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + pitch_ticks))
234 | """
235 | error = desired_altitude_int - current_altitude_int
236 | p_out = P_int * error
237 | d_out = -D_int * climb_int
238 | ff_out = FF_int * climb_int
239 | offset = p_out + d_out + ff_out
240 | offset = max(-MAX_DEFLECTION_TICKS, min(MAX_DEFLECTION_TICKS, offset))
241 |
242 | throttle = initial_throttle + offset
243 | throttle = max(MIN_TICKS, min(MAX_TICKS, throttle))
244 | channels_old[2] = throttle
245 | """
246 | yaw_error = max(-30, min(30, heading_diff(desired_heading, current_heading)))
247 |
248 | if abs(yaw_error) > 15:
249 | yaw_normalized = max(-1.0, min(1.0, yaw_error / 30.0))
250 | yaw_ticks = int(yaw_normalized * MAX_DEFLECTION_TICKS)
251 | yaw_channel = max(MIN_TICKS, min(MAX_TICKS, CENTER_TICKS + yaw_ticks))
252 | channels_old[3] = yaw_channel
253 | else:
254 | channels_old[3] = CENTER_TICKS
255 |
256 | packed_channels = pack_channels(channels_old)
257 | data_without_crc_old[3:25] = packed_channels
258 | crc = crc8(data_without_crc_old[2:25])
259 | updated_data = data_without_crc_old + [crc]
260 | uart4.write(bytes(updated_data))
261 |         time.sleep(0.004)  # pace the updates (~250 Hz); without a delay this loop floods the UART
262 | #logging.info(f"HEAD: {current_heading:.1f}, ΔH={yaw_error:.1f}°")
263 |
264 | global is_thread_running
265 | is_thread_running = False
266 |
267 | # Start the background thread that updates the RC channels
268 | def start_update_rc_channels_thread(channels_old, uart4, data_without_crc_old):
269 |     global is_thread_running  # declared explicitly because we modify the global variable
270 | if not is_thread_running:
271 | stop_event.clear()
272 | update_thread = threading.Thread(
273 | target=update_rc_channels_in_background,
274 | args=(channels_old, uart4, data_without_crc_old)
275 | )
276 |         update_thread.daemon = True  # the thread exits together with the main thread
277 | update_thread.start()
278 |         is_thread_running = True  # set the flag once the thread has started
279 |
280 | def calibrate_thread():
281 | global is_calibrating
282 | is_calibrating = True
283 | calibrate_qmc5883l(duration_sec=30, delay=0.01)
284 | is_calibrating = False
285 |
286 | # Update the RC channels from an incoming CRSF RC packet
287 | def update_rc_channels(data, uart4):
288 | global channels_old, data_without_crc_old, is_thread_running, is_calibrating
289 |
290 |     if len(data) < 26:
291 |         #print(f"❌ Not enough data: {len(data)} bytes, at least 26 required.")
292 |         return data
293 |
294 |     data_without_crc = data[:-1]  # Drop the trailing CRC byte
295 |     channels = extract_channels(data_without_crc[3:25])
296 |
297 |     if len(channels) < 16:
298 |         #print(f"❌ Not enough channels to update. Found {len(channels)} channels.")
299 |         return
300 |
301 |     #print(f"Channel 11: {channels[11]}")  # Channel activity check
302 |
303 |     # If channel 11 is above 1700 and the worker thread is not running yet, start it
304 |     if channels[11] > 1700:
305 |         if not is_thread_running:  # the thread is not running yet
306 |             if channels_old is None:
307 |                 channels_old = channels.copy()
308 |             if data_without_crc_old is None:
309 |                 data_without_crc_old = data_without_crc
310 |             # Start the background thread that updates the channels
311 |             start_update_rc_channels_thread(channels_old, uart4, data_without_crc_old)
312 |     elif channels[3] > 1700 and channels[0] < 300 and not is_calibrating:
313 |         t = threading.Thread(target=calibrate_thread)
314 |         t.start()
315 |     # Stop when channel 11 is at or below 1700
316 |     else:
317 |         stop_event.set()
318 |         channels_old = None
319 |         data_without_crc_old = None
320 |         #print("Channel 11 is at or below 1700")
321 |         uart4.write(bytes(data))
322 |
323 | # Forward packets from UART3 to UART4
324 | def uart_forwarder(uart3, uart4):
325 | global is_thread_running
326 | packet_buffer = []
327 |
328 | while True:
329 | try:
330 |             # Read data from uart3
331 |             data = uart3.read(512)
332 |             if not data:
333 |                 continue
334 |
335 |             packet_buffer.extend(data)
336 |             # Process complete packets
337 |             while len(packet_buffer) >= 4:
338 |                 try:
339 |                     # Check the packet start byte
340 |                     if packet_buffer[0] != 0xC8:
341 |                         #print(f"❌ Bad packet start byte: {packet_buffer[0]:02x}")
342 |                         packet_buffer.pop(0)
343 |                         continue
344 |
345 |                     length = packet_buffer[1]  # Packet length from the second byte
346 |                     #print(f"Expected packet length: {length}")
347 |
348 |                     if len(packet_buffer) < length + 2:
349 |                         #print("❌ Incomplete packet, waiting for more data...")
350 |                         break
351 |
352 |                     packet = packet_buffer[:length + 2]
353 |                     packet_buffer = packet_buffer[length + 2:]
354 |
355 |                     #print(f"Received packet: {' '.join(f'{x:02x}' for x in packet)}")
356 |
357 |                     if packet[2] == 0x16:  # RC channels frame type
358 |                         update_rc_channels(packet, uart4)
359 |                     else:
360 |                         if not is_thread_running:
361 |                             uart4.write(bytes(packet))
362 |                         #print(f"Bytes written to UART4: {len(packet)}")
363 |
364 |                 except Exception as e:
365 |                     logging.error(f"❌ Error while processing a packet: {e}")
366 |                     # Clear the buffer to avoid getting stuck
367 |                     packet_buffer.clear()
368 |
369 |         except Exception as e:
370 |             logging.error(f"❌ Error reading data from UART3: {e}")
371 |
372 | # Main entry point
373 | def main():
374 |     logging.info("🚀 Starting UART forwarder...")
375 |
376 |     uart3 = serial.Serial('/dev/ttyS3', 115200, timeout=0)  # adjust the baud rate if needed
377 |     uart4 = serial.Serial('/dev/ttyS4', 420000, timeout=0)  # adjust the baud rate if needed
378 |
379 | uart_forwarder(uart3, uart4)
380 |
381 | uart3.close()
382 | uart4.close()
383 |
384 | if __name__ == "__main__":
385 | main()
--------------------------------------------------------------------------------