├── .gitignore
├── License.md
├── README.md
├── apriltag_detector.service
├── camera_params
│   ├── Logitech_Webcam.json
│   ├── ProBook_11_G2_Webcam.json
│   ├── Small_USB_Camera.json
│   └── Thinkpad_T490.json
├── cameras.json
├── detector.json
├── environment.json
├── images
│   └── calibration_chessboard.jpeg
├── requirements.txt
├── setup.sh
├── src
│   ├── cameras.py
│   ├── driver_station.py
│   ├── gui.py
│   ├── main.py
│   ├── messenger.py
│   ├── quaternions.py
│   ├── shufflelog_api.py
│   ├── solver.py
│   └── tag_tracker.py
├── templates
│   ├── fail.html
│   ├── form.html
│   ├── index.html
│   └── pass.html
└── tools
    ├── calibrate_camera.py
    └── camera_port_scanner.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__/
2 | *.py[cod]
3 | Tracker.Log
4 | .installer_config
5 |
--------------------------------------------------------------------------------
/License.md:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 SouthwestRoboticsProgramming
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # AprilTag Tracker for FRC
2 |
3 | ## This is no longer actively maintained
4 | Use [TagTracker-v2](https://github.com/SouthwestRoboticsProgramming/TagTracker-v2) instead.
5 |
6 | TagTracker is the first FRC implementation of the AprilTag standard capable of full robot localization. It achieves this by combining estimated poses of multiple tags, even across multiple cameras!
7 |
8 | This project was made public by FRC Team 2129, Ultraviolet,
9 | and modified by FRC Team 3620, The Average Joes.
10 |
11 | To put this on your own robot, check out the [wiki](https://github.com/SouthwestRoboticsProgramming/TagTracker/wiki)!
12 |
--------------------------------------------------------------------------------
/apriltag_detector.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Apriltag Detector Service
3 |
4 | [Service]
5 | Type=simple
6 | WorkingDirectory = %p
7 | ExecStart=/usr/bin/python3 %s
8 |
9 | [Install]
10 | WantedBy=multi-user.target
11 |
--------------------------------------------------------------------------------
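
Note: in systemd, %p expands to the unit's prefix name and %s to the invoking user's shell, so the two specifiers above are almost certainly unfinished placeholders. A minimal working sketch, assuming a hypothetical checkout at /home/pi/TagTracker:

    [Unit]
    Description=Apriltag Detector Service
    After=network.target

    [Service]
    Type=simple
    # Hypothetical paths; point these at the actual checkout
    WorkingDirectory=/home/pi/TagTracker
    ExecStart=/usr/bin/python3 /home/pi/TagTracker/src/main.py
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target
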
/camera_params/Logitech_Webcam.json:
--------------------------------------------------------------------------------
1 | {
2 | "fx": 676.6192195641298,
3 | "fy": 676.8359339562655,
4 | "cx": 385.1137834870396,
5 | "cy": 201.81402152233636,
6 | "dist": [ 0.01632932, -0.36390723, -0.01638719, 0.02577886, 0.93133364]
7 | }
8 |
--------------------------------------------------------------------------------
/camera_params/ProBook_11_G2_Webcam.json:
--------------------------------------------------------------------------------
1 | {
2 | "fx": 595.7487947787047,
3 | "fy": 595.9156142635239,
4 | "cx": 309.56098609995973,
5 | "cy": 248.43377626597933
6 | }
--------------------------------------------------------------------------------
/camera_params/Small_USB_Camera.json:
--------------------------------------------------------------------------------
1 | {
2 | "fx": 671.7353387615264,
3 | "fy": 676.3792450156916,
4 | "cx": 378.77904795628274,
5 | "cy": 272.38729137395103,
6 | "dist": [-0.10392609, -0.63257957, -0.01078108, 0.00174896, 0.90013624]
7 | }
--------------------------------------------------------------------------------
/camera_params/Thinkpad_T490.json:
--------------------------------------------------------------------------------
1 | {
2 | "fx" : 559.7699205140387,
3 | "fy" : 561.9898855409883,
4 | "cx" : 368.2361920292215,
5 | "cy" : 225.225099916301
6 | }
--------------------------------------------------------------------------------
/cameras.json:
--------------------------------------------------------------------------------
1 | {
2 | "cameras" : [
3 |
4 | {
5 | "name": "USB Webcam",
6 | "type": "Small_USB_Camera",
7 | "port": 2,
8 | "robot_pose": [
9 | [1, 0, 0, 0.5],
10 | [0, -1, 0, 0],
11 | [0, 0, -1, 0],
12 | [0, 0, 0, 1]
13 | ]
14 | }
15 | ]
16 | }
--------------------------------------------------------------------------------
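
Note: each entry's "type" must name a calibration file under camera_params/ (src/cameras.py opens camera_params/<type>.json), "port" is the OpenCV device index, and "robot_pose" is a 4x4 homogeneous transform placing the camera on the robot. A short sketch of unpacking one entry, assuming only what src/cameras.py and src/solver.py already do:

    import json
    import numpy as np

    with open('cameras.json') as f:
        cam = json.load(f)['cameras'][0]

    pose = np.array(cam['robot_pose'])  # rotation in pose[:3, :3], translation (meters) in pose[:3, 3]
    print(cam['name'], 'mounted at', pose[:3, 3])
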
/detector.json:
--------------------------------------------------------------------------------
1 | {
2 | "families" : "tag16h5",
3 | "border" : 1,
4 | "nthreads" : 4,
5 | "quad_decimate" : 1,
6 | "quad_blur" : 0,
7 | "quad_sigma" : 1,
8 | "refine_edges" : true,
9 | "refine_decode" : false,
10 | "refine_pose" : false,
11 | "debug" : false,
12 | "quad_contours" : true
13 | }
14 |
--------------------------------------------------------------------------------
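
Note: these keys are copied verbatim onto an options object and handed to apriltag.Detector (see _DetectorOptions in src/tag_tracker.py), so the valid keys are whatever fields the installed apriltag package reads; quad_decimate downsamples the image during quad detection (faster, but shorter detection range) and nthreads parallelizes detection. A sketch of the same wiring:

    import json
    import apriltag

    class Options:  # mirrors _DetectorOptions in src/tag_tracker.py
        def __init__(self, d):
            for key, value in d.items():
                setattr(self, key, value)

    with open('detector.json') as f:
        detector = apriltag.Detector(Options(json.load(f)))
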
/environment.json:
--------------------------------------------------------------------------------
1 | {
2 | "tag_family" : "tag16h5",
3 |
4 | "tags" : [
5 | {
6 | "size": 0.2032,
7 | "id": 1,
8 | "transform": [
9 | -1,
10 | 0,
11 | 0,
12 | 7.24310,
13 | 0,
14 | -1,
15 | 0,
16 | -2.93659,
17 | 0,
18 | 0,
19 | 1,
20 | 0.46272,
21 | 0,
22 | 0,
23 | 0,
24 | 1
25 | ]
26 | },
27 |
28 | {
29 | "size": 0.2032,
30 | "id": 2,
31 | "transform": [
32 | -1,
33 | 0,
34 | 0,
35 | 7.24310,
36 | 0,
37 | -1,
38 | 0,
39 | -1.26019,
40 | 0,
41 | 0,
42 | 1,
43 | 0.46272,
44 | 0,
45 | 0,
46 | 0,
47 | 1
48 | ]
49 | },
50 | {
51 | "size": 0.2032,
52 | "id": 3,
53 | "transform": [
54 | -1,
55 | 0,
56 | 0,
57 | 7.24310,
58 | 0,
59 | -1,
60 | 0,
61 | 0.41621,
62 | 0,
63 | 0,
64 | 1,
65 | 0.46272,
66 | 0,
67 | 0,
68 | 0,
69 | 1
70 | ]
71 | },
72 | {
73 | "size": 0.2032,
74 | "id": 4,
75 | "transform": [
76 | -1,
77 | 0,
78 | 0,
79 | 7.90832,
80 | 0,
81 | -1,
82 | 0,
83 | 2.74161,
84 | 0,
85 | 0,
86 | 1,
87 | 0.695452,
88 | 0,
89 | 0,
90 | 0,
91 | 1
92 | ]
93 | },
94 | {
95 | "size": 0.2032,
96 | "id": 5,
97 | "transform": [
98 | 1,
99 | 0,
100 | 0,
101 | -7.90832,
102 | 0,
103 | 1,
104 | 0,
105 | 2.74161,
106 | 0,
107 | 0,
108 | 1,
109 | 0.695452,
110 | 0,
111 | 0,
112 | 0,
113 | 1
114 | ]
115 | },
116 | {
117 | "size": 0.2032,
118 | "id": 6,
119 | "transform": [
120 | 1,
121 | 0,
122 | 0,
123 | -7.24310,
124 | 0,
125 | 1,
126 | 0,
127 | 0.41621,
128 | 0,
129 | 0,
130 | 1,
131 | 0.46272,
132 | 0,
133 | 0,
134 | 0,
135 | 1
136 | ]
137 | },
138 | {
139 | "size": 0.2032,
140 | "id": 7,
141 | "transform": [
142 | 1,
143 | 0,
144 | 0,
145 | -7.24310,
146 | 0,
147 | 1,
148 | 0,
149 | -1.26019,
150 | 0,
151 | 0,
152 | 1,
153 | 0.46272,
154 | 0,
155 | 0,
156 | 0,
157 | 1
158 | ]
159 | },
160 | {
161 | "size": 0.2032,
162 | "id": 8,
163 | "transform": [
164 | 1,
165 | 0,
166 | 0,
167 | -7.24310,
168 | 0,
169 | 1,
170 | 0,
171 | -2.93659,
172 | 0,
173 | 0,
174 | 1,
175 | 0.46272,
176 | 0,
177 | 0,
178 | 0,
179 | 1
180 | ]
181 | }
182 | ]
183 | }
184 |
--------------------------------------------------------------------------------
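
Note: each "transform" is the tag's pose on the field as a 4x4 matrix flattened row-major (rows of [rotation | translation], translation in meters), so tag 1 sits at (7.24310, -2.93659, 0.46272). A sketch of unpacking one:

    import json
    import numpy as np

    with open('environment.json') as f:
        tag = json.load(f)['tags'][0]

    T = np.array(tag['transform']).reshape(4, 4)  # row-major 4x4
    print('tag', tag['id'], 'is at', T[:3, 3])    # [ 7.2431  -2.93659  0.46272]
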
/images/calibration_chessboard.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SouthwestRoboticsProgramming/TagTracker/ca083e8371f788274abc2b86a0db52e9e9d3f3e5/images/calibration_chessboard.jpeg
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | apriltag==0.0.16
2 | numpy==1.22.3
3 | opencv-contrib-python==4.5.5.64
4 | pynetworktables==2021.0.0
5 | imutils==0.5.4
6 |
--------------------------------------------------------------------------------
/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "[INFO] Starting setup"
4 |
5 | yes | sudo apt-get install libgl1-mesa-glx libegl1-mesa libxrandr2 libxrandr2 libxss1 libxcursor1 libxcomposite1 libasound2 libxi6 libxtst6
6 |
7 | echo "[INFO] Finished installing Anaconda for python environment"
8 |
9 | yes n | conda create -n apriltags python=3.8
10 |
11 | echo "[INFO] Created environment for python"
12 |
13 | eval "$(conda shell.bash hook)"  # let 'conda activate' work in a non-interactive shell
14 | conda activate apriltags
15 |
16 | pip install -r requirements.txt
17 |
18 | echo "[INFO] Installed dependencies"
19 |
--------------------------------------------------------------------------------
/src/cameras.py:
--------------------------------------------------------------------------------
1 | from threading import Thread
2 | import cv2
3 | import numpy as np
4 | import json
5 | from main import logger
6 | import time
7 |
8 | class Camera:
9 | def __init__(self, camera_options):
10 |         # Extract individual values
11 | camera_port = camera_options['port']
12 | self.name = camera_options['name']
13 | self.robot_position = camera_options['robot_pose']
14 | self.is_driver = camera_options.get('driver') is not None
15 |
16 | # Extract params JSON
17 | try:
18 | location = f"camera_params/{camera_options['type']}.json"
19 | params_json = open(location, 'r')
20 | params = json.load(params_json)
21 | logger.info(f"Camera params JSON loaded for {self.name}")
22 | except (FileNotFoundError, json.JSONDecodeError) as e:
23 | logger.exception("Could not open camera parameters JSON, qutting")
24 | raise FileNotFoundError("Could not open cameara params JSON '{}' for {}, \
25 | is the path relative to /camera_params?".format(camera_options['type'], self.name)) from e
26 |
27 | params_json.close()
28 |
29 | # Convert params to tuple
30 | self.camera_params = (params['fx'], params['fy'], params['cx'], params['cy'])
31 |         self.distortion = params.get('dist', [0.0, 0.0, 0.0, 0.0, 0.0])  # some calibration files omit "dist"
32 | self.matrix = np.array([
33 | self.camera_params[0], 0, self.camera_params[2],
34 | 0, self.camera_params[1], self.camera_params[3],
35 | 0, 0, 1
36 | ]).reshape(3, 3)
37 |
38 |
39 | self.capture = cv2.VideoCapture(camera_port)
40 |
41 | # def read(self, return_list=None, return_index=None):
42 | # read_value = self.capture.read()
43 |
44 | # if not return_list: # If used outside of multithreaded camera system
45 | # return read_value
46 |
47 | # return_list[return_index] = read_value
48 |
49 |     # Continuously overwrites images_list[list_index], so readers always see the most recent frame
50 | def start_reader(self, images_list, list_index):
51 | while True:
52 | ret, frame = self.capture.read() # Read the camera
53 |
54 | if not ret:
55 | images_list[list_index] = ((ret, frame), self)
56 | continue
57 |
58 | height, width = frame.shape[:2]
59 | dist_coeffs = np.array(self.distortion)
60 | new_mtx, roi = cv2.getOptimalNewCameraMatrix(self.matrix, dist_coeffs, (width, height), 1, (width, height))
61 |
62 |             undistorted = cv2.undistort(frame, self.matrix, dist_coeffs, None, new_mtx)  # original matrix first, new matrix last
63 | crop_x, crop_y, crop_w, crop_h = roi
64 | undistorted = undistorted[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w]
65 |
66 |             images_list[list_index] = ((ret, frame), self)  # NOTE: publishes the raw frame; the undistorted copy above is unused
67 |
68 | def release(self):
69 | self.capture.release()
70 |
71 |
72 | class CameraArray: # Multithread frame captures
73 | def __init__(self, logger, camera_list):
74 | # Put together a list of cameras
75 | self.camera_list = camera_list
76 |
77 | if not self.camera_list:
78 | logger.error("No cameras defined! Quitting")
79 | raise ValueError("No cameras defined in camera array!")
80 |
81 | self.image_list = []
82 | self.threads = []
83 |
84 | # Create threads for each camera
85 | for i, camera in enumerate(self.camera_list):
86 | self.image_list.append(None) # Add something for the thread to edit
87 | self.threads.append(Thread(target=camera.start_reader, args=(self.image_list, i,)))
88 | self.threads[i].start()
89 |
90 | def read_cameras(self): # Returns map of images to camera that they came from
91 | final_images = []
92 |
93 | # Filter out failed image captures and log them
94 | for read_image in self.image_list:
95 |
96 | if not read_image:
97 | continue
98 |
99 | image, camera = read_image
100 |
101 | ret, frame = image # Extract image into ret and the frame
102 |
103 | if not ret: # If the image couldn't be captured
104 | logger.error(f"{camera.name} failed to capture an image")
105 | else: # Otherwise, remove the ret and leave just the image
106 | final_images.append({
107 | 'image': frame, # Remove ret
108 | 'camera': camera
109 | })
110 | return final_images
111 |
112 | def getParams(self):
113 | params = []
114 |
115 | for camera in self.camera_list:
116 |             # camera_params is already a (fx, fy, cx, cy) tuple
117 |             camera_params_tuple = camera.camera_params
118 | params.append(camera_params_tuple)
119 |
120 | return params
121 |
122 | def release_cameras(self):
123 | for camera in self.camera_list:
124 | camera.release()
125 |
--------------------------------------------------------------------------------
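
Design note on CameraArray: each camera gets a dedicated reader thread that overwrites its own fixed slot in image_list, so read_cameras() always sees the latest frame per camera without locks (one writer per index, and CPython list item assignment is atomic under the GIL). One caveat: the reader loops run forever and the threads are not daemons, so the process will not exit on its own after release_cameras(); constructing them with Thread(..., daemon=True) would be a minimal fix.
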
/src/driver_station.py:
--------------------------------------------------------------------------------
1 | from imutils import build_montages
2 | import numpy as np
3 | import cv2
4 |
5 | # WIP Feature, not currently used.
6 |
7 | # data_dict holds both images and the camera that took them
8 | def get_driver_frame(data_dict):
9 | # Just get the cameras marked as driver
10 | drivers = [data['image'] for data in data_dict if data['camera'].is_driver]
11 |
12 | # TODO: No drivers?
13 | if drivers:
14 | # Arrange the images into one collage
15 | collage = create_collage(drivers)
16 | collage = cv2.resize(collage, (50,50))
17 | else:
18 | # Just give back a blank image
19 | # TODO: Descriptive image
20 | collage = np.zeros(shape=[512,512,3], dtype=np.uint8)
21 | return collage
22 |
23 |
24 |
25 |
26 | def create_collage(images):
27 | # TODO: Magic rescaling
28 | montages = build_montages(images, (256, 256), (len(images),1))
29 | return montages[0]
--------------------------------------------------------------------------------
/src/gui.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | DRAW_GUI = True
5 |
6 | # TODO-Ryan: Don't draw pose too and just make it one function
7 | def draw_bounding_box(overlay, detector_result, camera_params, pose):
8 | # Copied from AprilTag source
9 |
10 | # Extract the bounding box (x, y)-coordinates for the AprilTag
11 | # and convert each of the (x, y)-coordinate pairs to integers
12 | (ptA, ptB, ptC, ptD) = detector_result.corners
13 | ptB = (int(ptB[0]), int(ptB[1]))
14 | ptC = (int(ptC[0]), int(ptC[1]))
15 | ptD = (int(ptD[0]), int(ptD[1]))
16 | ptA = (int(ptA[0]), int(ptA[1]))
17 |
18 | # draw the bounding box of the AprilTag detection
19 | cv2.line(overlay, ptA, ptB, (0, 255, 0), 2)
20 | cv2.line(overlay, ptB, ptC, (0, 255, 0), 2)
21 | cv2.line(overlay, ptC, ptD, (0, 255, 0), 2)
22 | cv2.line(overlay, ptD, ptA, (0, 255, 0), 2)
23 |
24 | # draw the center (x, y)-coordinates of the AprilTag
25 | (cX, cY) = (int(detector_result.center[0]), int(detector_result.center[1]))
26 | cv2.circle(overlay, (cX, cY), 5, (0, 0, 255), -1)
27 |
28 | # draw the tag family on the overlay
29 | tagFamily = detector_result.tag_family.decode("utf-8")
30 | cv2.putText(overlay, tagFamily, (ptA[0], ptA[1] - 15),
31 | cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
32 |
33 | # draw the tag id on the overlay
34 | tagId = str(detector_result.tag_id)
35 |     cv2.putText(overlay, tagId, (cX, cY),
36 | cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 2)
37 |
38 | draw_cube(overlay, camera_params, 1, pose)
39 |
40 | # Not done TODO: Finish
41 | def draw_cube(overlay, camera_params, tag_size, pose, z_sign=1):
42 |
43 | opoints = np.array([
44 | -1, -1, 0,
45 | 1, -1, 0,
46 | 1, 1, 0,
47 | -1, 1, 0,
48 | -1, -1, -2*z_sign,
49 | 1, -1, -2*z_sign,
50 | 1, 1, -2*z_sign,
51 | -1, 1, -2*z_sign,
52 | ]).reshape(-1, 1, 3) * 0.5*tag_size
53 |
54 | edges = np.array([
55 | 0, 1,
56 | 1, 2,
57 | 2, 3,
58 | 3, 0,
59 | 0, 4,
60 | 1, 5,
61 | 2, 6,
62 | 3, 7,
63 | 4, 5,
64 | 5, 6,
65 | 6, 7,
66 | 7, 4
67 | ]).reshape(-1, 2)
68 |
69 | fx, fy, cx, cy = camera_params
70 |
71 | K = np.array([fx, 0, cx, 0, fy, cy, 0, 0, 1]).reshape(3, 3)
72 |
73 | rvec, _ = cv2.Rodrigues(pose[:3,:3])
74 | tvec = pose[:3, 3]
75 |
76 | dcoeffs = np.zeros(5)
77 |
78 | ipoints, _ = cv2.projectPoints(opoints, rvec, tvec, K, dcoeffs)
79 |
80 | ipoints = np.round(ipoints).astype(int)
81 |
82 | ipoints = [tuple(pt) for pt in ipoints.reshape(-1, 2)]
83 |
84 | for i, j in edges:
85 | cv2.line(overlay, ipoints[i], ipoints[j], (0, 255, 0), 1, 16)
86 |
--------------------------------------------------------------------------------
/src/main.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from networktables import NetworkTables
3 | import time
4 |
5 | # Get field data to improve logging
6 | fms = NetworkTables.getTable("FMSInfo")
7 | match_type = fms.getEntry('MatchType')
8 | match_number = fms.getEntry('MatchNumber')
9 |
10 | # Configure logging first so other files can use the same formatting
11 | LOG_FILENAME = 'Tracker.Log'
12 | TIME_FORMAT = f"%y %a %b {match_type.getString('Test Match')} {match_number.getString('0')}"
13 |
14 | LOG_FORMAT = '%(levelname)s %(asctime)s - %(message)s'
15 | logging.basicConfig(filename=LOG_FILENAME,
16 | level=logging.DEBUG,
17 | format=LOG_FORMAT,
18 | datefmt=TIME_FORMAT)
19 | logger = logging.getLogger()
20 |
21 |
22 | from argparse import ArgumentParser
23 | from tag_tracker import *
24 | from solver import *
25 | from shufflelog_api import ShuffleLogAPI
26 | from driver_station import get_driver_frame
27 | import json
28 |
29 | def main():
30 | # Create a parser to allow variable arguments
31 | parser = ArgumentParser(prog='AprilTag tracker',
32 | description='AprilTag tracker for FRC')
33 | parser.add_argument('-i', '--networktable_ip', type=str, default='localhost', metavar='', help='RoboRIO ip to access network tables')
34 | parser.add_argument('-e', '--environment', type=str, default='environment.json', metavar='', help='Path to environment definition JSON')
35 | parser.add_argument('-c', '--cameras', type=str, default='cameras.json', metavar='', help='Path to camera definition JSON')
36 | parser.add_argument('-d', '--detector', type=str, default='detector.json', metavar='', help='Path to detector definition JSON')
37 | parser.add_argument('-n', '--no_gui', action='store_true', help='Hide OpenCV gui.')
38 |
39 | args = parser.parse_args()
40 |
41 | # Configure NetworkTables
42 | networktable_ip = args.networktable_ip
43 | if networktable_ip:
44 | NetworkTables.initialize(server=networktable_ip)
45 | else:
46 | NetworkTables.initialize()
47 | # Tables to send back to RoboRIO and driver station
48 | table = NetworkTables.getTable("apriltag")
49 |
50 | # Extract environment JSON
51 | try:
52 | environment_json = open(args.environment, 'r')
53 | environment = json.load(environment_json)
54 | logger.info("Environment JSON loaded")
55 | except (FileNotFoundError, json.JSONDecodeError) as e:
56 | logger.exception("Could not open envionment JSON, quitting")
57 | raise FileNotFoundError(f"Could not open environment JSON '{args.environment}', is the path relative to /TagTracker/?") from e
58 |
59 |
60 | environment_json.close()
61 |
62 |     # Extract cameras JSON
63 | try:
64 | cameras_json = open(args.cameras, 'r')
65 | cameras = json.load(cameras_json)
66 | logger.info("Cameras JSON loaded")
67 | except (FileNotFoundError, json.JSONDecodeError) as exc:
68 | logger.exception("Could not open cameras JSON, quitting")
69 | raise FileNotFoundError(f"Could not open cameras JSON '{args.cameras}', is the path relative to /TagTracker?") from exc
70 |
71 |
72 | cameras_json.close()
73 |
74 | # Extract detector JSON
75 | try:
76 | detector_json = open(args.detector, 'r')
77 | detector = json.load(detector_json)
78 | logger.info("Detector JSON loaded")
79 | except (FileNotFoundError, json.JSONDecodeError) as err:
80 | logger.exception("Could not open detector JSON, qutting")
81 | raise FileNotFoundError(f"Could not open detector JSON '{args.detector}', is the path relative to /TagTracker?") from err
82 |
83 |
84 | detector_json.close()
85 |
86 | # Setup a detector with the JSON settings
87 | detector = Detector(logger, detector)
88 |
89 | camera_list = [Camera(camera_info) for camera_info in cameras['cameras']]
90 |
91 | # Setup a camera array with the JSON settings
92 | camera_array = CameraArray(logger, camera_list)
93 |
94 | # Create a solver to filter estimated positions
95 | # and localize robot
96 | solver = RobotPoseSolver(environment)
97 |
98 | # Initialize ShuffleLog API
99 | messenger_params = {
100 | 'host': 'localhost',
101 | 'port': 5805,
102 | 'name': 'TagTracker',
103 | 'mute_errors': True
104 | }
105 | api = ShuffleLogAPI(messenger_params, environment['tags'], cameras['cameras'])
106 |
107 |
108 | # Main loop, run all the time like limelight
109 | while True:
110 |         tic = time.perf_counter()
111 | data = camera_array.read_cameras()
112 | detection_poses = detector.getPoses(data)
113 |
114 | position, matrices = solver.solve(detection_poses)
115 |
116 | if not args.no_gui:
117 | for i, image in enumerate(data):
118 | cv2.imshow(str(i), image['image'])
119 |
120 | # Send the solved position back to robot
121 | api.publish_test_matrices(matrices)
122 | if position is None: position = [0, 0, 0]
123 | table.putNumberArray('position', position)
124 |
125 | # Read incoming API messages
126 | api.read()
127 |
128 | # Q to stop the program
129 | if cv2.waitKey(1) & 0xFF == ord('q'):
130 | break
131 |
132 | toc = time.perf_counter()
133 | print(f"FPS: {1/(toc-tic)}")
134 |
135 | # Disconnect from Messenger
136 | api.shutdown()
137 |
138 | # Safely close video operation
139 | cv2.destroyAllWindows()
140 | camera_array.release_cameras()
141 |
142 |
143 | if __name__ == '__main__':
144 | main()
145 |
--------------------------------------------------------------------------------
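
A typical invocation, assuming the working directory is the repository root so the default JSON paths resolve (the RoboRIO address is a placeholder following FRC's 10.TE.AM.2 convention; pass -n/--no_gui on a headless coprocessor):

    python3 src/main.py --networktable_ip 10.36.20.2 \
        --environment environment.json \
        --cameras cameras.json \
        --detector detector.json
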
/src/messenger.py:
--------------------------------------------------------------------------------
1 | """
2 | Python port of the MessengerClient API.
3 |
4 | Use MessengerClient to connect to a server. In order to properly
5 | receive messages, you must call the read_messages() method on it
6 | while your program is running. The disconnect() method should be
7 | called when the program ends to shut down background threads. If
8 | you do not call disconnect(), the program will appear to hang.
9 | """
10 |
11 | import socket
12 | import time
13 | import threading
14 | import struct
15 | import select
16 |
17 |
18 | class MessageBuilder:
19 | """
20 | Allows easy storage of data into a message.
21 | """
22 |
23 | def __init__(self, client, type):
24 | self.client = client
25 | self.type = type
26 | self.buffer = b''
27 | pass
28 |
29 | def send(self):
30 | """
31 | Sends the message with the type and data.
32 | """
33 | self.client._send_message(self.type, self.buffer)
34 |
35 | def add_boolean(self, b):
36 | """
37 | Adds a boolean to this message
38 |
39 | :param b: boolean to add
40 | :return: self
41 | """
42 |
43 | self.buffer += struct.pack('>?', b)
44 | return self
45 |
46 | def add_string(self, s):
47 | """
48 | Adds a string to this message
49 |
50 | :param s: string to add
51 | :return: self
52 | """
53 |
54 | self.buffer += _encode_string(s)
55 | return self
56 |
57 | def add_char(self, c):
58 | """
59 | Adds a character to this message
60 |
61 | :param c: character to add
62 | :return: self
63 | """
64 |
65 | self.buffer += struct.pack('>c', c)
66 | return self
67 |
68 | def add_byte(self, b):
69 | """
70 | Adds a byte to this message
71 |
72 | :param b: byte to add
73 | :return: self
74 | """
75 |
76 | self.buffer += struct.pack('>b', b)
77 | return self
78 |
79 | def add_short(self, s):
80 | """
81 | Adds a short to this message
82 |
83 | :param s: short to add
84 | :return: self
85 | """
86 |
87 |         self.buffer += struct.pack('>h', s)  # '>h' is a big-endian signed short; '>s' would pack bytes
88 | return self
89 |
90 | def add_int(self, i):
91 | """
92 | Adds an int to this message
93 |
94 | :param i: int to add
95 | :return: self
96 | """
97 |
98 | self.buffer += struct.pack('>i', i)
99 | return self
100 |
101 | def add_long(self, l):
102 | """
103 | Adds a long to this message
104 |
105 | :param l: long to add
106 | :return: self
107 | """
108 |
109 | self.buffer += struct.pack('>q', l)
110 | return self
111 |
112 | def add_float(self, f):
113 | """
114 | Adds a float to this message
115 |
116 | :param f: float to add
117 | :return: self
118 | """
119 |
120 | self.buffer += struct.pack('>f', f)
121 | return self
122 |
123 | def add_double(self, d):
124 | """
125 | Adds a double to this message
126 |
127 | :param d: double to add
128 | :return: self
129 | """
130 |
131 | self.buffer += struct.pack('>d', d)
132 | return self
133 |
134 | def add_raw(self, b):
135 | """
136 | Adds raw data to this message
137 |
138 | :param b: data to add
139 | :return: self
140 | """
141 |
142 | self.buffer += b
143 | return self
144 |
145 |
146 | class MessageReader:
147 | """
148 | Allows easy access to data stored within a message.
149 | """
150 |
151 | def __init__(self, data):
152 | """
153 | Creates a new MessageReader that reads from a raw byte array.
154 |
155 | :param data: raw data
156 | """
157 |
158 | self.data = data
159 | self.cursor = 0
160 |
161 | def _next(self, count):
162 | # Reads the next count bytes from the data array as a bytes object
163 |
164 | read = self.data[self.cursor:(self.cursor + count)]
165 | self.cursor += count
166 | return read
167 |
168 | def read_boolean(self):
169 | """
170 | Reads a boolean from the message.
171 |
172 | :return: boolean read
173 | """
174 |
175 | return struct.unpack('?', self._next(1))[0]
176 |
177 | def read_string(self):
178 | """
179 | Reads a string from the message.
180 |
181 | :return: string read
182 | """
183 |
184 | utf_len = struct.unpack('>H', self._next(2))[0]
185 | return self._next(utf_len).decode('utf-8')
186 |
187 | def read_char(self):
188 | """
189 | Reads a character from the message.
190 |
191 | :return: character read
192 | """
193 |
194 |         return chr(struct.unpack('>H', self._next(2))[0])
195 |
196 | def read_byte(self):
197 | """
198 | Reads a byte from the message.
199 |
200 | :return: byte read
201 | """
202 |
203 | return struct.unpack('b', self._next(1))[0]
204 |
205 | def read_short(self):
206 | """
207 | Reads a short from the message.
208 |
209 | :return: short read
210 | """
211 |
212 | return struct.unpack('>h', self._next(2))[0]
213 |
214 | def read_int(self):
215 | """
216 | Reads an int from the message.
217 |
218 | :return: int read
219 | """
220 |
221 | return struct.unpack('>i', self._next(4))[0]
222 |
223 | def read_long(self):
224 | """
225 | Reads a long from the message.
226 |
227 | :return: long read
228 | """
229 |
230 | return struct.unpack('>q', self._next(8))[0]
231 |
232 | def read_float(self):
233 | """
234 | Reads a float from the message.
235 |
236 | :return: float read
237 | """
238 |
239 | return struct.unpack('>f', self._next(4))[0]
240 |
241 | def read_double(self):
242 | """
243 | Reads a double from the message.
244 |
245 | :return: double read
246 | """
247 |
248 | return struct.unpack('>d', self._next(8))[0]
249 |
250 | def read_raw(self, length):
251 | """
252 | Reads raw data as a byte array from the message.
253 |
254 | :param length: number of bytes to read
255 | :return: bytes read
256 | """
257 | return self._next(length)
258 |
259 | def read_all_data(self):
260 | """
261 | Reads all remaining data as a bytes object
262 |
263 | :return: data read
264 | """
265 | return self.data[self.cursor:]
266 |
267 |
268 | # Built-in messages
269 | _HEARTBEAT = '_Heartbeat'
270 | _LISTEN = '_Listen'
271 | _DISCONNECT = '_Disconnect'
272 |
273 |
274 | def _heartbeat_thread(msg):
275 | # Sends a heartbeat message once per second to keep the
276 | # connection alive
277 |
278 | while not msg.heartbeat_event.is_set():
279 | msg._send_message(_HEARTBEAT, b'')
280 | time.sleep(1)
281 |
282 |
283 | def _connect_thread(msg):
284 | # Repeatedly tries to connect to the server until successful
285 |
286 | while not msg.connected and not msg.connect_event.is_set():
287 | try:
288 | msg.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
289 | msg.socket.connect((msg.host, msg.port))
290 |
291 | msg._send(_encode_string(msg.name))
292 | msg.connected = True
293 |
294 | for listen in msg.listening:
295 | msg._listen(listen)
296 | except Exception:
297 | if msg.log_errors:
298 | print('Messenger connection failed')
299 |
300 | time.sleep(1)
301 | msg.connect_thread = None
302 |
303 |
304 | def _encode_string(str):
305 | # Encodes a string into a length-prefixed UTF-8 bytes object
306 |
307 | encoded_len = struct.pack(">h", len(str))
308 | return encoded_len + str.encode("utf-8")
309 |
310 |
311 | class WildcardHandler:
312 | # Handles wildcard patterns (i.e. patterns that end in '*')
313 |
314 | def __init__(self, pattern, handler):
315 | self.pattern = pattern
316 | self.handler = handler
317 |
318 | def handle(self, type, data):
319 | if type.startswith(self.pattern):
320 | self.handler(type, MessageReader(data))
321 |
322 |
323 | class DirectHandler:
324 | # Handles simple patterns (type must match exactly)
325 |
326 | def __init__(self, type, handler):
327 | self.type = type
328 | self.handler = handler
329 |
330 | def handle(self, type, data):
331 | if self.type == type:
332 | self.handler(type, MessageReader(data))
333 |
334 |
335 | class MessengerClient:
336 | """
337 | Represents a connection to the Messenger server.
338 | This can be used to send messages between processes.
339 | """
340 |
341 | def __init__(self, host, port, name, mute_errors=False):
342 | """
343 | Creates a new instance and attempts to connect to a
344 | Messenger server at the given address.
345 |
346 | :param host: server host
347 | :param port: server port
348 | :param name: unique string used in logging
349 | """
350 |
351 | self.host = host
352 | self.port = port
353 | self.name = name
354 | self.log_errors = not mute_errors
355 |
356 | self.socket = None
357 | self.connected = False
358 |
359 | self.heartbeat_event = threading.Event()
360 | self.heartbeat_thread = threading.Thread(target=_heartbeat_thread, args=(self,))
361 | self.heartbeat_thread.start()
362 | self.listening = []
363 | self.handlers = []
364 |
365 | self._start_connect_thread()
366 |
367 | def reconnect(self, host, port, name):
368 | """
369 | Attempts to reconnect to a different Messenger server at
370 | a given address.
371 |
372 | :param host: server host
373 | :param port: server port
374 | :param name: unique string used in logging
375 | :return:
376 | """
377 |
378 | self.host = host
379 | self.port = port
380 | self.name = name
381 |
382 | self.send(_DISCONNECT)
383 | self._disconnect_socket()
384 | self.connected = False
385 |
386 | self._start_connect_thread()
387 |
388 | def read_messages(self):
389 | """
390 | Reads all incoming messages. If not connected, this will do
391 | nothing. Message handlers will be invoked from this method.
392 | """
393 |
394 | if not self.connected:
395 | return
396 |
397 | try:
398 | while self._available():
399 | self._read_message()
400 | except Exception:
401 | self._handle_error()
402 |
403 | def is_connected(self):
404 | """
405 | Gets whether this client is currently connected to a server.
406 |
407 | :return: connected
408 | """
409 | return self.connected
410 |
411 | def disconnect(self):
412 | """
413 | Disconnects from the current server. After this method is called,
414 | this object should no longer be used. If you want to change servers,
415 | use reconnect(...).
416 | """
417 |
418 | self.send(_DISCONNECT)
419 |
420 | self.heartbeat_event.set()
421 | self.heartbeat_thread.join()
422 |
423 | self._disconnect_socket()
424 | self.connected = False
425 |
426 | def prepare(self, type):
427 | """
428 | Prepares to send a message. This returns a MessageBuilder,
429 | which allows you to add data to the message.
430 |
431 | :param type: type of the message to send
432 | :return: builder to add data
433 | """
434 |
435 | return MessageBuilder(self, type)
436 |
437 | def send(self, type):
438 | """
439 | Immediately sends a message with no data.
440 |
441 | :param type: type of the message to send
442 | """
443 | self._send_message(type, b'')
444 |
445 | def add_handler(self, type, handler):
446 | """
447 | Registers a message handler to handle incoming messages.
448 | If the type ends in '*', the handler will be invoked for all messages
449 | that match the content before. For example, "Foo*" would match a
450 | message of type "Foo2", while "Foo" would only match messages of
451 | type "Foo".
452 |
453 | The handler parameter should be a function with two parameters:
454 | the type of message as a string, and a MessageReader to read data
455 | from the message.
456 |
457 | :param type: type of message to listen to
458 | :param handler: handler to invoke
459 | """
460 |
461 | if type.endswith('*'):
462 | h = WildcardHandler(type[:-1], handler)
463 | else:
464 | h = DirectHandler(type, handler)
465 | self.handlers.append(h)
466 |
467 | if type not in self.listening:
468 | self.listening.append(type)
469 | if self.connected:
470 | self._listen(type)
471 |
472 | def _available(self):
473 | readable = select.select([self.socket], [], [], 0)[0]
474 |
475 | return any(sock == self.socket for sock in readable)
476 |
477 | def _listen(self, type):
478 | self.prepare(_LISTEN).add_string(type).send()
479 |
480 | def _handle_error(self):
481 | self._disconnect_socket()
482 |
483 | if self.log_errors:
484 | print("Messenger connection lost")
485 |
486 | self.connected = False
487 |
488 | self._start_connect_thread()
489 |
490 | def _read(self, count):
491 | data = b""
492 | while len(data) < count:
493 | data += self.socket.recv(count - len(data))
494 | return data
495 |
496 | def _send(self, data):
497 | total = 0
498 | while total < len(data):
499 | send = self.socket.send(data[total:])
500 | if send == 0:
501 | self._handle_error()
502 | return
503 | total += send
504 |
505 | def _send_message(self, type, data):
506 | if not self.connected:
507 | return
508 |
509 | encoded_type = _encode_string(type)
510 | encoded_data_len = struct.pack(">i", len(data))
511 | encoded = encoded_type + encoded_data_len + data
512 |
513 | # packet_len = struct.pack(">i", len(encoded))
514 | # packet = packet_len + encoded
515 | packet = encoded
516 |
517 | self._send(packet)
518 |
519 | def _start_connect_thread(self):
520 | self.connect_event = threading.Event()
521 | self.connect_thread = threading.Thread(target=_connect_thread, args=(self,))
522 | self.connect_thread.start()
523 |
524 | def _read_message(self):
525 | type_len = struct.unpack('>h', self._read(2))[0]
526 | message_type = self._read(type_len).decode('utf-8')
527 | data_len = struct.unpack('>i', self._read(4))[0]
528 | message_data = self._read(data_len)
529 |
530 | for handler in self.handlers:
531 | handler.handle(message_type, message_data)
532 |
533 | def _disconnect_socket(self):
534 | if self.connect_thread is not None:
535 | self.connect_event.set()
536 | self.connect_thread.join()
537 | self.connect_thread = None
538 |
539 | self.socket.close()
540 |
--------------------------------------------------------------------------------
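
A minimal usage sketch of MessengerClient, following the module docstring above (host and port match the defaults in src/main.py; the message types are hypothetical):

    import time
    from messenger import MessengerClient

    client = MessengerClient('localhost', 5805, 'ExampleClient', mute_errors=True)

    def on_message(type, reader):
        # Handlers get the message type and a MessageReader positioned at the data
        print(type, reader.read_int())

    client.add_handler('Example:*', on_message)  # trailing '*' matches any type with this prefix

    # Note: sends are silently dropped until the background thread connects
    client.prepare('Example:Ping').add_int(42).send()

    try:
        while True:
            client.read_messages()  # must be called regularly for handlers to run
            time.sleep(0.02)
    except KeyboardInterrupt:
        pass
    finally:
        client.disconnect()  # otherwise background threads keep the process alive
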
/src/quaternions.py:
--------------------------------------------------------------------------------
1 | import math
2 |
3 | def matrixToQuat(m):  # returns (w, x, y, z)
4 | r11 = m[0][0]; r12 = m[0][1]; r13 = m[0][2]
5 | r21 = m[1][0]; r22 = m[1][1]; r23 = m[1][2]
6 | r31 = m[2][0]; r32 = m[2][1]; r33 = m[2][2]
7 |
8 | q0 = math.sqrt((1 + r11 + r22 + r33) / 4)
9 | q1 = math.sqrt((1 + r11 - r22 - r33) / 4)
10 | q2 = math.sqrt((1 - r11 + r22 - r33) / 4)
11 | q3 = math.sqrt((1 - r11 - r22 + r33) / 4)
12 |
13 | if q0 > q1 and q0 > q2 and q0 > q3:
14 | q1 = (r32 - r23) / (4 * q0)
15 | q2 = (r13 - r31) / (4 * q0)
16 | q3 = (r21 - r12) / (4 * q0)
17 | elif q1 > q0 and q1 > q2 and q1 > q3:
18 | q0 = (r32 - r23) / (4 * q1)
19 | q2 = (r12 + r21) / (4 * q1)
20 | q3 = (r13 + r31) / (4 * q1)
21 | elif q2 > q0 and q2 > q1 and q2 > q3:
22 | q0 = (r13 - r31) / (4 * q2)
23 | q1 = (r12 + r21) / (4 * q2)
24 | q3 = (r23 + r32) / (4 * q2)
25 | elif q3 > q0 and q3 > q1 and q3 > q2:
26 | q0 = (r21 - r12) / (4 * q3)
27 | q1 = (r13 + r31) / (4 * q3)
28 | q2 = (r23 + r32) / (4 * q3)
29 |
30 | return (q0, q1, q2, q3)
31 |
32 | def invertQuat(q):
33 | return (q[0], -q[1], -q[2], -q[3])
34 |
35 | def quatToAxisAngle(q):
36 |     if q[0] >= 1:  # identity rotation (guard against float drift)
37 | return (0, (1, 0, 0))
38 |
39 | theta = 2 * math.acos(q[0])
40 |
41 | s = math.sin(theta / 2)
42 | x = q[1] / s
43 | y = q[2] / s
44 | z = q[3] / s
45 |
46 | return (theta, (x, y, z))
47 |
48 | def quatToFUL(q):
49 |     x, y, z, w = q  # NOTE: expects (x, y, z, w), unlike matrixToQuat's (w, x, y, z) output
50 |
51 | forward = (
52 | 2 * (x * z + w * y),
53 | 2 * (y * z - w * x),
54 | 1 - 2 * (x * x + y * y)
55 | )
56 |
57 | up = (
58 | 2 * (x * y - w * z),
59 | 1 - 2 * (x * x + z * z),
60 | 2 * (y * z + w * x)
61 | )
62 |
63 | left = (
64 | 1 - 2 * (y * y + z * z),
65 | 2 * (x * y + w * z),
66 | 2 * (x * z - w * y)
67 | )
68 |
69 | return (forward, up, left)
70 |
--------------------------------------------------------------------------------
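
A small worked example of the conventions above (matrixToQuat returns (w, x, y, z)); the pose matrix is hypothetical:

    import numpy as np
    from quaternions import matrixToQuat, quatToAxisAngle

    pose = np.eye(4)                   # identity pose: no rotation
    q = matrixToQuat(pose[:3, :3])     # (1.0, 0.0, 0.0, 0.0)
    angle, axis = quatToAxisAngle(q)   # 0 radians about (1, 0, 0)
    print(angle, axis)
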
/src/shufflelog_api.py:
--------------------------------------------------------------------------------
1 | from itertools import product
2 | from messenger import *
3 |
4 | def _write_matrix(builder, matrix):
5 | # Write as column major
6 | for col, row in product(range(4), range(4)):
7 | # Float here since ShuffleLog stores matrices as float
8 | builder.add_float(matrix[row][col])
9 |
10 | class ShuffleLogAPI:
11 | _MSG_QUERY_ENVIRONMENT = "TagTracker:QueryEnvironment"
12 | _MSG_ENVIRONMENT = "TagTracker:Environment"
13 |
14 | def __init__(self, conn_params, tag_infos, camera_infos):
15 | host = conn_params['host']
16 | port = conn_params['port']
17 | name = conn_params['name']
18 | mute_errors = conn_params['mute_errors']
19 |
20 | self.msg = MessengerClient(host, port, name, mute_errors=mute_errors)
21 | self.msg.add_handler(ShuffleLogAPI._MSG_QUERY_ENVIRONMENT, lambda t, r: self._on_query_environment(t, r))
22 |
23 | self.tag_infos = tag_infos
24 | self.camera_infos = camera_infos
25 |
26 | def read(self):
27 | self.msg.read_messages()
28 |
29 | def shutdown(self):
30 | self.msg.disconnect()
31 |
32 | # This is temporary
33 | def publish_detection_data(self, detections):
34 | builder = self.msg.prepare('TagTracker:TestData')
35 | builder.add_int(len(detections))
36 | for detect in detections:
37 | _write_matrix(builder, detect['pose'])
38 | _write_matrix(builder, detect['camera'].robot_position)
39 | builder.add_int(detect['tag_id'])
40 | builder.send()
41 |
42 | def publish_test_matrices(self, matrices):
43 | builder = self.msg.prepare('TagTracker:TestMtx')
44 | builder.add_int(len(matrices))
45 | for matrix in matrices:
46 | _write_matrix(builder, matrix)
47 | builder.send()
48 |
49 | def _on_query_environment(self, type, reader):
50 | print('[debug] sending environment data to ShuffleLog')
51 | builder = self.msg.prepare(ShuffleLogAPI._MSG_ENVIRONMENT)
52 |
53 | builder.add_int(len(self.tag_infos))
54 | for tag in self.tag_infos:
55 | builder.add_double(tag['size'])
56 | builder.add_int(tag['id'])
57 |             _write_matrix(builder, [tag['transform'][i * 4:i * 4 + 4] for i in range(4)])  # transform is a flat row-major list
58 |
59 | builder.add_int(len(self.camera_infos))
60 | for camera in self.camera_infos:
61 | builder.add_string(camera['name'])
62 | builder.add_int(camera['port'])
63 | _write_matrix(builder, camera['robot_pose'])
64 |
65 | builder.send()
66 |
--------------------------------------------------------------------------------
/src/solver.py:
--------------------------------------------------------------------------------
1 | # Solves for robot position based on results of found tags
2 | import numpy as np
3 | from main import logger
4 |
5 | # TODO-Ryan: Finish/Fix
6 |
7 | # Manual implementation of matrix inversion because np.linalg.inv is slow
8 | def invert(m):
9 | m00, m01, m02, m03, m10, m11, m12, m13, m20, m21, m22, m23, m30, m31, m32, m33 = np.ravel(m)
10 | a2323 = m22 * m33 - m23 * m32
11 | a1323 = m21 * m33 - m23 * m31
12 | a1223 = m21 * m32 - m22 * m31
13 | a0323 = m20 * m33 - m23 * m30
14 | a0223 = m20 * m32 - m22 * m30
15 | a0123 = m20 * m31 - m21 * m30
16 | a2313 = m12 * m33 - m13 * m32
17 | a1313 = m11 * m33 - m13 * m31
18 | a1213 = m11 * m32 - m12 * m31
19 | a2312 = m12 * m23 - m13 * m22
20 | a1312 = m11 * m23 - m13 * m21
21 | a1212 = m11 * m22 - m12 * m21
22 | a0313 = m10 * m33 - m13 * m30
23 | a0213 = m10 * m32 - m12 * m30
24 | a0312 = m10 * m23 - m13 * m20
25 | a0212 = m10 * m22 - m12 * m20
26 | a0113 = m10 * m31 - m11 * m30
27 | a0112 = m10 * m21 - m11 * m20
28 |
29 | det = m00 * (m11 * a2323 - m12 * a1323 + m13 * a1223) \
30 | - m01 * (m10 * a2323 - m12 * a0323 + m13 * a0223) \
31 | + m02 * (m10 * a1323 - m11 * a0323 + m13 * a0123) \
32 | - m03 * (m10 * a1223 - m11 * a0223 + m12 * a0123)
33 | det = 1 / det
34 |
35 | return np.array([ \
36 | det * (m11 * a2323 - m12 * a1323 + m13 * a1223), \
37 | det * -(m01 * a2323 - m02 * a1323 + m03 * a1223), \
38 | det * (m01 * a2313 - m02 * a1313 + m03 * a1213), \
39 | det * -(m01 * a2312 - m02 * a1312 + m03 * a1212), \
40 | det * -(m10 * a2323 - m12 * a0323 + m13 * a0223), \
41 | det * (m00 * a2323 - m02 * a0323 + m03 * a0223), \
42 | det * -(m00 * a2313 - m02 * a0313 + m03 * a0213), \
43 | det * (m00 * a2312 - m02 * a0312 + m03 * a0212), \
44 | det * (m10 * a1323 - m11 * a0323 + m13 * a0123), \
45 | det * -(m00 * a1323 - m01 * a0323 + m03 * a0123), \
46 | det * (m00 * a1313 - m01 * a0313 + m03 * a0113), \
47 | det * -(m00 * a1312 - m01 * a0312 + m03 * a0112), \
48 | det * -(m10 * a1223 - m11 * a0223 + m12 * a0123), \
49 | det * (m00 * a1223 - m01 * a0223 + m02 * a0123), \
50 | det * -(m00 * a1213 - m01 * a0213 + m02 * a0113), \
51 | det * (m00 * a1212 - m01 * a0212 + m02 * a0112) \
52 | ]).reshape(4, 4)
53 |
54 | class RobotPoseSolver:
55 | def __init__(self, environment_dict):
56 | # Unpack tag positions into lookup dictionary
57 | if not environment_dict['tags']:
58 | logger.error('No tags defined! Quitting')
59 | raise AssertionError('No tags defined in environment JSON')
60 | self.tags_dict = {tag['id']: tag for tag in environment_dict['tags']}
61 | self.tag_family = environment_dict['tag_family']
62 |
63 | if self.tag_family != "tag16h5":
64 | '''logger.warning('Are you sure that you want to look for, tags in the \
65 | family {}. FRC uses 16h5'.format(self.tag_family))
66 | '''
67 | def solve(self, detection_poses):
68 | # Master list of estimated poses to be combined
69 | estimated_poses_list = []
70 |
71 | # Apply camera and tag pose to estimated pose
72 | for pose_dict in detection_poses:
73 | estimated_pose = pose_dict['pose']
74 | camera_pose = pose_dict['camera'].robot_position
75 | tag_id = pose_dict['tag_id']
76 | tag_family = pose_dict['tag_family']
77 |
78 | # Find the tag info that matches that tag
79 | if self.tag_family not in str(tag_family):
80 | logger.warning(f"Found a tag that doesn't belong to {self.tag_family}")
81 | continue
82 |
83 | # Get the info for the tag
84 | tag_dict = self.tags_dict.get(tag_id)
85 |
86 | if not tag_dict:
87 | logger.warning(f"Found a tag that isn't defined in environment. ID: {tag_id}")
88 | continue
89 |
90 | tag_pose = tag_dict['transform']
91 |
92 |             # Convert to numpy arrays for math
93 |             estimated_pose = np.array(estimated_pose)
94 |             camera_pose = np.array(camera_pose)
95 |             tag_pose = np.array(tag_pose).reshape(4, 4)  # environment.json stores transforms flat, row-major
96 |
97 | # Scale estimated position by tag size
98 | size = tag_dict['size']
99 | estimated_pose[0][3] *= size
100 | estimated_pose[1][3] *= size
101 | estimated_pose[2][3] *= size
102 |
103 | # Find where the camera is if the tag is at the origin
104 | tag_relative_camera_pose = invert(estimated_pose)
105 |
106 | # Find the camera position relative to the tag position
107 | world_camera_pose = np.matmul(tag_pose, tag_relative_camera_pose)
108 |
109 | # Find the position of the robot from the camera position
110 | inv_rel_camera_pose = invert(camera_pose)
111 | robot_pose = np.matmul(world_camera_pose, inv_rel_camera_pose)
112 |
113 | estimated_poses_list.append(robot_pose)
114 |
115 | # Combine poses with average (just for position, not rotation)
116 |
117 | if not estimated_poses_list:
118 | # If we have no samples, report none
119 | return (None, estimated_poses_list)
120 |
121 | # TODO: Figure out rotation
122 | total = np.array([0.0, 0.0, 0.0])
123 | for pose in estimated_poses_list:
124 | total += np.array([pose[0][3], pose[1][3], pose[2][3]])
125 | average = total / len(estimated_poses_list)
126 | return (average, estimated_poses_list)
127 |
--------------------------------------------------------------------------------
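
The chain of transforms in solve(), written out as a sketch with np.linalg.inv for clarity (solver.py uses its hand-rolled invert() for speed, but the math is the same):

    import numpy as np

    # T_tag_field:  tag pose on the field (environment.json, reshaped to 4x4)
    # T_tag_cam:    tag pose in the camera frame (AprilTag detection, translation scaled by tag size)
    # T_cam_robot:  camera pose on the robot (cameras.json "robot_pose")
    def robot_pose_on_field(T_tag_field, T_tag_cam, T_cam_robot):
        T_cam_field = T_tag_field @ np.linalg.inv(T_tag_cam)  # camera in the field frame
        return T_cam_field @ np.linalg.inv(T_cam_robot)       # robot in the field frame
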
/src/tag_tracker.py:
--------------------------------------------------------------------------------
1 | from cameras import *
2 | from gui import *
3 | import apriltag
4 | import cv2
5 |
6 | class _DetectorOptions: # Converts JSON into object for apriltag's detector to read
7 | def __init__(self, dict=None):
8 | if dict:
9 | for key, value in dict.items():
10 | setattr(self, key, value)
11 |
12 | class Detector: # Rename?
13 | def __init__(self, logger, options):
14 | options = _DetectorOptions(options)
15 | self.detector = apriltag.Detector(options)
16 |
17 | def getPoses(self, images):
18 |
19 | # Make a list of estimated poses to add to
20 | estimated_poses = []
21 |
22 | # Find every target in the images
23 | for image_dict in images:
24 | image = image_dict['image']
25 | camera = image_dict['camera']
26 |
27 | # Convert image to grayscale
28 | gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
29 |
30 | # Find basic information about tag (center location, ID, family...)
31 | results = self.detector.detect(gray)
32 |
33 | # Estimate the pose of the camera relative to each target
34 | for result in results:
35 | # Undistort corners
36 |
37 | # print(result.corners)
38 | # for j in range(len(result.corners)):
39 | # result.corners[j] = cv2.undistortPoints(result.corners[j].reshape(-1, 2), camera.matrix, dist_coeffs, P=new_mtx).reshape(-1)
40 | # print(result.corners)
41 |
42 | pose, e0, e1 = self.detector.detection_pose(result, camera.camera_params)
43 |
44 | # Draw bounding box
45 | draw_bounding_box(image, result, camera.camera_params, pose)
46 |
47 | # TODO Finish
48 | # TODO: What are the 'e's?
49 | # TODO: Scale pose by tag size
50 | estimated_poses.append({
51 | 'pose': pose,
52 | 'camera': camera,
53 | 'tag_id': result.tag_id,
54 | 'tag_family' : result.tag_family
55 | })
56 |
57 | # Log number of tags found
58 | if estimated_poses:
59 | logger.debug(f"Estimated pose on {len(estimated_poses)} AprilTags")
60 |
61 | return estimated_poses
62 |
--------------------------------------------------------------------------------
/templates/fail.html:
--------------------------------------------------------------------------------
[fail.html markup lost in extraction; the template renders the failure reason via the {{reason}} placeholder]
--------------------------------------------------------------------------------
/tools/calibrate_camera.py:
--------------------------------------------------------------------------------
1 | import random
2 | 
3 | import numpy as np
4 | import cv2
5 | import argparse
6 | 
7 | # Copied from AprilTag library source code
8 | # and edited to work with live video stream
9 | def main():
10 | 
11 |     parser = argparse.ArgumentParser(
12 |         description='calibrate camera intrinsics using OpenCV')
13 | 
14 |     parser.add_argument('-r', '--rows', metavar='N', type=int,
15 |                         required=True,
16 |                         help='# of chessboard corners in vertical direction')
17 | 
18 |     parser.add_argument('-c', '--cols', metavar='N', type=int,
19 |                         required=True,
20 |                         help='# of chessboard corners in horizontal direction')
21 | 
22 |     parser.add_argument('-i', '--input_camera', type=int,
23 |                         default=0, help='change which camera to read from by OpenCV id')
24 | 
25 |     parser.add_argument('-m', '--max-images', metavar='N', type=int,
26 |                         default=0,
27 |                         help='max # of images to keep to use in calibration')
28 | 
29 |     options = parser.parse_args()
30 | 
31 |     if options.rows < options.cols:
32 |         patternsize = (options.cols, options.rows)
33 |     else:
34 |         patternsize = (options.rows, options.cols)
35 | 
36 |     cam = options.input_camera
37 | 
38 |     x = np.arange(patternsize[0])
39 |     y = np.arange(patternsize[1])
40 | 
41 |     xgrid, ygrid = np.meshgrid(x, y)
42 |     zgrid = np.zeros_like(xgrid)
43 |     opoints = np.dstack((xgrid, ygrid, zgrid)).reshape((-1, 1, 3)).astype(np.float32)
44 | 
45 |     imagesize = None
46 | 
47 |     cap = cv2.VideoCapture(cam)
48 | 
49 |     ipoints = []
50 | 
51 |     criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
52 | 
53 |     while True:
54 | 
55 |         ret, rgb = cap.read()
56 | 
57 |         if rgb is None:
58 |             print('warning: error getting data from webcam, ending')
59 |             break
60 | 
61 |         # cursize = (rgb.shape[1], rgb.shape[0])
62 |         # print(cursize, rgb.shape[::-1])
63 | 
64 | 
65 |         if len(rgb.shape) == 3:
66 |             gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
67 |         else:
68 |             gray = rgb
69 | 
70 |         cursize = gray.shape[::-1]
71 | 
72 |         if imagesize is None:
73 |             imagesize = cursize
74 |             print('Received frame of size {}x{}'.format(*imagesize))
75 |         else:
76 |             assert imagesize == cursize
77 | 
78 |         retval, corners = cv2.findChessboardCorners(gray, patternsize, None)
79 | 
80 |         if retval:
81 |             # print('checkerboard acquired!')
82 |             # Refine corners (apriltag devs forgot this)
83 |             refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
84 |             ipoints.append(refined)
85 | 
86 |             cv2.drawChessboardCorners(rgb, patternsize, refined, retval)
87 |         else:
88 |             # print('warning: no chessboard found, skipping')
89 |             # print('nothing found')
90 |             pass
91 | 
92 |         cv2.putText(rgb, str(len(ipoints)), (10, 30),
93 |                     cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
94 |         cv2.imshow("Image", rgb)
95 | 
96 |         if cv2.waitKey(1) & 0xFF == ord('q'):
97 |             break
98 | 
99 |     print(f'got {len(ipoints)} points')
100 |     if options.max_images != 0 and len(ipoints) > options.max_images:
101 |         random.shuffle(ipoints)
102 |         ipoints = ipoints[:options.max_images]
103 |         print(f'using {len(ipoints)} points')
104 | 
105 |     if len(ipoints) == 0:
106 |         print("no data!")
107 |     else:
108 |         opoints = [opoints] * len(ipoints)
109 | 
110 |         retval, K, dcoeffs, rvecs, tvecs = cv2.calibrateCamera(opoints, ipoints, imagesize, None, None)
111 | 
112 |         # assert( np.all(dcoeffs == 0) )
113 | 
114 |         fx = K[0, 0]
115 |         fy = K[1, 1]
116 |         cx = K[0, 2]
117 |         cy = K[1, 2]
118 | 
119 |         params = (fx, fy, cx, cy)
120 | 
121 |         print()
122 |         print('all units below measured in pixels:')
123 |         print('  fx = {}'.format(K[0, 0]))
124 |         print('  fy = {}'.format(K[1, 1]))
125 |         print('  cx = {}'.format(K[0, 2]))
126 |         print('  cy = {}'.format(K[1, 2]))
127 |         print()
128 |         print('pastable into Python:')
129 |         print('  fx, fy, cx, cy = {}'.format(repr(params)))
130 |         print()
131 |         print('json:')
132 |         print('{')
133 |         print('    "fx": {},'.format(K[0, 0]))
134 |         print('    "fy": {},'.format(K[1, 1]))
135 |         print('    "cx": {},'.format(K[0, 2]))
136 |         print('    "cy": {},'.format(K[1, 2]))
137 |         print('    "dist": {}'.format(dcoeffs.tolist()))
138 |         print('}')
139 | 
140 |     cv2.destroyAllWindows()
141 | 
142 | if __name__ == '__main__':
143 |     main()
144 | 
--------------------------------------------------------------------------------
/tools/camera_port_scanner.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from argparse import ArgumentParser
3 | 
4 | def main():
5 |     # Create a parser for optional parameters
6 |     parser = ArgumentParser(prog='Camera Port Scanner',
7 |                             description='Camera Port Scanner')
8 |     parser.add_argument('--min', type=int, default=-10, metavar='', help='Min port to scan')
9 |     parser.add_argument('--max', type=int, default=10, metavar='', help='Max port to scan')
10 | 
11 |     args = parser.parse_args()
12 |     min_port = args.min
13 |     max_port = args.max
14 | 
15 |     if min_port >= max_port:
16 |         raise ValueError('Make sure the minimum port is less than the maximum!')
17 | 
18 |     working_ids = []
19 |     for i in range(min_port, max_port):
20 |         cap = cv2.VideoCapture(i)
21 | 
22 |         if cap.isOpened():
23 |             working_ids.append(i)
24 | 
25 |         cap.release()
26 | 
27 |     print('Working Camera Ports: \n \
28 |         {}'.format(working_ids))
29 | 
30 | if __name__ == '__main__':
31 |     main()
--------------------------------------------------------------------------------
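
Hedged usage examples for the two tools (device index and corner counts are illustrative; -r/-c count inner chessboard corners, not squares):

    python3 tools/camera_port_scanner.py --min 0 --max 10
    python3 tools/calibrate_camera.py -r 6 -c 9 -i 0 -m 40
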