├── seahorse ├── utils │ ├── __init__.py │ └── geometry │ │ ├── __init__.py │ │ ├── line.py │ │ ├── point.py │ │ ├── polygon.py │ │ └── geometry.py ├── perception │ ├── __init__.py │ ├── common │ │ └── __init__.py │ ├── data │ │ ├── __init__.py │ │ └── dataloader.py │ ├── tools │ │ ├── __init__.py │ │ └── mock_publish_image.py │ ├── detection │ │ ├── __init__.py │ │ ├── multitask │ │ │ ├── __init__.py │ │ │ ├── contracts.py │ │ │ ├── base_detector.py │ │ │ ├── test_detector.py │ │ │ └── yoloe_detector.py │ │ ├── keypoint_detection │ │ │ ├── __init__.py │ │ │ ├── keypoint_detection.py │ │ │ ├── keypoint_rcnn.py │ │ │ └── test.py │ │ ├── instance_segmentation │ │ │ ├── __init__.py │ │ │ ├── segmentation.py │ │ │ ├── mask_rcnn.py │ │ │ ├── visualize.py │ │ │ └── test.py │ │ ├── lane_detection │ │ │ ├── contracts.py │ │ │ ├── detector.py │ │ │ ├── __init__.py │ │ │ ├── test_lane.py │ │ │ └── spline_detector.py │ │ ├── object_detection │ │ │ ├── contracts.py │ │ │ ├── base_detector.py │ │ │ ├── __init__.py │ │ │ ├── yolo.py │ │ │ ├── test_detector.py │ │ │ ├── test_detector_node.py │ │ │ └── detector.py │ │ └── traffic_light_detection │ │ │ ├── contracts.py │ │ │ ├── detector.py │ │ │ ├── __init__.py │ │ │ └── traffic_light.py │ ├── tracking │ │ ├── __init__.py │ │ ├── affinity │ │ │ ├── __init__.py │ │ │ ├── similarity.py │ │ │ └── distance.py │ │ ├── data_associate │ │ │ ├── __init__.py │ │ │ ├── associator.py │ │ │ ├── min_cost_max_flow.py │ │ │ ├── bipartite_graph_match.py │ │ │ ├── conditional_random_field.py │ │ │ └── dynamic_programming.py │ │ ├── feature_extractor │ │ │ ├── __init__.py │ │ │ └── hog.py │ │ ├── motion_predictor │ │ │ ├── __init__.py │ │ │ ├── linear_model.py │ │ │ ├── interaction_model.py │ │ │ └── nonlinear_model.py │ │ ├── object.py │ │ ├── tracklet.py │ │ └── tracker.py │ ├── segmentation │ │ ├── __init__.py │ │ ├── cluster │ │ │ ├── __init__.py │ │ │ ├── run_clustering.py │ │ │ ├── adaptive_clustering.py │ │ │ ├── autoware_clustering.py │ │ │ ├── depth_clustering.py │ │ │ └── euclidean_clustering.py │ │ └── semantic_segmentation │ │ │ ├── __init__.py │ │ │ ├── segmentation.py │ │ │ ├── fcn.py │ │ │ ├── lraspp.py │ │ │ ├── deep_lab_v3.py │ │ │ └── test.py │ ├── io │ │ ├── __init__.py │ │ ├── test.py │ │ ├── pil_source.py │ │ ├── source.py │ │ ├── message_source.py │ │ ├── stream_source.py │ │ ├── screenshot_source.py │ │ └── file_source.py │ ├── config.yaml │ ├── visualizer │ │ ├── __init__.py │ │ └── visualize.py │ ├── utils │ │ └── __init__.py │ ├── image_detection.py │ └── core │ │ └── runner.py ├── behavior_planning │ ├── __init__.py │ ├── behavior_tree │ │ ├── __init__.py │ │ └── behavior_tree.py │ └── state_machine │ │ ├── __init__.py │ │ └── state_machine.py ├── global_planning │ ├── __init__.py │ └── a_star.py ├── trajectory_planning │ ├── spline.py │ └── __init__.py ├── __init__.py ├── middleware │ └── __init__.py └── cli.py ├── docs ├── images │ ├── seahorse.jpg │ └── seahorse_architecture.jpg ├── middleware.md ├── segmentation.md ├── tracking.md ├── distance.md ├── perception.md ├── polygon.md └── behavior_planning │ └── behavior_tree.md ├── .github ├── ISSUE_TEMPLATE │ ├── custom.md │ ├── feature_request.md │ └── bug_report.md ├── workflows │ ├── greetings.yml │ ├── pylint.yml │ ├── stale.yml │ └── python-publish.yml └── PULL_REQUEST_TEMPLATE │ └── pull_request_template.md ├── .vscode ├── extensions.json └── settings.json ├── SECURITY.md ├── setup.py ├── README.md ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md └── LICENSE 
/seahorse/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/behavior_planning/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/global_planning/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/global_planning/a_star.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/common/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/data/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tools/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/trajectory_planning/spline.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/utils/geometry/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/detection/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/trajectory_planning/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/behavior_planning/behavior_tree/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/behavior_planning/state_machine/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/detection/multitask/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- 
/seahorse/perception/segmentation/cluster/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/affinity/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/behavior_planning/behavior_tree/behavior_tree.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/behavior_planning/state_machine/state_machine.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/cluster/run_clustering.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/feature_extractor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/motion_predictor/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/detection/keypoint_detection/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/cluster/adaptive_clustering.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/cluster/autoware_clustering.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/cluster/depth_clustering.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/cluster/euclidean_clustering.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/semantic_segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /seahorse/perception/io/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | __all__ = [ 3 | 'source', 4 | ] 5 | -------------------------------------------------------------------------------- /seahorse/perception/config.yaml: -------------------------------------------------------------------------------- 1 | task: 2 
| model: 3 | source: 4 | visualize: 5 | -------------------------------------------------------------------------------- /seahorse/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.0" 2 | 3 | __all__ = ( 4 | "__version__", 5 | ) 6 | -------------------------------------------------------------------------------- /docs/images/seahorse.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daohu527/seahorse/HEAD/docs/images/seahorse.jpg -------------------------------------------------------------------------------- /seahorse/middleware/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.0" 2 | 3 | __all__ = ( 4 | "__version__", 5 | ) 6 | -------------------------------------------------------------------------------- /docs/images/seahorse_architecture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daohu527/seahorse/HEAD/docs/images/seahorse_architecture.jpg -------------------------------------------------------------------------------- /docs/middleware.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | middleware 3 | 4 | ## Features 5 | 6 | 7 | ## Design 8 | A wrapper around Cyber RT and ROS. 9 | 10 | #### interface 11 | 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/custom.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Custom issue template 3 | about: Describe this issue template's purpose here. 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | 11 | -------------------------------------------------------------------------------- /seahorse/perception/detection/instance_segmentation/__init__.py: -------------------------------------------------------------------------------- 1 | from .mask_rcnn import MaskRCNN 2 | 3 | 4 | CLASSES = { 5 | "mask_rcnn": MaskRCNN, 6 | } 7 | 8 | 9 | def instance_segmentation(args): 10 | pass 11 | -------------------------------------------------------------------------------- /docs/segmentation.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | segmentation 3 | 4 | ## Features 5 | lidar segmentation 6 | 7 | ## Design 8 | * [lidar_clustering_bench](https://github.com/cavayangtao/lidar_clustering_bench/tree/main) 9 | 10 | ## Testcase 11 | -------------------------------------------------------------------------------- /seahorse/perception/io/test.py: -------------------------------------------------------------------------------- 1 | 2 | from screenshot_source import ScreenshotSource 3 | 4 | def test_screenshot_source(): 5 | source = "source 0" 6 | screen_shots = ScreenshotSource(source) 7 | for s in screen_shots: 8 | print(s) 9 | 10 | 11 | if __name__ == "__main__": 12 | test_screenshot_source() 13 | -------------------------------------------------------------------------------- /.github/workflows/greetings.yml: -------------------------------------------------------------------------------- 1 | name: Greetings 2 | 3 | on: [pull_request_target, issues] 4 | 5 | jobs: 6 | greeting: 7 | runs-on: ubuntu-latest 8 | permissions: 9 | issues: write 10 | pull-requests: write 11 | steps: 12 | - uses: actions/first-interaction@v1 13 | with: 14 | repo-token: ${{
secrets.GITHUB_TOKEN }} 15 | issue-message: "Message that will be displayed on users' first issue" 16 | pr-message: "Message that will be displayed on users' first pull request" 17 | -------------------------------------------------------------------------------- /docs/tracking.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | tracking 3 | 4 | ## Features 5 | multi-object tracking 6 | 7 | ## Design 8 | We use tracking-by-detection for autonomous driving, and divide tracking into the following 4 steps. 9 | - object detection 10 | - feature extraction 11 | - similarity calculation 12 | - data association 13 | 14 | The tracker maintains multiple trajectories and matches each object in the current frame against the objects in those trajectories; on a match the object is appended to the trajectory, otherwise a new trajectory is created for it. 15 | 16 | ## Testcase 17 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "esbenp.prettier-vscode", // Prettier formatting 4 | "dbaeumer.vscode-eslint", // JavaScript / TypeScript lint 5 | "ms-vscode.cpptools", // C/C++ support 6 | "xaver.clang-format", // C++ formatting 7 | "ms-python.python", // Python support 8 | "ms-python.black-formatter", // Python Black formatting 9 | "streetsidesoftware.code-spell-checker", // Spell checker 10 | "eamodio.gitlens", // Git enhancement 11 | "github.vscode-pull-request-github" // GitHub PR support 12 | ], 13 | "unwantedRecommendations": [] 14 | } 15 | -------------------------------------------------------------------------------- /docs/distance.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | distance 3 | 4 | ## Features 5 | multi-object tracking affinity 6 | 7 | ## Design 8 | When calculating similarity, we need some indicators to measure the distance. For example, we use Euclidean distance to represent the distance between two points, but what about two colors? So we need to introduce different distance functions to measure the distance between different kinds of indicators.
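A minimal sketch of two such distance functions, assuming plain NumPy vectors as inputs (the function names are illustrative, not part of the seahorse API):

```python
import numpy as np

def euclidean_distance(a: np.ndarray, b: np.ndarray) -> float:
    # Straight-line distance; suited to positions such as box centers.
    return float(np.linalg.norm(a - b))

def cosine_distance(a: np.ndarray, b: np.ndarray) -> float:
    # 1 - cosine similarity; suited to feature vectors such as color
    # histograms, where direction matters more than magnitude.
    denom = float(np.linalg.norm(a) * np.linalg.norm(b))
    if denom == 0.0:
        return 1.0
    return 1.0 - float(np.dot(a, b)) / denom
```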
9 | 10 | * [17-types-of-similarity-and-dissimilarity-measures](https://towardsdatascience.com/17-types-of-similarity-and-dissimilarity-measures-used-in-data-science-3eb914d2681) 11 | 12 | ## Testcase 13 | -------------------------------------------------------------------------------- /.github/workflows/pylint.yml: -------------------------------------------------------------------------------- 1 | name: Pylint 2 | 3 | on: [push] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | strategy: 9 | matrix: 10 | python-version: ["3.8", "3.9", "3.10"] 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Set up Python ${{ matrix.python-version }} 14 | uses: actions/setup-python@v3 15 | with: 16 | python-version: ${{ matrix.python-version }} 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install pylint 21 | - name: Analysing the code with pylint 22 | run: | 23 | pylint $(git ls-files '*.py') 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Supported Versions 4 | 5 | Use this section to tell people about which versions of your project are 6 | currently being supported with security updates. 7 | 8 | | Version | Supported | 9 | | ------- | ------------------ | 10 | | 5.1.x | :white_check_mark: | 11 | | 5.0.x | :x: | 12 | | 4.0.x | :white_check_mark: | 13 | | < 4.0 | :x: | 14 | 15 | ## Reporting a Vulnerability 16 | 17 | Use this section to tell people how to report a vulnerability. 18 | 19 | Tell them where to go, how often they can expect to get an update on a 20 | reported vulnerability, what to expect if the vulnerability is accepted or 21 | declined, etc. 22 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/motion_predictor/linear_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/motion_predictor/interaction_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/motion_predictor/nonlinear_model.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/affinity/similarity.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | def iou(src, dst): 18 | pass 19 | -------------------------------------------------------------------------------- /seahorse/perception/visualizer/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | -------------------------------------------------------------------------------- /seahorse/utils/geometry/line.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | class Line(): 19 | def __init__(self): 20 | pass 21 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/feature_extractor/hog.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
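# A sketch of the pipeline this stub is meant to cover, for orientation:
# HOG (Histogram of Oriented Gradients) describes an image patch by
# (1) computing per-pixel gradients, (2) binning gradient orientations into
# per-cell histograms, (3) normalizing the histograms over larger blocks and
# (4) concatenating the blocks into one feature vector. Assuming scikit-image
# is available, a runnable shortcut looks like:
#
#     from skimage.feature import hog
#     features = hog(img, orientations=9, pixels_per_cell=(8, 8),
#                    cells_per_block=(2, 2))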
16 | 17 | class Hog: 18 | def __init__(self): 19 | pass 20 | -------------------------------------------------------------------------------- /docs/perception.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | perception 3 | 4 | ## Features 5 | 6 | 7 | ## Design 8 | ``` 9 | seahorse object_detection model=FasterRCNN source='https://youtu.be/LNwODJXcvt4' 10 | ``` 11 | - command_line = seahorse + task_type + parameters; task_type can refer to the dataset 12 | - source is a multi-source input, including files (img, video, pcd), URLs (http, rtmp), bags (rosbag, record) and topics 13 | - Visualization is a must, including real-time display and offline saving 14 | - Results must be savable in a variety of formats (img-txt, video, record) 15 | - Parameters are auto-completed by association as much as possible 16 | 17 | #### Other cmd 18 | ``` 19 | seahorse help 20 | seahorse checks 21 | seahorse version 22 | seahorse settings 23 | seahorse copy-cfg 24 | seahorse cfg 25 | ``` 26 | 27 | ## Testcase 28 | -------------------------------------------------------------------------------- /docs/polygon.md: -------------------------------------------------------------------------------- 1 | ## Module name 2 | polygon 3 | 4 | ## Features 5 | 6 | #### Rotating_calipers 7 | - https://en.wikipedia.org/wiki/Rotating_calipers 8 | - http://datagenetics.com/blog/march12014/index.html 9 | 10 | 11 | #### Polygon_triangulation 12 | - https://en.wikipedia.org/wiki/Polygon_triangulation 13 | 14 | #### point_in_polygon 15 | - https://en.wikipedia.org/wiki/Point_in_polygon 16 | 17 | #### clipping_polygon 18 | - https://en.wikipedia.org/wiki/Sutherland%E2%80%93Hodgman_algorithm#:~:text=The%20Sutherland%E2%80%93Hodgman%20algorithm%20is,are%20on%20the%20visible%20side. 19 | 20 | - https://www.geeksforgeeks.org/weiler-atherton-polygon-clipping-algorithm/ 21 | 22 | ## Design 23 | * [lidar_clustering_bench](https://github.com/cavayangtao/lidar_clustering_bench/tree/main) 24 | 25 | ## Testcase 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | # This workflow warns and then closes issues and PRs that have had no activity for a specified amount of time. 2 | # 3 | # You can adjust the behavior by modifying this file. 4 | # For more information, see: 5 | # https://github.com/actions/stale 6 | name: Mark stale issues and pull requests 7 | 8 | on: 9 | schedule: 10 | - cron: '18 11 * * *' 11 | 12 | jobs: 13 | stale: 14 | 15 | runs-on: ubuntu-latest 16 | permissions: 17 | issues: write 18 | pull-requests: write 19 | 20 | steps: 21 | - uses: actions/stale@v5 22 | with: 23 | repo-token: ${{ secrets.GITHUB_TOKEN }} 24 | stale-issue-message: 'Stale issue message' 25 | stale-pr-message: 'Stale pull request message' 26 | stale-issue-label: 'no-issue-activity' 27 | stale-pr-label: 'no-pr-activity' 28 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/associator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from typing import Tuple 18 | 19 | 20 | class Associator: 21 | def __init__(self): 22 | pass 23 | 24 | def associate(self, src, dst) -> Tuple[dict, list, list]: 25 | pass 26 | -------------------------------------------------------------------------------- /seahorse/utils/geometry/point.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | class Point(): 19 | def __init__(self, x, y, z): 20 | self.x = x 21 | self.y = y 22 | self.z = z 23 | 24 | def __str__(self): 25 | return "x: {}, y: {}, z: {}".format(self.x, self.y, self.z) 26 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/min_cost_max_flow.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from typing import Tuple 18 | 19 | from .associator import Associator 20 | 21 | class MinCostMaxFlow(Associator): 22 | def __init__(self): 23 | pass 24 | 25 | def associate(self, src, dst) -> Tuple[dict, list, list]: 26 | pass 27 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/bipartite_graph_match.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from typing import Tuple 18 | 19 | from .associator import Associator 20 | 21 | class BipartiteGraphMatch(Associator): 22 | def __init__(self): 23 | pass 24 | 25 | def associate(self, src, dst) -> Tuple[dict, list, list]: 26 | pass 27 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/conditional_random_field.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from typing import Tuple 18 | 19 | from .associator import Associator 20 | 21 | class ConditionalRandomField(Associator): 22 | def __init__(self): 23 | pass 24 | 25 | def associate(self, src, dst) -> Tuple[dict, list, list]: 26 | pass 27 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/data_associate/dynamic_programming.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from typing import Tuple 18 | 19 | from .associator import Associator 20 | 21 | class DynamicProgramming(Associator): 22 | def __init__(self): 23 | pass 24 | 25 | def associate(self, src, dst) -> Tuple[dict, list, list]: 26 | pass 27 | 28 | -------------------------------------------------------------------------------- /seahorse/utils/geometry/polygon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License.
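# Sketch only: area() below could use the shoelace formula, assuming
# self.points holds the vertices in order (x/y attributes as in Point):
#
#     def area(self):
#         n = len(self.points)
#         s = sum(self.points[i].x * self.points[(i + 1) % n].y
#                 - self.points[(i + 1) % n].x * self.points[i].y
#                 for i in range(n))
#         return abs(s) / 2.0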
16 | 17 | 18 | class Polygon(): 19 | def __init__(self): 20 | self.points = [] 21 | 22 | def area(self): 23 | pass 24 | 25 | def intersection(self): 26 | # TODO(zero): use clipping_polygon 27 | pass 28 | 29 | def union(self): 30 | pass 31 | -------------------------------------------------------------------------------- /seahorse/perception/detection/instance_segmentation/segmentation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import torch 18 | 19 | 20 | class Segmentation: 21 | def __init__(self): 22 | pass 23 | 24 | def detect(self, img): 25 | with torch.no_grad(): 26 | outputs = self.model(img) 27 | return outputs 28 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/semantic_segmentation/segmentation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import torch 18 | 19 | 20 | class Segmentation: 21 | def __init__(self): 22 | pass 23 | 24 | def detect(self, img): 25 | with torch.no_grad(): 26 | outputs = self.model(img) 27 | return outputs 28 | -------------------------------------------------------------------------------- /seahorse/perception/detection/keypoint_detection/keypoint_detection.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | import torch 18 | 19 | 20 | class KeypointDetector: 21 | def __init__(self): 22 | pass 23 | 24 | def detect(self, img): 25 | with torch.no_grad(): 26 | outputs = self.model(img) 27 | return outputs 28 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/object.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | class Object: 18 | def __init__(self) -> None: 19 | # detection 20 | self.bbox = None 21 | self.score = 0 22 | self.type = None 23 | self.position = None 24 | self.size = None 25 | self.heading = None 26 | 27 | # track 28 | self.track_id = None 29 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Go to '...' 16 | 2. Click on '....' 17 | 3. Scroll down to '....' 18 | 4. See error 19 | 20 | **Expected behavior** 21 | A clear and concise description of what you expected to happen. 22 | 23 | **Screenshots** 24 | If applicable, add screenshots to help explain your problem. 25 | 26 | **Desktop (please complete the following information):** 27 | - OS: [e.g. iOS] 28 | - Browser [e.g. chrome, safari] 29 | - Version [e.g. 22] 30 | 31 | **Smartphone (please complete the following information):** 32 | - Device: [e.g. iPhone6] 33 | - OS: [e.g. iOS8.1] 34 | - Browser [e.g. stock browser, safari] 35 | - Version [e.g. 22] 36 | 37 | **Additional context** 38 | Add any other context about the problem here. 39 | -------------------------------------------------------------------------------- /seahorse/perception/io/pil_source.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | from .source import Source 18 | 19 | 20 | class PILSource(Source): 21 | """A Source that reads images via PIL. 22 | 23 | Args: 24 | url: path or URL of the image input. 25 | """ 26 | def __init__(self, url): 27 | pass 28 | 29 | def __iter__(self): 30 | return self 31 | 32 | def __next__(self): 33 | pass 34 | -------------------------------------------------------------------------------- /seahorse/perception/detection/lane_detection/contracts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from typing import List 22 | import numpy as np 23 | 24 | # A lane line is a numpy array of [x, y] points. Shape will be (N, 2). 25 | LaneLine = np.ndarray 26 | 27 | # A list of all detected lane lines in a frame. 28 | LaneResults = List[LaneLine] 29 | -------------------------------------------------------------------------------- /docs/behavior_planning/behavior_tree.md: -------------------------------------------------------------------------------- 1 | ## Behavior Tree 2 | 3 | ## Features 4 | 5 | ## Design 6 | node - different types of nodes (sequence, selector/fallback, decorator, leaf action/condition) 7 | 8 | ## Testcase 9 | 10 | ## Reference 11 | - [wiki](https://en.wikipedia.org/wiki/Behavior_tree_(artificial_intelligence,_robotics_and_control)#:~:text=Article%20Talk,tasks%20in%20a%20modular%20fashion.) 12 | - https://www.gamedeveloper.com/programming/behavior-trees-for-ai-how-they-work 13 | - https://docs.unrealengine.com/4.27/en-US/InteractiveExperiences/ArtificialIntelligence/BehaviorTrees/BehaviorTreesOverview/ 14 | - https://robohub.org/introduction-to-behavior-trees/ 15 | - https://opensource.adobe.com/behavior_tree_editor/#/dash/home 16 | - https://arxiv.org/abs/1709.00084 17 | - https://www.the-data-wrangler.com/fluent-behavior-trees-for-ai-and-game-logic/ 18 | - https://www.behaviortree.dev/docs/Intro 19 | - https://py-trees.readthedocs.io/en/devel/introduction.html 20 | - https://github.com/BehaviorTree/awesome-behavior-trees 21 | - https://github.com/BehaviorTree/Groot 22 | - https://www.pygame.org/project-owyl-1004-.html 23 | -------------------------------------------------------------------------------- /seahorse/perception/io/source.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from abc import ABC, abstractmethod 18 | 19 | 20 | class Source(ABC): 21 | """Abstract base class for all input sources. 22 | 23 | Subclasses implement the iterator protocol and 24 | yield frames one at a time. 25 | """ 26 | def __init__(self): 27 | pass 28 | 29 | @abstractmethod 30 | def __iter__(self): 31 | pass 32 | 33 | @abstractmethod 34 | def __next__(self): 35 | pass 36 | -------------------------------------------------------------------------------- /seahorse/perception/io/message_source.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | from .source import Source 18 | 19 | from pycyber import cyber 20 | from modules.drivers.proto.sensor_image_pb2 import Image 21 | 22 | 23 | def subscribe(topic, callback): 24 | node = cyber.Node("__perception_node") 25 | node.create_reader(topic, Image, callback) 26 | node.spin() 27 | 28 | class MessageSource(Source): 29 | def __init__(self): 30 | pass 31 | 32 | def __iter__(self): 33 | pass 34 | 35 | def __next__(self): 36 | pass 37 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | with open("README.md", "r", encoding="utf-8") as fh: 4 | long_description = fh.read() 5 | 6 | setuptools.setup( 7 | name="seahorse", 8 | version="1.0.0", 9 | author="daohu527", 10 | author_email="daohu527@gmail.com", 11 | description="A pure python autonomous driving framework", 12 | long_description=long_description, 13 | long_description_content_type="text/markdown", 14 | url="https://github.com/daohu527/seahorse", 15 | project_urls={ 16 | "Bug Tracker": "https://github.com/daohu527/seahorse/issues", 17 | }, 18 | classifiers=[ 19 | "Programming Language :: Python :: 3", 20 | "License :: OSI Approved :: Apache Software License", 21 | "Operating System :: OS Independent", 22 | ], 23 | package_dir={"": "."}, 24 | packages=setuptools.find_packages(where="."), 25 | package_data={"": [ 26 | 'perception/config.yaml', 27 | ]}, 28 | install_requires=[ 29 | "torch", 30 | "pyyaml", 31 | ], 32 | entry_points={ 33 | 'console_scripts': [ 34 | 'seahorse = seahorse.cli:main', 35 | ], 36 | }, 37 | python_requires=">=3.6", 38 | ) -------------------------------------------------------------------------------- /seahorse/perception/detection/object_detection/contracts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from typing import List 22 | from dataclasses import dataclass 23 | 24 | 25 | @dataclass 26 | class BoundingBox: 27 | x1: int 28 | y1: int 29 | x2: int 30 | y2: int 31 | 32 | 33 | @dataclass 34 | class DetectionResult: 35 | bounding_box: BoundingBox 36 | class_id: int 37 | label: str 38 | score: float 39 | color: List[int] 40 | 41 | 42 | DetectionResults = List[DetectionResult] 43 | -------------------------------------------------------------------------------- /seahorse/perception/detection/instance_segmentation/mask_rcnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import torch 18 | 19 | from .segmentation import Segmentation 20 | 21 | from torchvision.models.detection import ( 22 | maskrcnn_resnet50_fpn, 23 | MaskRCNN_ResNet50_FPN_Weights 24 | ) 25 | 26 | 27 | class MaskRCNN(Segmentation): 28 | def __init__(self, weights=MaskRCNN_ResNet50_FPN_Weights.COCO_V1): 29 | self.model = maskrcnn_resnet50_fpn(weights=weights) 30 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 31 | self.model.eval().to(device) 32 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/semantic_segmentation/fcn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License.
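# Usage sketch (assumed, not part of this file): torchvision weight enums
# bundle their own preprocessing, so a caller might do
#
#     model = FCN()
#     preprocess = model.weights.transforms()
#     batch = preprocess(img).unsqueeze(0)
#     out = model.detect(batch)["out"]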
16 | 17 | import torch 18 | 19 | from .segmentation import Segmentation 20 | 21 | from torchvision.models.segmentation import ( 22 | fcn_resnet50, 23 | FCN_ResNet50_Weights 24 | ) 25 | 26 | 27 | class FCN(Segmentation): 28 | def __init__(self, weights=FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1): 29 | self.model = fcn_resnet50(weights=weights) 30 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 31 | self.model.eval().to(device) 32 | self.weights = weights 33 | -------------------------------------------------------------------------------- /seahorse/perception/detection/keypoint_detection/keypoint_rcnn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import torch 18 | 19 | from .keypoint_detection import KeypointDetector 20 | 21 | from torchvision.models.detection import ( 22 | keypointrcnn_resnet50_fpn, 23 | KeypointRCNN_ResNet50_FPN_Weights 24 | ) 25 | 26 | 27 | class KeypointRCNN(KeypointDetector): 28 | def __init__(self, weights=KeypointRCNN_ResNet50_FPN_Weights.COCO_V1): 29 | self.model = keypointrcnn_resnet50_fpn(weights=weights) 30 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 31 | self.model.eval().to(device) 32 | -------------------------------------------------------------------------------- /.github/workflows/python-publish.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation.
8 | 9 | name: Upload Python Package 10 | 11 | on: 12 | release: 13 | types: [published] 14 | 15 | permissions: 16 | contents: read 17 | 18 | jobs: 19 | deploy: 20 | 21 | runs-on: ubuntu-latest 22 | 23 | steps: 24 | - uses: actions/checkout@v4 25 | - name: Set up Python 26 | uses: actions/setup-python@v3 27 | with: 28 | python-version: '3.x' 29 | - name: Install dependencies 30 | run: | 31 | python -m pip install --upgrade pip 32 | pip install build 33 | - name: Build package 34 | run: python -m build 35 | - name: Publish package 36 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 37 | with: 38 | user: __token__ 39 | password: ${{ secrets.PYPI_API_TOKEN }} 40 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/semantic_segmentation/lraspp.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import torch 18 | 19 | from .segmentation import Segmentation 20 | 21 | from torchvision.models.segmentation import ( 22 | lraspp_mobilenet_v3_large, 23 | LRASPP_MobileNet_V3_Large_Weights 24 | ) 25 | 26 | 27 | class LRASPP(Segmentation): 28 | def __init__(self, weights=LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1): 29 | self.model = lraspp_mobilenet_v3_large(weights=weights) 30 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 31 | self.model.eval().to(device) 32 | self.weights = weights 33 | -------------------------------------------------------------------------------- /seahorse/perception/segmentation/semantic_segmentation/deep_lab_v3.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License.
16 | 17 | import torch 18 | 19 | from segmentation import Segmentation 20 | 21 | from torchvision.models.segmentation import ( 22 | deeplabv3_mobilenet_v3_large, 23 | DeepLabV3_MobileNet_V3_Large_Weights 24 | ) 25 | 26 | 27 | class DeepLabV3(Segmentation): 28 | def __init__(self, weights=DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1): 29 | self.model = deeplabv3_mobilenet_v3_large(weights) 30 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 31 | self.model.eval().to(device) 32 | self.weights = weights 33 | -------------------------------------------------------------------------------- /seahorse/perception/detection/instance_segmentation/visualize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import numpy as np 18 | import matplotlib.pyplot as plt 19 | 20 | import torchvision.transforms.functional as F 21 | 22 | 23 | plt.rcParams["savefig.bbox"] = 'tight' 24 | 25 | 26 | def show(imgs): 27 | if not isinstance(imgs, list): 28 | imgs = [imgs] 29 | fig, axs = plt.subplots(ncols=len(imgs), squeeze=False) 30 | for i, img in enumerate(imgs): 31 | img = img.detach() 32 | img = F.to_pil_image(img) 33 | axs[0, i].imshow(np.asarray(img)) 34 | axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[]) 35 | plt.show() 36 | -------------------------------------------------------------------------------- /seahorse/perception/visualizer/visualize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
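`show()` accepts either a single image tensor or a list of them; a tiny usage sketch with random CHW uint8 tensors standing in for real images:

import torch

img = torch.randint(0, 256, (3, 64, 64), dtype=torch.uint8)
show(img)          # a single tensor is wrapped into a one-element list
show([img, img])   # multiple tensors render side by side in one figure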
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | import numpy as np
22 | import matplotlib.pyplot as plt
23 | 
24 | import torchvision.transforms.functional as F
25 | 
26 | 
27 | plt.rcParams["savefig.bbox"] = "tight"
28 | 
29 | 
30 | def show(imgs):
31 |     if not isinstance(imgs, list):
32 |         imgs = [imgs]
33 |     fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
34 |     for i, img in enumerate(imgs):
35 |         img = img.detach()
36 |         img = F.to_pil_image(img)
37 |         axs[0, i].imshow(np.asarray(img))
38 |         axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
39 |     plt.show()
40 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/multitask/contracts.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | from dataclasses import dataclass
22 | from typing import Any, List, Optional
23 | 
24 | 
25 | @dataclass
26 | class BoundingBox:
27 |     x_min: int
28 |     y_min: int
29 |     x_max: int
30 |     y_max: int
31 | 
32 | 
33 | @dataclass
34 | class DetectionResult:
35 |     # Field declarations let @dataclass generate __init__; the previous
36 |     # hand-written __init__ silently bypassed the decorator.
37 |     bounding_box: BoundingBox
38 |     class_id: int
39 |     label: str
40 |     score: float
41 |     color: Any = None
42 |     mask: Optional[Any] = None
43 | 
44 |     def __post_init__(self):
45 |         self.score = float(self.score)
46 | 
47 | 
48 | DetectionResults = List[DetectionResult]
49 | 
--------------------------------------------------------------------------------
/seahorse/perception/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from pathlib import Path
3 | from PIL import Image
4 | from torchvision.io import read_video as tv_read_video
5 | 
6 | IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm'  # image suffixes
7 | VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv', 'webm'  # video suffixes
8 | 
9 | class FileType(Enum):
10 |     IMAGE = 1
11 |     POINTCLOUD = 2
12 |     VIDEO = 3
13 |     ROSBAG = 4
14 |     RECORD = 5
15 | 
16 | def check_type(file_name):
17 |     suffix = Path(file_name).suffix.lower().lstrip('.')  # Path.suffix keeps the dot
18 |     if suffix in IMG_FORMATS:
19 |         return FileType.IMAGE
20 |     elif suffix in ('pcd',):
21 |         return FileType.POINTCLOUD
22 |     elif suffix in VID_FORMATS:
23 |         return FileType.VIDEO
24 |     elif suffix in ('bag',):
25 |         return FileType.ROSBAG
26 |     elif suffix in ('record',):
27 |         return FileType.RECORD
28 |     else:
29 |         raise TypeError(f'{suffix} not supported!')
30 | 
31 | def read_image(img_file):
32 |     input_image = Image.open(img_file)
33 |     return input_image.convert("RGB")
34 | 
35 | def read_video(video_file):
36 |     frames, _, _ = tv_read_video(video_file, output_format="TCHW")  # alias avoids self-recursion
37 |     return frames
38 | 
39 | def read_pointcloud(pointcloud_file):
40 |     pass
41 | 
42 | def read_bag(bag_file):
43 |     pass
44 | def read_record(record_file):
45 | 
import cyber_record 46 | 47 | -------------------------------------------------------------------------------- /seahorse/perception/tools/mock_publish_image.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import time 18 | 19 | from modules.drivers.proto.sensor_image_pb2 import Image 20 | from pycyber import cyber 21 | 22 | from torchvision.io import read_image 23 | 24 | 25 | def create_image(): 26 | # img = read_image() 27 | msg = Image() 28 | # msg.data = img 29 | return msg 30 | 31 | 32 | def publish_image(): 33 | node = cyber.Node("mock_image") 34 | writer = node.create_writer("mock/image", Image, 5) 35 | 36 | g_count = 1 37 | while not cyber.is_shutdown(): 38 | msg = create_image() 39 | writer.write(msg) 40 | g_count = g_count + 1 41 | time.sleep(0.1) 42 | 43 | 44 | if __name__ == '__main__': 45 | cyber.init() 46 | publish_image() 47 | cyber.shutdown() 48 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/tracklet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import itertools 18 | from enum import Enum 19 | 20 | from object import Object 21 | 22 | class Status(Enum): 23 | LOST = 1 24 | HIT = 2 25 | 26 | 27 | class Tracklet: 28 | global_id = itertools.count() 29 | 30 | def __init__(self, detection = None) -> None: 31 | self.id = self.next_id() 32 | self.last_update_time = None 33 | self.objects = [] 34 | 35 | self.add(detection) 36 | 37 | def is_lost(self): 38 | pass 39 | 40 | def latest_obj(self): 41 | pass 42 | 43 | def add(self, detection): 44 | if detection is None: 45 | return 46 | pass 47 | 48 | def predict(self): 49 | pass 50 | 51 | def update(self): 52 | pass 53 | 54 | def next_id(self): 55 | return next(Tracklet.global_id) 56 | -------------------------------------------------------------------------------- /seahorse/perception/detection/traffic_light_detection/contracts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 
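`Tracklet.global_id` above is a single class-level `itertools.count()`, so ids are unique across every tracklet created in the process; a minimal illustration of the same pattern (the demo class is hypothetical):

import itertools

class Demo:
    global_id = itertools.count()

    def __init__(self):
        self.id = next(Demo.global_id)

a, b = Demo(), Demo()
assert (a.id, b.id) == (0, 1)  # monotonically increasing, never reused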
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from dataclasses import dataclass 22 | from typing import List 23 | 24 | 25 | # It's good practice to define this here, even if it's identical 26 | # to the one in object_detection, to keep the module self-contained. 27 | @dataclass(frozen=True) 28 | class BoundingBox: 29 | """Defines an immutable bounding box (x1, y1, x2, y2).""" 30 | 31 | x1: int 32 | y1: int 33 | x2: int 34 | y2: int 35 | 36 | 37 | @dataclass(frozen=True) 38 | class TrafficLightResult: 39 | """Defines a standard, immutable traffic light detection result.""" 40 | 41 | bounding_box: BoundingBox 42 | state: str # e.g., 'red', 'yellow', 'green', 'unknown' 43 | score: float 44 | 45 | 46 | TrafficLightResults = List[TrafficLightResult] 47 | -------------------------------------------------------------------------------- /seahorse/utils/geometry/geometry.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | def _orientation(p, q, r) -> int: 19 | return (q.y - p.y) * (r.x - p.x) - (q.x - p.x) * (r.y - p.y) 20 | 21 | def convex_hull(points) -> list: 22 | n = len(points) 23 | if n < 3: 24 | return [] 25 | 26 | l = 0 27 | for i in range(n): 28 | if points[l].x > points[i].x: 29 | l = i 30 | 31 | p = l 32 | hull = [] 33 | while(True): 34 | hull.append(points[p]) 35 | q = (p + 1) % n 36 | for r in range(n): 37 | if _orientation(points[p], points[q], points[r]) < 0: 38 | q = r 39 | p = q 40 | if p == l: 41 | break 42 | 43 | return hull 44 | 45 | 46 | def is_point_in_polygon(point, polygon): 47 | pass 48 | 49 | def clipping_polygon(): 50 | pass 51 | 52 | def rotating_calipers(): 53 | pass 54 | -------------------------------------------------------------------------------- /seahorse/perception/detection/lane_detection/detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
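`convex_hull()` above implements gift wrapping (Jarvis march): starting from the leftmost point it repeatedly picks the next-most-counterclockwise point until it wraps back to the start. A quick sanity check, with a namedtuple standing in for the project's `Point` class from point.py (not shown in this section):

from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
pts = [Point(0, 0), Point(2, 0), Point(1, 1), Point(2, 2), Point(0, 2)]
hull = convex_hull(pts)
# The interior point (1, 1) is dropped; only the four corners remain.
assert set(hull) == {Point(0, 0), Point(2, 0), Point(2, 2), Point(0, 2)}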
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from abc import ABC, abstractmethod 22 | import numpy as np 23 | import torch 24 | from .contracts import LaneResults 25 | 26 | 27 | class LaneDetectorBase(ABC): 28 | """Abstract Base Class for all lane detectors.""" 29 | 30 | def __init__(self, device: torch.device): 31 | self.device = device 32 | self.model = self._load_model() 33 | 34 | @abstractmethod 35 | def _load_model(self) -> torch.nn.Module: 36 | """Subclasses must implement this to load their specific model.""" 37 | raise NotImplementedError 38 | 39 | @abstractmethod 40 | def __call__(self, image: np.ndarray) -> LaneResults: 41 | """The main public method. Takes a numpy image and returns standardized results.""" 42 | raise NotImplementedError 43 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE/pull_request_template.md: -------------------------------------------------------------------------------- 1 | # Description 2 | 3 | Please include a summary of the changes and the related issue. Please also include relevant motivation and context. List any dependencies that are required for this change. 4 | 5 | Fixes # (issue) 6 | 7 | ## Type of change 8 | 9 | Please delete options that are not relevant. 10 | 11 | - [ ] Bug fix (non-breaking change which fixes an issue) 12 | - [ ] New feature (non-breaking change which adds functionality) 13 | - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) 14 | - [ ] This change requires a documentation update 15 | 16 | # How Has This Been Tested? 17 | 18 | Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration 19 | 20 | - [ ] Test A 21 | - [ ] Test B 22 | 23 | **Test Configuration**: 24 | * Firmware version: 25 | * Hardware: 26 | * Toolchain: 27 | * SDK: 28 | 29 | # Checklist: 30 | 31 | - [ ] My code follows the style guidelines of this project 32 | - [ ] I have performed a self-review of my code 33 | - [ ] I have commented my code, particularly in hard-to-understand areas 34 | - [ ] I have made corresponding changes to the documentation 35 | - [ ] My changes generate no new warnings 36 | - [ ] I have added tests that prove my fix is effective or that my feature works 37 | - [ ] New and existing unit tests pass locally with my changes 38 | - [ ] Any dependent changes have been merged and published in downstream modules 39 | -------------------------------------------------------------------------------- /seahorse/perception/detection/traffic_light_detection/detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
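`LaneDetectorBase` pins down the two methods every backend must supply; a hypothetical no-op backend shows the minimum a subclass needs to implement:

import numpy as np
import torch

class NoOpLaneDetector(LaneDetectorBase):
    def _load_model(self) -> torch.nn.Module:
        return torch.nn.Identity()  # placeholder; a real backend loads weights here

    def __call__(self, image: np.ndarray):
        return []  # LaneResults: an empty list means no lanes were found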
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from abc import ABC, abstractmethod 22 | import numpy as np 23 | import torch 24 | from .contracts import TrafficLightResults 25 | 26 | 27 | class TrafficLightDetectorBase(ABC): 28 | """Abstract Base Class for all traffic light detectors.""" 29 | 30 | def __init__(self, device: torch.device): 31 | self.device = device 32 | self.model = self._load_model() 33 | 34 | @abstractmethod 35 | def _load_model(self) -> torch.nn.Module: 36 | """Subclasses must implement this to load their specific model.""" 37 | raise NotImplementedError 38 | 39 | @abstractmethod 40 | def __call__(self, image: np.ndarray) -> TrafficLightResults: 41 | """The main public method. Takes a numpy image and returns standardized results.""" 42 | raise NotImplementedError 43 | -------------------------------------------------------------------------------- /seahorse/perception/tracking/affinity/distance.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | import numpy as np 18 | 19 | 20 | def euclidean(u, v): 21 | return np.sqrt(squared_euclidean(u, v)) 22 | 23 | def squared_euclidean(u, v): 24 | return np.sum(np.square(u - v)) 25 | 26 | def standardized_euclidian(u, v): 27 | pass 28 | 29 | def manhattan(u, v): 30 | pass 31 | 32 | def canberra(u, v): 33 | pass 34 | 35 | def chebyshev(u, v): 36 | pass 37 | 38 | def minkowski(u, v): 39 | pass 40 | 41 | def cosine(u, v): 42 | pass 43 | 44 | def pearson_correlation(u, v): 45 | pass 46 | 47 | def spearman_correlation(u, v): 48 | pass 49 | 50 | def mahalanobis(u, v): 51 | pass 52 | 53 | def chi_square(u, v): 54 | pass 55 | 56 | def jensen_shannon(u, v): 57 | pass 58 | 59 | def levenshtein(u, v): 60 | pass 61 | 62 | def hamming(u, v): 63 | pass 64 | 65 | def jaccard(u, v): 66 | pass 67 | 68 | def sorensen_dice(u, v): 69 | pass 70 | 71 | def bray_curtis(u, v): 72 | pass 73 | -------------------------------------------------------------------------------- /seahorse/perception/detection/keypoint_detection/test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
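Most of the metrics in distance.py are still stubs; hedged sketches for two of them, following the same NumPy conventions as the implemented `euclidean`:

import numpy as np

def manhattan(u, v):
    # L1 distance: sum of absolute coordinate differences.
    return np.sum(np.abs(u - v))

def cosine(u, v):
    # Cosine *distance* is 1 minus cosine similarity.
    return 1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))

assert manhattan(np.array([0, 0]), np.array([3, 4])) == 7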
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | import torch
18 | 
19 | from keypoint_rcnn import KeypointRCNN
20 | from seahorse.perception.visualizer.visualize import show
21 | 
22 | from torchvision.io import read_image
23 | from torchvision.utils import draw_keypoints
24 | from torchvision.transforms.functional import convert_image_dtype
25 | 
26 | 
27 | def test_keypoint(img_file):
28 |     keypoint_rcnn = KeypointRCNN()
29 |     img = read_image(img_file)
30 |     print(img.shape)
31 |     outputs = keypoint_rcnn.detect([convert_image_dtype(img)])
32 | 
33 |     print(outputs)
34 |     kpts = outputs[0]['keypoints']
35 |     scores = outputs[0]['scores']
36 |     detect_threshold = 0.2
37 |     idx = torch.where(scores > detect_threshold)
38 |     keypoints = kpts[idx]
39 |     res = draw_keypoints(img, keypoints, colors="blue", radius=4)
40 |     show(res)
41 | 
42 | 
43 | if __name__ == "__main__":
44 |     img_file = "data/FudanPed00001.png"
45 |     test_keypoint(img_file)
46 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/lane_detection/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | from typing import Dict, Any
22 | import torch
23 | 
24 | from .detector import LaneDetectorBase
25 | from .spline_detector import SplineLaneDetector
26 | from .contracts import LaneLine, LaneResults
27 | 
28 | 
29 | def build_lane_detector(config: Dict[str, Any]) -> LaneDetectorBase:
30 |     """Factory function to build and return a lane detector instance."""
31 |     backend = config.get("backend", "spline")  # Default to spline if not specified
32 |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
33 |     print(
34 |         f"INFO: Building lane detector with backend '{backend}' on device '{device}'..."
35 |     )
36 | 
37 |     if backend == "spline":
38 |         return SplineLaneDetector(device=device)
39 |     else:
40 |         raise ValueError(f"Unknown lane detector backend: '{backend}'")
41 | 
42 | 
43 | __all__ = ["build_lane_detector", "LaneDetectorBase", "LaneLine", "LaneResults"]
44 | 
--------------------------------------------------------------------------------
/seahorse/perception/data/dataloader.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | 
18 | from seahorse.perception.utils import (FileType, check_type, read_image,
19 |                                        read_video, read_pointcloud,
20 |                                        read_bag, read_record)
21 | 
22 | 
23 | class DataLoader():
24 |     def __init__(self, source, filter, associater):
25 |         self.source = source
26 |         self.filter = filter
27 |         self.associater = associater
28 | 
29 |     def read_unimodal(self, path, file_type):
30 |         # Route the file to the matching single-modality reader.
31 |         if file_type is FileType.IMAGE:
32 |             return read_image(path)
33 |         elif file_type is FileType.VIDEO:
34 |             return read_video(path)
35 |         elif file_type is FileType.POINTCLOUD:
36 |             return read_pointcloud(path)
37 |         elif file_type is FileType.ROSBAG:
38 |             return read_bag(path)
39 |         elif file_type is FileType.RECORD:
40 |             return read_record(path)
41 | 
42 |     def read_multimodal(self, path, file_type):
43 |         # Multi-sensor containers (rosbag/record) bundle point clouds and
44 |         # images together; per-modality association is still TODO.
45 |         if file_type is FileType.ROSBAG:
46 |             return read_bag(path)
47 |         elif file_type is FileType.RECORD:
48 |             return read_record(path)
49 | 
50 |     def __iter__(self):
51 |         self.count = 0
52 |         self._files = iter(self.source)
53 |         return self
54 | 
55 |     def __next__(self):
56 |         for path, file_type in self._files:
57 |             if self.filter(file_type):
58 |                 self.count += 1
59 |                 return self.read_unimodal(path, file_type)
60 |         raise StopIteration
61 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/instance_segmentation/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | 
18 | from mask_rcnn import MaskRCNN
19 | from visualize import show
20 | 
21 | from torchvision.io import read_image
22 | from torchvision.utils import draw_segmentation_masks
23 | from torchvision.transforms.functional import convert_image_dtype
24 | 
25 | 
26 | def test_instance_segmentation(img_file):
27 |     mask_rcnn = MaskRCNN()
28 |     img = read_image(img_file)
29 |     print(img.shape)
30 |     outputs = mask_rcnn.detect([convert_image_dtype(img)])
31 | 
32 |     output = outputs[0]
33 | 
34 |     proba_threshold = 0.5
35 |     output_bool_masks = output['masks'] > proba_threshold
36 |     print(f"shape = {output_bool_masks.shape}, dtype = {output_bool_masks.dtype}")
37 | 
38 |     # There's an extra dimension (1) to the masks.
We need to remove it.
39 |     output_bool_masks = output_bool_masks.squeeze(1)
40 | 
41 |     show(draw_segmentation_masks(img, output_bool_masks, alpha=0.9))
42 | 
43 | 
44 | if __name__ == "__main__":
45 |     img_file = "data/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151610512404.jpg"
46 |     test_instance_segmentation(img_file)
47 | 
--------------------------------------------------------------------------------
/seahorse/perception/tracking/tracker.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | from tracklet import Tracklet
18 | from affinity.similarity import iou
19 | from data_associate.bipartite_graph_match import BipartiteGraphMatch
20 | 
21 | class Tracker:
22 |     def __init__(self) -> None:
23 |         self.tracklets = dict()
24 |         self.associator = BipartiteGraphMatch()
25 | 
26 |     def track(self, detections) -> list:
27 |         costs = self.calc_similarity(self.tracklets, detections)
28 |         match_pairs, unmatch_tracklets, unmatch_detections = \
29 |             self.associator.associate(costs)
30 | 
31 |         self.del_tracklet(unmatch_tracklets)
32 |         self.add_tracklet(unmatch_detections)
33 |         for tracklet, detection in match_pairs:
34 |             tracklet.add(detection)
35 | 
36 |     def add_tracklet(self, unmatch_detections):
37 |         for detection in unmatch_detections:
38 |             tracklet = Tracklet(detection)
39 |             # dict.update() expects a mapping; index assignment is what we want.
40 |             self.tracklets[tracklet.id] = tracklet
41 | 
42 |     def del_tracklet(self, unmatch_tracklets):
43 |         for tracklet in unmatch_tracklets:
44 |             if tracklet.is_lost():
45 |                 self.tracklets.pop(tracklet.id)
46 | 
47 |     def calc_similarity(self, src, dst):
48 |         return iou(src, dst)
49 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/multitask/base_detector.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | from abc import ABC, abstractmethod
22 | import numpy as np
23 | import torch
24 | from .contracts import DetectionResults
25 | 
26 | 
27 | class ObjectDetector(ABC):
28 |     """
29 |     The Abstract Base Class for all object detectors.
30 | 31 | It defines the public interface (`__call__`) that all concrete 'backend' 32 | implementations must follow. 33 | """ 34 | 35 | def __init__(self, device: torch.device): 36 | self.device = device 37 | # The model is loaded by the concrete subclass. 38 | self.model = self._load_model() 39 | 40 | @abstractmethod 41 | def _load_model(self) -> torch.nn.Module: 42 | """Subclasses must implement this to load their specific model.""" 43 | raise NotImplementedError 44 | 45 | @abstractmethod 46 | def __call__(self, image: np.ndarray, *args, **kwargs) -> DetectionResults: 47 | """ 48 | The main public method. Takes a numpy image and returns standardized results. 49 | Subclasses must implement their entire pipeline here for maximum flexibility. 50 | """ 51 | raise NotImplementedError 52 | -------------------------------------------------------------------------------- /seahorse/perception/detection/object_detection/base_detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from abc import ABC, abstractmethod 22 | import numpy as np 23 | import torch 24 | from .contracts import DetectionResults 25 | 26 | 27 | class ObjectDetector(ABC): 28 | """ 29 | The Abstract Base Class for all object detectors. 30 | 31 | It defines the public interface (`__call__`) that all concrete 'backend' 32 | implementations must follow. 33 | """ 34 | 35 | def __init__(self, device: torch.device): 36 | self.device = device 37 | # The model is loaded by the concrete subclass. 38 | self.model = self._load_model() 39 | 40 | @abstractmethod 41 | def _load_model(self) -> torch.nn.Module: 42 | """Subclasses must implement this to load their specific model.""" 43 | raise NotImplementedError 44 | 45 | @abstractmethod 46 | def __call__(self, image: np.ndarray, *args, **kwargs) -> DetectionResults: 47 | """ 48 | The main public method. Takes a numpy image and returns standardized results. 49 | Subclasses must implement their entire pipeline here for maximum flexibility. 50 | """ 51 | raise NotImplementedError 52 | -------------------------------------------------------------------------------- /seahorse/perception/io/stream_source.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
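Because every backend honors the same `__call__` contract defined above, calling code never needs to know which concrete detector it holds; a sketch of that polymorphic use:

import numpy as np

def run_once(detector: ObjectDetector, frame: np.ndarray):
    # Works identically for the torchvision backend, the YOLO backend, etc.
    return detector(frame)  # -> DetectionResults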
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | from typing import Iterator
21 | 
22 | import cv2
23 | import numpy as np
24 | 
25 | from source import Source
26 | 
27 | 
28 | class StreamSource(Source):
29 |     """Read a live video stream (e.g. RTSP/HTTP) frame by frame."""
30 | 
31 |     def __init__(self, url):
32 |         pass
33 | 
34 |     def __iter__(self):
35 |         return self
36 | 
37 |     def __next__(self):
38 |         pass
39 | 
40 | 
41 | class VideoSource:
42 |     """Iterator for reading frames from a video file"""
43 | 
44 |     def __init__(self, video_path: str):
45 |         self.cap = cv2.VideoCapture(video_path)
46 |         if not self.cap.isOpened():
47 |             raise IOError(f"Cannot open video file: {video_path}")
48 |         self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
49 |         self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
50 |         self.fps = self.cap.get(cv2.CAP_PROP_FPS)
51 | 
52 |     def __iter__(self) -> Iterator[np.ndarray]:
53 |         return self
54 | 
55 |     def __next__(self) -> np.ndarray:
56 |         ret, frame = self.cap.read()
57 |         if not ret:
58 |             raise StopIteration
59 |         return frame
60 | 
61 |     def release(self):
62 |         """Release video capture object"""
63 |         self.cap.release()
64 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/traffic_light_detection/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | from typing import Dict, Any
22 | import torch
23 | 
24 | from .detector import TrafficLightDetectorBase
25 | from .traffic_light import TrafficLightDetector  # the mock implementation lives in traffic_light.py
26 | from .contracts import TrafficLightResult, TrafficLightResults, BoundingBox
27 | 
28 | 
29 | def build_traffic_light_detector(config: Dict[str, Any]) -> TrafficLightDetectorBase:
30 |     """Factory function to build and return a traffic light detector instance."""
31 |     backend = config.get("backend", "mock")  # Default to mock if not specified
32 |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
33 |     print(
34 |         f"INFO: Building traffic light detector with backend '{backend}' on device '{device}'..."
35 |     )
36 | 
37 |     if backend == "mock":
38 |         return TrafficLightDetector(device=device)
39 |     # When you have a real model, you'll add its backend here:
40 |     # elif backend == "real_classifier":
41 |     # return RealTrafficLightDetector(...)
42 |     else:
43 |         raise ValueError(f"Unknown traffic light detector backend: '{backend}'")
44 | 
45 | 
46 | __all__ = [
47 |     "build_traffic_light_detector",
48 |     "TrafficLightDetectorBase",
49 |     "TrafficLightResult",
50 |     "TrafficLightResults",
51 |     "BoundingBox",
52 | ]
53 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/object_detection/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 | 
20 | 
21 | from typing import Dict, Any
22 | import torch
23 | 
24 | from .detector import Detector
25 | from .yolo import YOLODetector
26 | from .base_detector import ObjectDetector
27 | from .contracts import BoundingBox, DetectionResult, DetectionResults
28 | 
29 | 
30 | def build_object_detector(config: Dict[str, Any]) -> ObjectDetector:
31 |     """
32 |     Factory function to build and return an object detector instance.
33 |     It intelligently routes the config to the correct backend detector.
34 |     """
35 |     backend = config.get("backend")
36 |     if not backend:
37 |         raise ValueError(
38 |             "Detector config must specify a 'backend' ('vision' or 'yolo')."
39 |         )
40 | 
41 |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
42 |     print(
43 |         f"INFO: Building object detector with backend '{backend}' on device '{device}'..."
44 |     )
45 | 
46 |     if backend == "vision":
47 |         return Detector(
48 |             model_name=config["model_name"],
49 |             weights_name=config["weights_name"],
50 |             score_thresh=config.get("score_thresh", 0.7),
51 |             device=device,
52 |         )
53 |     elif backend == "yolo":
54 |         # Wire up the YOLO backend advertised in the error message above.
55 |         return YOLODetector(model_path=config["model_path"], device=device)
56 |     else:
57 |         raise ValueError(f"Unknown object detector backend: '{backend}'")
58 | 
59 | 
60 | __all__ = [
61 |     "build_object_detector",
62 |     "ObjectDetector",
63 |     "DetectionResult",
64 |     "DetectionResults",
65 |     "BoundingBox",
66 | ]
67 | 
--------------------------------------------------------------------------------
/seahorse/perception/image_detection.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
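A hedged usage sketch of the factory above: the config keys match the constructor call shown, while the concrete model and weights names are assumptions chosen for illustration, not values taken from the project's config.yaml.

config = {
    "backend": "vision",
    "model_name": "fasterrcnn_resnet50_fpn",                     # assumed
    "weights_name": "FasterRCNN_ResNet50_FPN_Weights.COCO_V1",   # assumed
    "score_thresh": 0.5,
}
detector = build_object_detector(config)
# detections = detector(frame)  # returns DetectionResults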
16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | from modules.drivers.proto.sensor_image_pb2 import Image 22 | from pycyber import cyber 23 | 24 | from torchvision.transforms.functional import convert_image_dtype 25 | 26 | from detection.faster_rcnn import FasterRCNN 27 | 28 | 29 | class ImageDetection: 30 | def __init__(self, subscribe_topic, publish_topic, need_visualize=False) -> None: 31 | self.name = "ImageDetection" 32 | self.subscribe_topic = subscribe_topic 33 | self.publish_topic = publish_topic 34 | self.need_visualize = need_visualize 35 | self.detector = FasterRCNN() 36 | 37 | def start(self): 38 | self._node = cyber.Node(self.name) 39 | self._reader = self._node.create_reader( 40 | self.subscribe_topic, Image, self.callback 41 | ) 42 | self._writer = self._node.create_writer(self.publish_topic, Image, 1) 43 | self._node.spin() 44 | 45 | def callback(self, img): 46 | self.preprocess(img) 47 | outputs = self.detector.detect([convert_image_dtype(img)]) 48 | self.postprocess(outputs) 49 | if self.need_visualize: 50 | self.visualize() 51 | self._writer.write(outputs) 52 | 53 | def preprocess(self, img): 54 | pass 55 | 56 | def postprocess(self, outputs): 57 | pass 58 | 59 | def visualize(self): 60 | pass 61 | 62 | 63 | if __name__ == "__main__": 64 | cyber.init() 65 | image_detection = ImageDetection() 66 | image_detection.start() 67 | cyber.shutdown() 68 | -------------------------------------------------------------------------------- /seahorse/perception/io/screenshot_source.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2023 daohu527 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
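Note that the `__main__` block in image_detection.py above calls `ImageDetection()` without the two required topic arguments. A runnable entry point would pass them explicitly; the input topic below matches the one written by mock_publish_image.py, while the output topic name is illustrative:

if __name__ == "__main__":
    cyber.init()
    # "mock/image" pairs with the mock publisher; the output topic is an assumption.
    image_detection = ImageDetection("mock/image", "perception/objects")
    image_detection.start()
    cyber.shutdown()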
16 | 
17 | import numpy as np
18 | 
19 | from source import Source
20 | 
21 | 
22 | class ScreenshotSource(Source):
23 |     def __init__(self, source, imgsz=640):
24 |         # check_requirements('mss')
25 |         import mss
26 | 
27 |         source, *params = source.split()
28 |         self.screen, left, top, width, height = 0, None, None, None, None
29 | 
30 |         if len(params) == 1:
31 |             self.screen = int(params[0])
32 |         elif len(params) == 4:
33 |             left, top, width, height = (int(x) for x in params)
34 |         elif len(params) == 5:
35 |             self.screen, left, top, width, height = (int(x) for x in params)
36 | 
37 |         self.imgsz = imgsz
38 |         self.mode = 'stream'
39 |         self.frame = 0
40 |         self.sct = mss.mss()
41 |         self.bs = 1
42 | 
43 |         monitor = self.sct.monitors[self.screen]
44 |         self.top = monitor['top'] if top is None else (monitor['top'] + top)
45 |         self.left = monitor['left'] if left is None else (monitor['left'] + left)
46 |         self.width = width or monitor['width']
47 |         self.height = height or monitor['height']
48 |         self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}
49 | 
50 |     def __iter__(self):
51 |         return self
52 | 
53 |     def __next__(self):
54 |         im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3]
55 |         s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '
56 | 
57 |         self.frame += 1
58 |         return [str(self.screen)], [im0], None, s  # screen, img, vid_cap, string
59 | 
--------------------------------------------------------------------------------
/seahorse/perception/segmentation/semantic_segmentation/test.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
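`ScreenshotSource` parses its source string as a leading label token followed by either a screen index or an LTWH crop; a usage sketch (assumes the `mss` package is installed; the leading token is consumed by the parser and otherwise ignored):

src = ScreenshotSource("screen 0")                 # whole monitor 0
src = ScreenshotSource("screen 100 200 640 480")   # LTWH crop of the default screen
screens, imgs, vid_cap, info = next(src)
# imgs[0] is an (H, W, 3) array with the alpha channel stripped; vid_cap is None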
16 | 17 | import torch 18 | 19 | from deep_lab_v3 import DeepLabV3 20 | from fcn import FCN 21 | from lraspp import LRASPP 22 | from visualize import show 23 | 24 | from PIL import Image 25 | from torchvision.utils import draw_segmentation_masks 26 | from torchvision import transforms 27 | from torchvision.transforms.functional import to_pil_image, pil_to_tensor 28 | 29 | def test_deep_model(deep_lab_v3, img_file): 30 | input_image = Image.open(img_file) 31 | input_image = input_image.convert("RGB") 32 | 33 | preprocess = transforms.Compose([ 34 | transforms.ToTensor(), 35 | ]) 36 | 37 | input_tensor = preprocess(input_image) 38 | input_batch = input_tensor.unsqueeze(0) 39 | outputs = deep_lab_v3.detect(input_batch) 40 | output = outputs['out'] 41 | # print(output) 42 | 43 | normalized_masks = output.softmax(dim=1) 44 | class_to_idx = {cls: idx for (idx, cls) in enumerate(deep_lab_v3.weights.meta["categories"])} 45 | print(class_to_idx) 46 | mask = normalized_masks[0] 47 | num_classes = normalized_masks.shape[1] 48 | class_dim = 0 49 | all_classes_masks = mask.argmax(class_dim) == torch.arange(num_classes)[:, None, None] 50 | 51 | show(draw_segmentation_masks(pil_to_tensor(input_image), masks=all_classes_masks, alpha=0.7)) 52 | 53 | 54 | if __name__ == "__main__": 55 | img_file = "data/n008-2018-08-01-15-16-36-0400__CAM_FRONT__1533151610512404.jpg" 56 | # deep_lab_v3 = DeepLabV3() 57 | # test_deep_model(deep_lab_v3, img_file) 58 | 59 | fcn = FCN() 60 | test_deep_model(fcn, img_file) 61 | 62 | # lraspp = LRASPP() 63 | # test_deep_model(lraspp, img_file) 64 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | (project logo image)
3 | 
4 | 
5 | 
6 | 
7 | [English](README.md) | [中文](README.zh-cn.md) | [한국어](README.ko.md) | [日本語](README.ja.md)
8 | 
9 | 
10 | (build and documentation status badges)
11 | 
12 | 
13 | 
14 | 
15 | 
16 | 
17 | 
18 | 
19 | 
20 | 
21 | A pure python autonomous driving framework
22 | 
23 | 
24 | 
25 | 26 | ## Related works 27 | 28 | - [PythonRobotics](https://github.com/AtsushiSakai/PythonRobotics) Python codes for robotics algorithm 29 | - [Awesome Robotic Tooling](https://github.com/protontypes/awesome-robotic-tooling) A curated list of tooling for professional robotic development 30 | 31 | ## Online Practice 32 | 33 | - [Traffic light detection](https://huggingface.co/spaces/daohu527/seahorse) 34 | - [Planning](https://mattbradley.github.io/dash/) 35 | 36 | ## Design 37 | 38 | You might be surprised that this autonomous driving system was developed in python. I know the speed is slow and can't guarantee real time, but its benefits are also obvious. It's easy to add features, verify algorithms, structure data, visualize and so on. 39 | 40 | So this is probably the best project for **teaching and research** in the field of autonomous driving. Although this is a bit of an overstatement, we still want it to be the matlab of autonomous driving. 41 | 42 | We call it the `seahorse`, small and delicate. Enjoy it and action now!!! 43 | 44 | ## Architecture 45 | 46 | The following is the software block diagram of seahorse, which integrates various algorithm libraries for autonomous driving and supports a variety of middleware through the wrappar layer. At the same time, it also provides a rich API interface to connect to the simulation system. 47 | 48 | We hope it will be an autonomous driving system with rich algorithm support, simple use, and standard interfaces. 49 | 50 | 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
95 | __pypackages__/
96 | 
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 | 
101 | # SageMath parsed files
102 | *.sage.py
103 | 
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 | 
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 | 
117 | # Rope project settings
118 | .ropeproject
119 | 
120 | # mkdocs documentation
121 | /site
122 | 
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 | 
128 | # Pyre type checker
129 | .pyre/
130 | 
--------------------------------------------------------------------------------
/seahorse/cli.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | 
17 | import sys
18 | import argparse
19 | 
20 | from perception.detection import object_detection
21 | 
22 | TASKS = {
23 |     "instance_segmentation",
24 |     "keypoint_detection",
25 |     "object_detection",
26 |     "semantic_segmentation"
27 | }
28 | 
29 | # Only object_detection is wired up so far; the remaining tasks are
30 | # registered here as their packages gain callable entry points.
31 | TASK2FUNC = {
32 |     "object_detection": object_detection,
33 | }
34 | 
35 | def dispatch_task(task, args):
36 |     if task not in TASK2FUNC:
37 |         raise NotImplementedError(f"task {task} has no entry point yet!")
38 |     return TASK2FUNC[task](args)
39 | 
40 | 
41 | def main(args=sys.argv):
42 |     parser = argparse.ArgumentParser(
43 |         description="A pure python autonomous driving framework",
44 |         prog="command.py")
45 | 
46 |     parser.add_argument(
47 |         "-m", "--model", action="store", type=str, required=False,
48 |         default="", help="model name or weights path")
49 |     parser.add_argument(
50 |         "-s", "--source", action="store", type=str, required=False,
51 |         default="", help="input source: image/video path, stream url, ...")
52 | 
53 |     # check env
54 |     parser.add_argument(
55 |         "--checks", action="store", type=str, required=False,
56 |         default="", help="run environment checks")
57 |     # version
58 |     parser.add_argument(
59 |         "-v", "--version", action="store", type=str, required=False,
60 |         default="", help="print version information")
61 |     # config
62 |     parser.add_argument(
63 |         "--settings", action="store", type=str, required=False,
64 |         default="", help="settings file")
65 |     parser.add_argument(
66 |         "--cfg", action="store", type=str, required=False,
67 |         default="", help="config file")
68 | 
69 |     # Task type
70 |     if len(args) < 2:
71 |         parser.print_help()
72 |         sys.exit(1)
73 |     task = args[1]
74 |     if task not in TASKS:
75 |         # raising a bare string is invalid in Python 3; raise an exception
76 |         raise ValueError("task {} not support!".format(task))
77 | 
78 |     # parameters
79 |     args = parser.parse_args(args[2:])
80 |     dispatch_task(task, args)
81 | 
82 | 
83 | if __name__ == "__main__":
84 |     main()
85 | 
--------------------------------------------------------------------------------
/seahorse/perception/detection/object_detection/yolo.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | 
3 | # Copyright 2025 WheelOS. All Rights Reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
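With the dispatcher above, invoking the CLI programmatically looks like this; the model and source values are placeholders for illustration:

# equivalent shell form: python cli.py object_detection -m yolov8n.pt -s data/demo.mp4
main(["cli.py", "object_detection", "-m", "yolov8n.pt", "-s", "data/demo.mp4"])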
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import torch 22 | import numpy as np 23 | from ultralytics import YOLO 24 | 25 | from .base_detector import ObjectDetector 26 | from .contracts import BoundingBox, DetectionResult, DetectionResults 27 | 28 | 29 | class YOLODetector(ObjectDetector): 30 | """A concrete detector for YOLO models via the 'ultralytics' library.""" 31 | 32 | def __init__(self, model_path: str, device: torch.device = torch.device("cuda")): 33 | self.model_path = model_path 34 | super().__init__(device) 35 | 36 | def _load_model(self) -> YOLO: 37 | """Loads a YOLO model from a given path (e.g., 'yolov8n.pt').""" 38 | print(f"INFO: Loading ultralytics YOLO model '{self.model_path}'...") 39 | return YOLO(self.model_path) 40 | 41 | def __call__(self, image: np.ndarray, verbose: bool = False) -> DetectionResults: 42 | """Overrides the base call to use the library's optimized `predict` method.""" 43 | # The ultralytics library handles preprocessing, inference, and NMS internally. 44 | # It expects BGR numpy arrays. 45 | results = self.model.predict(image, device=self.device, verbose=verbose) 46 | 47 | processed_results = [] 48 | for res in results: 49 | boxes = res.boxes 50 | names = res.names 51 | for box in boxes: 52 | (x1, y1, x2, y2) = box.xyxy[0].int().tolist() 53 | class_id = int(box.cls) 54 | processed_results.append( 55 | DetectionResult( 56 | bounding_box=BoundingBox(x1, y1, x2, y2), 57 | class_id=class_id, 58 | label=names[class_id], 59 | score=float(box.conf), 60 | ) 61 | ) 62 | return processed_results 63 | -------------------------------------------------------------------------------- /seahorse/perception/detection/traffic_light_detection/traffic_light.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import numpy as np 22 | import torch 23 | import random 24 | import time 25 | from .detector import TrafficLightDetectorBase 26 | from .contracts import BoundingBox, TrafficLightResult, TrafficLightResults 27 | 28 | 29 | class TrafficLightDetector(TrafficLightDetectorBase): 30 | """ 31 | A mock detector that simulates traffic light detection for visualization. 32 | It simulates a single traffic light at a fixed position and cycles its state. 
33 | """ 34 | 35 | def __init__(self, device: torch.device): 36 | super().__init__(device) 37 | self.possible_states = ["green", "green", "yellow", "red", "red"] 38 | self.last_change_time = time.time() 39 | self.current_state = "green" 40 | self.cycle_interval = random.uniform(4, 8) # Change state every 4-8 seconds 41 | 42 | def _load_model(self) -> torch.nn.Module: 43 | """No model to load for a mock detector. Returns None.""" 44 | print("INFO: Initializing TrafficLightDetector (no model loaded).") 45 | return None 46 | 47 | def __call__(self, image: np.ndarray) -> TrafficLightResults: 48 | """ 49 | Simulates finding one traffic light and cycling its state every few seconds. 50 | """ 51 | h, w, _ = image.shape 52 | 53 | # Check if it's time to cycle the traffic light state 54 | if time.time() - self.last_change_time > self.cycle_interval: 55 | self.current_state = random.choice(self.possible_states) 56 | self.last_change_time = time.time() 57 | self.cycle_interval = random.uniform(4, 8) 58 | print(f"INFO: Mock traffic light changed to '{self.current_state.upper()}'") 59 | 60 | # Define a fixed, plausible position for the mock traffic light 61 | box = BoundingBox(int(w * 0.75), int(h * 0.05), int(w * 0.8), int(h * 0.25)) 62 | 63 | return [ 64 | TrafficLightResult(bounding_box=box, state=self.current_state, score=0.99) 65 | ] 66 | -------------------------------------------------------------------------------- /seahorse/perception/core/runner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import time 22 | import cv2 23 | from typing import Dict 24 | 25 | from io.source import build_source 26 | from detection import build_object_detector 27 | from visualizer import Visualizer 28 | 29 | 30 | class Runner: 31 | """The task flow orchestrator is responsible for the entire perception-visualization process.""" 32 | 33 | def __init__(self, config: Dict): 34 | self.cfg = config 35 | 36 | print("INFO: Initializing system components...") 37 | # Runner responsible for building all components 38 | self.source = build_source(config["source"]) 39 | self.visualizer = Visualizer(config["visualize"]) 40 | 41 | # Detectors are built using factory functions and are entirely configuration-driven. 42 | self.object_detector = build_object_detector( 43 | config["models"]["object_detector"] 44 | ) 45 | 46 | print("INFO: System initialized successfully.") 47 | 48 | def run(self): 49 | """ 50 | This is the correct location of the loop logic in the original object_detection() function. 51 | """ 52 | prev_time = time.time() 53 | 54 | # 1. 
50 |     def run(self):
51 |         """Main loop: pull frames from the source, run inference, and visualize results."""
52 |         prev_time = time.time()
53 |
54 |         # 1. Retrieve data from the data source
55 |         for frame in self.source:
56 |             current_time = time.time()
57 |             fps = 1 / max(current_time - prev_time, 1e-6)
58 |             prev_time = current_time
59 |
60 |             results = {"fps": fps}
61 |
62 |             # 2. Call the model for inference (via __call__).
63 |             detection_results = self.object_detector(frame)
64 |             results["objects"] = detection_results
65 |
66 |             # 3. Hand the results to the visualizer.
67 |             if self.cfg["visualize"]["enable"]:
68 |                 vis_frame = self.visualizer.draw(frame, results)
69 |                 cv2.imshow(self.cfg["visualize"]["window_name"], vis_frame)
70 |
71 |                 if cv2.waitKey(1) & 0xFF == ord("q"):
72 |                     break
73 |
74 |         self.cleanup()
75 |
76 |     def cleanup(self):
77 |         pass
78 |
-------------------------------------------------------------------------------- /seahorse/perception/io/file_source.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Copyright 2023 daohu527
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | import glob
18 | import os
19 |
20 | from enum import Enum
21 | from pathlib import Path
22 | from urllib.parse import urlparse
23 |
24 | import requests
25 |
26 | from .source import Source
27 |
28 |
29 | def read_url(url):
30 |     """Open a remote source by URL scheme; plain HTTP(S) is fetched here."""
31 |     scheme = urlparse(url).scheme
32 |     if scheme in ("http", "https"):
33 |         return requests.get(url, stream=True).raw
34 |     elif scheme in ("rtsp",):
35 |         # Streaming protocols are handled by stream_source.py.
36 |         pass
37 |
38 |
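# `FileType` and `check_type` are referenced below but defined elsewhere in
# the package; a minimal sketch of the assumed interface (names and values
# here are assumptions, not the project's actual definitions):
#
#   class FileType(Enum):
#       IMAGE = 1
#       VIDEO = 2
#       POINTCLOUD = 3
#       ROSBAG = 4
#       RECORD = 5
#
#   def check_type(path) -> "FileType":
#       """Classify a file by its suffix."""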
39 | class FileSource(Source):
40 |     """Read any type of file: image, point cloud, video, rosbag, or record.
41 |
42 |     Args:
43 |         path: A file path, a directory, a glob pattern, a list of paths, or a
44 |             .txt file containing one path per line.
45 |         file_filter: Optional filter restricting which files are yielded
46 |             (currently unused).
47 |     """
48 |
49 |     def __init__(self, path, file_filter=None):
50 |         # read paths from a text file
51 |         if isinstance(path, str) and Path(path).suffix == '.txt':
52 |             path = Path(path).read_text().splitlines()
53 |
54 |         # collect files from the given path(s)
55 |         files = []
56 |         for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:
57 |             a = str(Path(p).absolute())
58 |             p = Path(p)
59 |             if '*' in a:
60 |                 # Glob pattern specifying a file type, like '*.jpg'
61 |                 files.extend(sorted(glob.glob(a, recursive=True)))
62 |             elif p.is_dir():
63 |                 # Find all file types with '*.*'
64 |                 files.extend(sorted(glob.glob(os.path.join(a, '*.*'))))
65 |             elif p.is_file():
66 |                 files.append(a)
67 |             else:
68 |                 raise FileNotFoundError(f'{p} does not exist!')
69 |
70 |         self.files = files
71 |         self.file_filter = file_filter
72 |
73 |         # bucket files by type
74 |         images = []
75 |         videos = []
76 |         pointclouds = []
77 |         bags = []
78 |         records = []
79 |         for f in files:
80 |             file_type = check_type(f)
81 |             if file_type is FileType.IMAGE:
82 |                 images.append(f)
83 |             elif file_type is FileType.VIDEO:
84 |                 videos.append(f)
85 |             elif file_type is FileType.POINTCLOUD:
86 |                 pointclouds.append(f)
87 |             elif file_type is FileType.ROSBAG:
88 |                 bags.append(f)
89 |             elif file_type is FileType.RECORD:
90 |                 records.append(f)
91 |
92 |         self.images = images
93 |         self.videos = videos
94 |         self.pointclouds = pointclouds
95 |         self.bags = bags
96 |         self.records = records
97 |
98 |     def __iter__(self):
99 |         # Yield (path, file_type) pairs lazily.
100 |         for f in self.files:
101 |             yield f, check_type(f)
102 |
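# Usage sketch (hypothetical paths):
#
#   source = FileSource("data/images/*.jpg")
#   for path, file_type in source:
#       if file_type is FileType.IMAGE:
#           ...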
"comments": "on", 74 | "strings": "on", 75 | "other": "on" 76 | } 77 | }, 78 | // Python (Black uses 88) 79 | "[python]": { 80 | "editor.defaultFormatter": "ms-python.black-formatter", 81 | "editor.formatOnSave": true, 82 | "editor.rulers": [ 83 | 88 84 | ] 85 | }, 86 | // C++ (Google style, 80 char limit) 87 | "[cpp]": { 88 | "editor.defaultFormatter": "xaver.clang-format", 89 | "editor.rulers": [ 90 | 80 91 | ] 92 | }, 93 | // —————————————————————— 94 | // ESLint 95 | // —————————————————————— 96 | "eslint.validate": [ 97 | "javascript", 98 | "javascriptreact", 99 | "typescript", 100 | "typescriptreact" 101 | ], 102 | // —————————————————————— 103 | // Terminal 104 | // —————————————————————— 105 | "terminal.integrated.defaultProfile.linux": "bash", 106 | "terminal.integrated.defaultProfile.windows": "PowerShell", 107 | // —————————————————————— 108 | // TypeScript Import Handling 109 | // —————————————————————— 110 | "typescript.updateImportsOnFileMove.enabled": "always" 111 | } 112 | -------------------------------------------------------------------------------- /seahorse/perception/detection/lane_detection/test_lane.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import os 22 | import numpy as np 23 | from PIL import Image, ImageDraw 24 | 25 | from .spline_detector import SplineLaneDetector 26 | 27 | if __name__ == "__main__": 28 | model_file = "deeplabv3_resnet101_scripted.pt" 29 | image_file = "image.png" 30 | 31 | # 1. Check if the required file exists. 32 | if not os.path.exists(model_file): 33 | print(f"Error: Model file '{model_file}' not found.") 34 | print( 35 | "Please run the export script first to convert the .pt file to TorchScript format." 36 | ) 37 | elif not os.path.exists(image_file): 38 | print(f"Error: Image file '{image_file}' not found.") 39 | print( 40 | "Please download the zidane.jpg file: wget https://ultralytics.com/images/zidane.jpg" 41 | ) 42 | else: 43 | # 2. Instantiate Detector 44 | print("Instantiating detector...") 45 | lane_detector = SplineLaneDetector(model_path=model_file) 46 | print("Detector instantiated successfully.") 47 | 48 | # 3. Load Image 49 | print(f"Loading image '{image_file}'...") 50 | pil_image = Image.open(image_file).convert("RGB") 51 | # The detector requires a BGR NumPy array 52 | bgr_numpy_image = np.array(pil_image)[:, :, ::-1] 53 | 54 | # --- 5. Perform lane detection --- 55 | print("Performing lane detection...") 56 | lanes = lane_detector(bgr_numpy_image) 57 | print( 58 | f"Detection completed. {len(lanes)} lane/road contours extracted successfully!" 59 | ) 60 | 61 | # --- 6. 
Visualize results --- 62 | print("Generating final visualization...") 63 | viz_image = pil_image.copy() 64 | draw = ImageDraw.Draw(viz_image) 65 | 66 | if lanes: 67 | for lane_contour in lanes: 68 | # Convert (N, 2) contour points to format required by ImageDraw.line 69 | points_list = lane_contour.flatten().tolist() 70 | draw.line(points_list, fill=(255, 0, 255), width=4) # Draw in magenta 71 | else: 72 | print("Warning: No lanes detected in the image.") 73 | 74 | # --- 7. Save the final result --- 75 | output_filename = "torchvision_lane_output.png" 76 | viz_image.save(output_filename) 77 | print(f"\nLane detection result successfully saved as '{output_filename}'") 78 | print( 79 | "Please check the output image — you should now see the road contours detected by DeepLabV3." 80 | ) 81 | -------------------------------------------------------------------------------- /seahorse/perception/detection/object_detection/test_detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import os 22 | import argparse 23 | import numpy as np 24 | from PIL import Image, ImageDraw, ImageFont 25 | 26 | from .detector import Detector 27 | 28 | 29 | def main(): 30 | parser = argparse.ArgumentParser(description="YOLO TorchScript Detection Example") 31 | parser.add_argument( 32 | "--model", 33 | type=str, 34 | default="yolo11n.torchscript", 35 | help="Path to the TorchScript model file (default: yolo11n.torchscript)", 36 | ) 37 | parser.add_argument( 38 | "--image", type=str, required=True, help="Path to the image file to detect" 39 | ) 40 | args = parser.parse_args() 41 | 42 | model_file = args.model 43 | image_file = args.image 44 | 45 | # 1. Check if the required files exist 46 | if not os.path.exists(model_file): 47 | print(f"Error: Model file '{model_file}' not found.") 48 | return 49 | if not os.path.exists(image_file): 50 | print(f"Error: Image file '{image_file}' not found.") 51 | return 52 | 53 | # 2. Instantiate the detector 54 | print("Instantiating detector...") 55 | yolo_detector = Detector(model_path=model_file) 56 | print("Detector instantiated successfully.") 57 | 58 | # 3. Read image and perform inference 59 | print(f"Reading image '{image_file}'...") 60 | pil_image = Image.open(image_file).convert("RGB") 61 | rgb_numpy_image = np.array(pil_image) 62 | bgr_numpy_image = rgb_numpy_image[:, :, ::-1].copy() 63 | 64 | print("Performing object detection inference...") 65 | detections = yolo_detector(bgr_numpy_image) 66 | print("Inference completed.") 67 | 68 | # 4. 
Print and visualize results 69 | print("\n--- Detection Results ---") 70 | if detections: 71 | draw = ImageDraw.Draw(pil_image) 72 | 73 | try: 74 | font = ImageFont.truetype("arial.ttf", 20) 75 | except IOError: 76 | font = ImageFont.load_default() 77 | 78 | for det in detections: 79 | print(det) 80 | b = det.bounding_box 81 | color_rgb = tuple(det.color) 82 | 83 | # Draw bounding box 84 | box_coords = [b.x1, b.y1, b.x2, b.y2] 85 | draw.rectangle(box_coords, outline=color_rgb, width=3) 86 | 87 | label = f"{det.label}: {det.score:.2f}" 88 | 89 | text_bbox = draw.textbbox((0, 0), label, font=font) 90 | text_width = text_bbox[2] - text_bbox[0] 91 | text_height = text_bbox[3] - text_bbox[1] 92 | 93 | # Draw text background 94 | text_bg_coords = [b.x1, b.y1 - text_height - 5, b.x1 + text_width + 4, b.y1] 95 | draw.rectangle(text_bg_coords, fill=color_rgb) 96 | 97 | # Draw text 98 | draw.text( 99 | (b.x1 + 2, b.y1 - text_height - 3), label, fill="white", font=font 100 | ) 101 | 102 | # 5. Save result image 103 | base_name, ext = os.path.splitext(os.path.basename(image_file)) 104 | output_filename = f"{base_name}_detected{ext}" 105 | pil_image.save(output_filename) 106 | print(f"\nResult image saved as '{output_filename}'") 107 | else: 108 | print("No objects detected.") 109 | 110 | 111 | if __name__ == "__main__": 112 | main() 113 | -------------------------------------------------------------------------------- /seahorse/perception/detection/multitask/test_detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import os 22 | import argparse 23 | import numpy as np 24 | from PIL import Image, ImageDraw, ImageFont 25 | 26 | from .yoloe_detector import YOLOESegDetector 27 | from .class_names import YOLOE_LVIS_CLASSES 28 | 29 | 30 | def main(): 31 | parser = argparse.ArgumentParser( 32 | description="YOLOE Segmentation Detection (Final Version)" 33 | ) 34 | parser.add_argument("--model", type=str, default="yoloe-11s-seg-pf.torchscript") 35 | parser.add_argument("--image", type=str, required=True) 36 | parser.add_argument("--conf", type=float, default=0.25, help="Confidence threshold") 37 | args = parser.parse_args() 38 | 39 | # 1. Check files 40 | if not os.path.exists(args.model) or not os.path.exists(args.image): 41 | print("Error: Model or image file not found.") 42 | return 43 | 44 | # 2. Instantiate detector 45 | print("Instantiating YOLOE detector...") 46 | try: 47 | yolo_detector = YOLOESegDetector( 48 | model_path=args.model, class_names=YOLOE_LVIS_CLASSES, conf_thresh=args.conf 49 | ) 50 | except Exception as e: 51 | print(f"Failed to instantiate detector: {e}") 52 | return 53 | print( 54 | f"Detector successfully instantiated with {yolo_detector.num_classes} known classes." 55 | ) 56 | 57 | # 3. 
Read image and perform inference 58 | print(f"Reading image '{args.image}'...") 59 | pil_image = Image.open(args.image).convert("RGB") 60 | # convert to numpy array in BGR format, since detector expects BGR 61 | rgb_numpy = np.array(pil_image) 62 | bgr_numpy = rgb_numpy[:, :, ::-1] # swap channels from RGB to BGR 63 | 64 | print("Running object detection and segmentation inference...") 65 | detections = yolo_detector(bgr_numpy) 66 | print("Inference completed.") 67 | 68 | # 4. Visualize results 69 | print("\n--- Detection Results ---") 70 | if detections: 71 | # go back to RGB for visualization 72 | viz_numpy = rgb_numpy.copy() 73 | mask_overlay = viz_numpy.astype(np.float32) 74 | alpha = 0.4 75 | for det in detections: 76 | if det.mask is not None: 77 | color_bgr = np.array(det.color, dtype=np.uint8) 78 | color_rgb = color_bgr[::-1] 79 | bool_mask = det.mask.astype(bool) 80 | mask_overlay[bool_mask] = (1 - alpha) * mask_overlay[ 81 | bool_mask 82 | ] + alpha * color_rgb 83 | 84 | viz_numpy = np.round(mask_overlay).astype(np.uint8) 85 | pil_image = Image.fromarray(viz_numpy) 86 | draw = ImageDraw.Draw(pil_image) 87 | try: 88 | font = ImageFont.truetype("arial.ttf", 15) 89 | except IOError: 90 | font = ImageFont.load_default() 91 | for det in detections: 92 | print(f"- Detected: {det.label} (Confidence: {det.score:.2f})") 93 | b = det.bounding_box 94 | color_rgb_tuple = tuple(det.color[::-1]) 95 | box_coords = [b.x_min, b.y_min, b.x_max, b.y_max] 96 | draw.rectangle(box_coords, outline=color_rgb_tuple, width=3) 97 | label = f"{det.label}: {det.score:.2f}" 98 | text_bbox = draw.textbbox((b.x_min, b.y_min), label, font=font) 99 | text_height = text_bbox[3] - text_bbox[1] 100 | text_pos = (b.x_min, b.y_min - text_height - 2) 101 | draw.rectangle( 102 | [text_pos, (text_pos[0] + text_bbox[2], text_pos[1] + text_height)], 103 | fill=color_rgb_tuple, 104 | ) 105 | draw.text(text_pos, label, fill="white", font=font) 106 | 107 | # 5. Save result 108 | output_filename = f"{os.path.splitext(args.image)[0]}_detected.png" 109 | pil_image.save(output_filename) 110 | print(f"\nResult image saved as '{output_filename}'") 111 | else: 112 | print("No objects detected.") 113 | 114 | 115 | if __name__ == "__main__": 116 | main() 117 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | . 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. 
Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. 129 | -------------------------------------------------------------------------------- /seahorse/perception/detection/lane_detection/spline_detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # Created Date: 2025-11-06 18 | # Author: daohu527 19 | 20 | 21 | import torch 22 | import numpy as np 23 | 24 | from .detector import LaneDetectorBase 25 | from .contracts import LaneResults 26 | 27 | 28 | class SplineLaneDetector(LaneDetectorBase): 29 | def __init__(self, model_path: str, device=None): 30 | """ 31 | :param model_path: The path to the TorchScript model file (.pt). 
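        :param device: Optional torch.device; defaults to CUDA when available,
            otherwise CPU.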
32 | """ 33 | self.device = ( 34 | device 35 | if device 36 | else torch.device("cuda" if torch.cuda.is_available() else "cpu") 37 | ) 38 | self.model = self._load_model(model_path) 39 | 40 | self.ROAD_CLASS_ID = 0 41 | self.MIN_LANE_CONTOUR_LENGTH = 50 42 | 43 | def _load_model(self, model_path: str): 44 | """Load TorchScript models from local storage.""" 45 | print(f"INFO: Loading TorchScript model from: {model_path}") 46 | 47 | model = torch.jit.load(model_path, map_location=self.device) 48 | 49 | model.eval() 50 | print("INFO: TorchScript model loaded successfully.") 51 | return model 52 | 53 | def _preprocess(self, image: np.ndarray) -> torch.Tensor: 54 | """ 55 | Prepare images for the TorchVision model: convert to tensors and normalize. 56 | """ 57 | # BGR (NumPy format) -> RGB 58 | rgb_image = image[:, :, ::-1].copy() 59 | 60 | # 1. Convert the NumPy array (H, W, C) [0, 255] to a PyTorch tensor (C, H, W) [0.0, 1.0]. 61 | tensor = torch.from_numpy(rgb_image.astype(np.float32)).permute(2, 0, 1) / 255.0 62 | 63 | # 2. Normalization was performed using the mean and standard deviation from ImageNet. 64 | mean = torch.tensor([0.485, 0.456, 0.406]).view(-1, 1, 1) 65 | std = torch.tensor([0.229, 0.224, 0.225]).view(-1, 1, 1) 66 | normalized_tensor = (tensor - mean) / std 67 | 68 | # 3. Add batch dimension (C, H, W) -> (N, C, H, W) 69 | return normalized_tensor.unsqueeze(0) 70 | 71 | def _find_contours_native(self, binary_mask: np.ndarray) -> list[np.ndarray]: 72 | """ 73 | A native implementation of the Moore-Neighbor Tracing algorithm. 74 | This function finds the outer boundary of white regions in a binary image. 75 | """ 76 | contours = [] 77 | # Add 1 pixel of padding to properly handle the outline on the boundary. 78 | padded_mask = np.pad( 79 | binary_mask, pad_width=1, mode="constant", constant_values=0 80 | ) 81 | 82 | # Define 8 neighborhood directions in (y, x) format, starting from west and clockwise. 83 | neighbors = [ 84 | (0, -1), 85 | (-1, -1), 86 | (-1, 0), 87 | (-1, 1), 88 | (0, 1), 89 | (1, 1), 90 | (1, 0), 91 | (1, -1), 92 | ] 93 | 94 | # Scan pixels to find the starting point of the contour 95 | for r in range(padded_mask.shape[0]): 96 | for c in range(padded_mask.shape[1]): 97 | # The starting point is a white pixel, and its left side is a black pixel (make sure it is the outer contour). 98 | if padded_mask[r, c] == 255 and padded_mask[r, c - 1] == 0: 99 | contour = [] 100 | start_pos = (r, c) 101 | current_pos = start_pos 102 | # We enter from the black pixel on the left, so the first neighbor in the counter-clockwise direction is the starting point of the search. 103 | search_dir_start = ( 104 | 7 # Start searching from the top left (NW) direction 105 | ) 106 | 107 | while True: 108 | found_next = False 109 | # Starting from the initial search direction, iterate through 8 neighbors clockwise. 110 | for i in range(8): 111 | check_dir = (search_dir_start + i) % 8 112 | dr, dc = neighbors[check_dir] 113 | next_pos = (current_pos[0] + dr, current_pos[1] + dc) 114 | 115 | if padded_mask[next_pos] == 255: 116 | # Next boundary point found 117 | # Update the starting direction for the next search 118 | search_dir_start = ( 119 | check_dir + 5 120 | ) % 8 # Avoid returning immediately 121 | current_pos = next_pos 122 | found_next = True 123 | break 124 | 125 | if not found_next: 126 | # Isolated pixels 127 | contour.append(start_pos) 128 | break 129 | 130 | # If you return to the starting point, the outline is closed. 
131 |                         if current_pos == start_pos:
132 |                             break
133 |                         else:
134 |                             contour.append(current_pos)
135 |
136 |                     if contour:
137 |                         # Remove padding offsets and convert (r, c) to (x, y) format
138 |                         final_contour = np.array(contour)[:, [1, 0]] - 1
139 |                         contours.append(final_contour.astype(np.int32))
140 |         return contours
141 |
-------------------------------------------------------------------------------- /CONTRIBUTING.md: --------------------------------------------------------------------------------
1 | # Contributing to seahorse
2 |
3 | First off, thanks for taking the time to contribute!
4 |
5 | All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions.
6 |
7 | > And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
8 | > - Star the project
9 | > - Tweet about it
10 | > - Refer this project in your project's readme
11 | > - Mention the project at local meetups and tell your friends/colleagues
12 |
13 |
14 | ## Table of Contents
15 |
16 | - [Code of Conduct](#code-of-conduct)
17 | - [I Have a Question](#i-have-a-question)
18 | - [I Want To Contribute](#i-want-to-contribute)
19 |   - [Reporting Bugs](#reporting-bugs)
20 |   - [Suggesting Enhancements](#suggesting-enhancements)
21 |   - [Your First Code Contribution](#your-first-code-contribution)
22 |   - [Improving The Documentation](#improving-the-documentation)
23 | - [Styleguides](#styleguides)
24 |   - [Commit Messages](#commit-messages)
25 | - [Join The Project Team](#join-the-project-team)
26 |
27 |
28 | ## Code of Conduct
29 |
30 | This project and everyone participating in it is governed by the
31 | [seahorse Code of Conduct](blob/master/CODE_OF_CONDUCT.md).
32 | By participating, you are expected to uphold this code. Please report unacceptable behavior
33 | to <>.
34 |
35 |
36 | ## I Have a Question
37 |
38 | > If you want to ask a question, we assume that you have read the available [Documentation]().
39 |
40 | Before you ask a question, it is best to search for existing [Issues](/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue.
It is also advisable to search the internet for answers first. 41 | 42 | If you then still feel the need to ask a question and need clarification, we recommend the following: 43 | 44 | - Open an [Issue](/issues/new). 45 | - Provide as much context as you can about what you're running into. 46 | - Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant. 47 | 48 | We will then take care of the issue as soon as possible. 49 | 50 | 51 | 52 | ## I Want To Contribute 53 | 54 | > ### Legal Notice 55 | > When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license. 56 | 57 | ### Reporting Bugs 58 | 59 | 60 | #### Before Submitting a Bug Report 61 | 62 | A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible. 63 | 64 | - Make sure that you are using the latest version. 65 | - Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)). 66 | - To see if other users have experienced (and potentially already solved) the same issue you are having, check if there is not already a bug report existing for your bug or error in the [bug tracker](issues?q=label%3Abug). 67 | - Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue. 68 | - Collect information about the bug: 69 | - Stack trace (Traceback) 70 | - OS, Platform and Version (Windows, Linux, macOS, x86, ARM) 71 | - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant. 72 | - Possibly your input and the output 73 | - Can you reliably reproduce the issue? And can you also reproduce it with older versions? 74 | 75 | 76 | #### How Do I Submit a Good Bug Report? 77 | 78 | > You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to <>. 79 | 80 | 81 | We use GitHub issues to track bugs and errors. If you run into an issue with the project: 82 | 83 | - Open an [Issue](/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.) 84 | - Explain the behavior you would expect and the actual behavior. 85 | - Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case. 86 | - Provide the information you collected in the previous section. 87 | 88 | Once it's filed: 89 | 90 | - The project team will label the issue accordingly. 91 | - A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. 
Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
92 | - If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
93 |
94 |
95 |
96 |
97 | ### Suggesting Enhancements
98 |
99 | This section guides you through submitting an enhancement suggestion for seahorse, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
100 |
101 |
102 | #### Before Submitting an Enhancement
103 |
104 | - Make sure that you are using the latest version.
105 | - Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
106 | - Perform a [search](/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
107 | - Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
108 |
109 |
110 | #### How Do I Submit a Good Enhancement Suggestion?
111 |
112 | Enhancement suggestions are tracked as [GitHub issues](/issues).
113 |
114 | - Use a **clear and descriptive title** for the issue to identify the suggestion.
115 | - Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
116 | - **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
117 | - You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
118 | - **Explain why this enhancement would be useful** to most seahorse users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
119 |
120 |
121 |
122 | ### Your First Code Contribution
123 |
124 |
125 | ### Improving The Documentation
126 |
127 |
128 | ## Styleguides
129 | ### Commit Messages
130 |
131 |
132 | ## Join The Project Team
133 |
134 |
135 |
136 | ## Attribution
137 | This guide is based on the **contributing.md**. [Make your own](https://contributing.md/)!
138 | -------------------------------------------------------------------------------- /seahorse/perception/detection/object_detection/test_detector_node.py: -------------------------------------------------------------------------------- 1 | import os 2 | import argparse 3 | import numpy as np 4 | from PIL import Image, ImageDraw, ImageFont 5 | 6 | from .detector import Detector 7 | 8 | from cyber.python.cyber_py3 import cyber 9 | from modules.common_msgs.sensor_msgs.sensor_image_pb2 import Image as PbImage 10 | 11 | 12 | def process_and_draw_detections(detector, bgr_numpy_image): 13 | """ 14 | Perform object detection on a single BGR NumPy image and return a PIL image with results drawn. 15 | 16 | Args: 17 | detector: An instantiated Detector object. 18 | bgr_numpy_image: A NumPy array image in BGR format. 19 | 20 | Returns: 21 | A PIL.Image object with detections drawn on it. 22 | """ 23 | print("Performing object detection inference...") 24 | detections = detector(bgr_numpy_image) 25 | print("Inference completed.") 26 | 27 | # Convert BGR NumPy image back to RGB PIL image for drawing 28 | rgb_numpy_image = bgr_numpy_image[:, :, ::-1].copy() 29 | pil_image = Image.fromarray(rgb_numpy_image) 30 | 31 | if not detections: 32 | print("No objects detected.") 33 | return pil_image 34 | 35 | draw = ImageDraw.Draw(pil_image) 36 | try: 37 | font = ImageFont.truetype("arial.ttf", 20) 38 | except IOError: 39 | font = ImageFont.load_default() 40 | 41 | print(f"Detected {len(detections)} objects.") 42 | for det in detections: 43 | print(det) 44 | b = det.bounding_box 45 | color_rgb = tuple(det.color) 46 | 47 | # Draw bounding box 48 | box_coords = [b.x1, b.y1, b.x2, b.y2] 49 | draw.rectangle(box_coords, outline=color_rgb, width=3) 50 | 51 | # Prepare label text 52 | label = f"{det.label}: {det.score:.2f}" 53 | text_bbox = draw.textbbox((b.x1, b.y1), label, font=font) 54 | text_width = text_bbox[2] - text_bbox[0] 55 | text_height = text_bbox[3] - text_bbox[1] 56 | 57 | # Draw text background 58 | text_bg_coords = [b.x1, b.y1 - text_height - 5, b.x1 + text_width + 4, b.y1] 59 | draw.rectangle(text_bg_coords, fill=color_rgb) 60 | 61 | # Draw text 62 | draw.text((b.x1 + 2, b.y1 - text_height - 3), label, fill="white", font=font) 63 | 64 | return pil_image 65 | 66 | 67 | class YoloCyberNode: 68 | """ 69 | A Cyber node encapsulating YOLO detection. 70 | Subscribes to an image topic, performs detection, and publishes results to another topic. 71 | """ 72 | 73 | def __init__(self, model_path, input_channel, output_channel): 74 | """ 75 | Initialize node, detector, publisher, and subscriber. 76 | """ 77 | cyber.init() 78 | self.node = cyber.Node("yolo_detection_node") 79 | 80 | print("Instantiating detector...") 81 | self.detector = Detector(model_path=model_path) 82 | print("Detector instantiated successfully.") 83 | 84 | self.output_channel = output_channel 85 | self.input_channel = input_channel 86 | 87 | # Create publisher for processed images 88 | self.publisher = self.node.create_writer(self.output_channel, PbImage) 89 | 90 | # Create subscriber and bind callback function 91 | self.node.create_reader(self.input_channel, PbImage, self.image_callback) 92 | 93 | print(f"Subscribed to image topic: '{self.input_channel}'") 94 | print(f"Results will be published to topic: '{self.output_channel}'") 95 | 96 | def image_callback(self, image_msg): 97 | """ 98 | Callback function to handle incoming image messages. 
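        Decodes the protobuf image into a NumPy array, runs the detector,
        draws the detections, and republishes the annotated frame.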
99 | """ 100 | print("-" * 80) 101 | print( 102 | f"Received image message from topic '{self.input_channel}', timestamp: {image_msg.header.timestamp_sec}" 103 | ) 104 | 105 | # 1. Parse Cyber image message (from bytes to NumPy array) 106 | # Assume 3 channels (RGB) 107 | # Use np.frombuffer instead of deprecated np.fromstring 108 | if image_msg.encoding == "rgb8" or image_msg.encoding == "bgr8": 109 | channel_num = 3 110 | else: # 'gray', 'y', etc. 111 | channel_num = 1 112 | 113 | np_array = np.frombuffer(image_msg.data, dtype=np.uint8) 114 | image_reshaped = np_array.reshape( 115 | (image_msg.height, image_msg.width, channel_num) 116 | ) 117 | 118 | # 2. Convert image from RGB to BGR since the detector expects BGR format 119 | if image_msg.encoding == "rgb8": 120 | bgr_image = image_reshaped[:, :, ::-1].copy() 121 | else: # Assume it's already BGR or grayscale 122 | bgr_image = image_reshaped 123 | 124 | # 3. Perform detection and draw results 125 | result_pil_image = process_and_draw_detections(self.detector, bgr_image) 126 | 127 | # 4. Build Cyber image message for publishing 128 | result_np_array = np.array(result_pil_image) 129 | 130 | output_msg = PbImage() 131 | output_msg.header.CopyFrom(image_msg.header) # Keep timestamp and frame_id 132 | output_msg.frame_id = image_msg.frame_id 133 | output_msg.measurement_time = image_msg.measurement_time 134 | output_msg.encoding = "rgb8" 135 | output_msg.width = result_np_array.shape[1] 136 | output_msg.height = result_np_array.shape[0] 137 | output_msg.data = result_np_array.tobytes() 138 | output_msg.step = output_msg.width * 3 # 3 bytes per pixel (RGB) 139 | 140 | # 5. Publish results 141 | self.publisher.write(output_msg) 142 | print(f"Published processed result to topic '{self.output_channel}'.") 143 | 144 | def spin(self): 145 | """ 146 | Start the node and keep it running, allowing graceful shutdown. 147 | """ 148 | print("\nCyber node started. Press Ctrl+C to exit.") 149 | try: 150 | self.node.spin() 151 | except KeyboardInterrupt: 152 | print("User interrupt detected. 
Shutting down...") 153 | finally: 154 | print("Shutting down Cyber node...") 155 | cyber.shutdown() 156 | 157 | 158 | def main(): 159 | parser = argparse.ArgumentParser( 160 | description="YOLO TorchScript detection demo (File and Cyber modes)" 161 | ) 162 | parser.add_argument( 163 | "--model", 164 | type=str, 165 | default="yolov5s.torchscript", 166 | help="Path to TorchScript model file", 167 | ) 168 | parser.add_argument( 169 | "--mode", 170 | type=str, 171 | choices=["file", "cyber"], 172 | required=True, 173 | help="Run mode: 'file' reads from local file, 'cyber' subscribes via Cyber topic", 174 | ) 175 | 176 | # File mode arguments 177 | parser.add_argument( 178 | "--image", type=str, help="[File mode] Path to input image file" 179 | ) 180 | 181 | # Cyber mode arguments 182 | parser.add_argument( 183 | "--input_channel", 184 | type=str, 185 | default="/apollo/sensor/camera/front_6mm/image", 186 | help="[Cyber mode] Input image Cyber topic", 187 | ) 188 | parser.add_argument( 189 | "--output_channel", 190 | type=str, 191 | default="/apollo/perception/yolo_image_detections", 192 | help="[Cyber mode] Output Cyber topic for images with detections", 193 | ) 194 | 195 | args = parser.parse_args() 196 | 197 | # Check if model file exists 198 | if not os.path.exists(args.model) and args.mode == "file": 199 | print(f"Warning: Model file '{args.model}' not found.") 200 | 201 | if args.mode == "file": 202 | # --- File mode --- 203 | if not args.image: 204 | print("Error: '--image' argument is required in 'file' mode.") 205 | return 206 | if not os.path.exists(args.image): 207 | print(f"Error: Image file '{args.image}' not found.") 208 | return 209 | 210 | print("Running in [File mode]...") 211 | 212 | # Instantiate detector 213 | print("Instantiating detector...") 214 | yolo_detector = Detector(model_path=args.model) 215 | print("Detector instantiated successfully.") 216 | 217 | # Read image 218 | print(f"Reading image '{args.image}'...") 219 | pil_image = Image.open(args.image).convert("RGB") 220 | rgb_numpy_image = np.array(pil_image) 221 | bgr_numpy_image = rgb_numpy_image[:, :, ::-1].copy() 222 | 223 | # Perform detection and drawing 224 | pil_result_image = process_and_draw_detections(yolo_detector, bgr_numpy_image) 225 | 226 | # Save result image 227 | base_name, ext = os.path.splitext(os.path.basename(args.image)) 228 | output_filename = f"{base_name}_detected{ext}" 229 | pil_result_image.save(output_filename) 230 | print(f"\nResult image saved as '{output_filename}'") 231 | 232 | elif args.mode == "cyber": 233 | print("Running in [Cyber mode]...") 234 | node = YoloCyberNode( 235 | model_path=args.model, 236 | input_channel=args.input_channel, 237 | output_channel=args.output_channel, 238 | ) 239 | node.spin() 240 | 241 | 242 | if __name__ == "__main__": 243 | main() 244 | -------------------------------------------------------------------------------- /seahorse/perception/detection/multitask/yoloe_detector.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2025 WheelOS. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # Created Date: 2025-11-06
18 | # Author: daohu527
19 |
20 |
21 | import torch
22 | import torch.nn.functional as F
23 | import numpy as np
24 | from typing import Dict, Tuple
25 |
26 | from .base_detector import ObjectDetector
27 | from .contracts import BoundingBox, DetectionResult, DetectionResults
28 |
29 |
30 | class YOLOESegDetector(ObjectDetector):
31 |     def __init__(
32 |         self,
33 |         model_path: str,
34 |         class_names: Dict[int, str],
35 |         img_size: int = 640,
36 |         conf_thresh: float = 0.25,
37 |         iou_thresh: float = 0.45,
38 |         device: torch.device = torch.device(
39 |             "cuda" if torch.cuda.is_available() else "cpu"
40 |         ),
41 |     ):
42 |         if not class_names or not isinstance(class_names, dict):
43 |             raise ValueError("`class_names` must be a non-empty dictionary.")
44 |
45 |         self.model_path = model_path
46 |         self.img_size = img_size
47 |         self.conf_thresh = conf_thresh
48 |         self.iou_thresh = iou_thresh
49 |
50 |         self.class_names = class_names
51 |         self.num_classes = len(class_names)
52 |         self.num_coeffs = 32
53 |
54 |         np.random.seed(42)
55 |         colors_raw = np.random.randint(
56 |             0, 255, size=(self.num_classes, 3), dtype=np.uint8
57 |         )
58 |         self.colors = {
59 |             name: color.tolist()
60 |             for name, color in zip(self.class_names.values(), colors_raw)
61 |         }
62 |
63 |         super().__init__(device)
64 |
65 |     def _load_model(self) -> torch.nn.Module:
66 |         print(f"INFO: Loading TorchScript model from '{self.model_path}'...")
67 |         model = torch.jit.load(self.model_path, map_location=self.device)
68 |         return model.eval()
69 |
70 |     def _preprocess(self, image: np.ndarray) -> Tuple[torch.Tensor, tuple, tuple]:
71 |         image_rgb = image[..., ::-1]
72 |         h0, w0 = image_rgb.shape[:2]
73 |         original_shape = (h0, w0)
74 |         tensor = (
75 |             torch.from_numpy(np.ascontiguousarray(image_rgb))
76 |             .to(self.device)
77 |             .float()
78 |             .permute(2, 0, 1)
79 |         )
80 |         tensor /= 255.0
81 |         r = self.img_size / max(h0, w0)
82 |         if r != 1:
83 |             new_h, new_w = int(h0 * r), int(w0 * r)
84 |             tensor = F.interpolate(
85 |                 tensor.unsqueeze(0),
86 |                 size=(new_h, new_w),
87 |                 mode="bilinear",
88 |                 align_corners=False,
89 |             ).squeeze(0)
90 |         h, w = tensor.shape[1:]
91 |         dh, dw = self.img_size - h, self.img_size - w
92 |         top, bottom = dh // 2, dh - (dh // 2)
93 |         left, right = dw // 2, dw - (dw // 2)
94 |         padded_tensor = F.pad(
95 |             tensor, (left, right, top, bottom), mode="constant", value=114 / 255.0
96 |         )
97 |         model_input_shape = (self.img_size, self.img_size)
98 |         return padded_tensor.unsqueeze(0), original_shape, model_input_shape
99 |
100 |     def _non_max_suppression(
101 |         self, boxes: torch.Tensor, scores: torch.Tensor, class_indices: torch.Tensor
102 |     ) -> torch.Tensor:
103 |         max_coord = boxes.max()
104 |         offsets = class_indices.to(boxes.dtype) * (max_coord + 1)
105 |         boxes_for_nms = boxes + offsets[:, None]
106 |         x1, y1, x2, y2 = (
107 |             boxes_for_nms[:, 0],
108 |             boxes_for_nms[:, 1],
109 |             boxes_for_nms[:, 2],
110 |             boxes_for_nms[:, 3],
111 |         )
112 |         areas = (x2 - x1) * (y2 - y1)
113 |         order = scores.argsort(descending=True)
114 |         keep = []
115 |         while order.numel() > 0:
116 |             i = order[0]
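            # Greedily keep the highest-scoring remaining box; the class-offset
            # trick above ensures boxes only suppress others of the same class.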
117 |             keep.append(i)
118 |             if order.numel() == 1:
119 |                 break
120 |             xx1 = torch.maximum(x1[i], x1[order[1:]])
121 |             yy1 = torch.maximum(y1[i], y1[order[1:]])
122 |             xx2 = torch.minimum(x2[i], x2[order[1:]])
123 |             yy2 = torch.minimum(y2[i], y2[order[1:]])
124 |             inter_w = torch.clamp(xx2 - xx1, min=0.0)
125 |             inter_h = torch.clamp(yy2 - yy1, min=0.0)
126 |             inter = inter_w * inter_h
127 |             ovr = inter / (areas[i] + areas[order[1:]] - inter + 1e-6)
128 |             inds = torch.where(ovr <= self.iou_thresh)[0]
129 |             order = order[inds + 1]
130 |         return (
131 |             torch.tensor(keep, dtype=torch.long, device=boxes.device)
132 |             if keep
133 |             else torch.tensor([], dtype=torch.long)
134 |         )
135 |
136 |     def _xywh2xyxy(self, x: torch.Tensor) -> torch.Tensor:
137 |         y = x.clone()
138 |         y[:, 0] = x[:, 0] - x[:, 2] / 2
139 |         y[:, 1] = x[:, 1] - x[:, 3] / 2
140 |         y[:, 2] = x[:, 0] + x[:, 2] / 2
141 |         y[:, 3] = x[:, 1] + x[:, 3] / 2
142 |         return y
143 |
144 |     def _scale_coords(
145 |         self, model_input_shape: tuple, coords: torch.Tensor, original_shape: tuple
146 |     ):
147 |         h_model, w_model = model_input_shape
148 |         h_orig, w_orig = original_shape
149 |         gain = min(h_model / h_orig, w_model / w_orig)
150 |         pad_w, pad_h = (w_model - w_orig * gain) / 2, (h_model - h_orig * gain) / 2
151 |         coords[:, [0, 2]] -= pad_w
152 |         coords[:, [1, 3]] -= pad_h
153 |         coords[:, :4] /= gain
154 |         coords[:, [0, 2]] = coords[:, [0, 2]].clamp(0, w_orig)
155 |         coords[:, [1, 3]] = coords[:, [1, 3]].clamp(0, h_orig)
156 |
157 |     def _process_masks(
158 |         self,
159 |         low_res_masks: torch.Tensor,
160 |         model_input_shape: tuple,
161 |         original_shape: tuple,
162 |     ) -> np.ndarray:
163 |         h_model, w_model = model_input_shape
164 |         h_orig, w_orig = original_shape
165 |         gain = min(h_model / h_orig, w_model / w_orig)
166 |         pad_w, pad_h = (w_model - w_orig * gain) / 2, (h_model - h_orig * gain) / 2
167 |         top, left = int(round(pad_h - 0.1)), int(round(pad_w - 0.1))
168 |         bottom, right = int(round(h_model - pad_h + 0.1)), int(
169 |             round(w_model - pad_w + 0.1)
170 |         )
171 |         masks_upsampled = F.interpolate(
172 |             low_res_masks.unsqueeze(1),
173 |             size=(h_model, w_model),
174 |             mode="bilinear",
175 |             align_corners=False,
176 |         )
177 |         masks_cropped = masks_upsampled[:, :, top:bottom, left:right]
178 |         final_masks = F.interpolate(
179 |             masks_cropped, size=(h_orig, w_orig), mode="bilinear", align_corners=False
180 |         ).squeeze(1)
181 |         final_masks = torch.sigmoid(final_masks)
182 |         return (final_masks > 0.5).cpu().numpy().astype(np.uint8)
183 |
184 |     def _postprocess(
185 |         self, outputs: tuple, original_shape: tuple, model_input_shape: tuple
186 |     ) -> DetectionResults:
187 |
188 |         det_seg_output, mask_prototypes = outputs
189 |
190 |         actual_feature_dim = det_seg_output.shape[1]
191 |         expected_feature_dim = 4 + self.num_classes + self.num_coeffs
192 |
193 |         if actual_feature_dim != expected_feature_dim:
194 |             raise ValueError(
195 |                 f"Dimensionality mismatch: the model outputs {actual_feature_dim} features per prediction, but {expected_feature_dim} were expected (4 + {self.num_classes} + {self.num_coeffs})."
    def _process_masks(
        self,
        low_res_masks: torch.Tensor,
        model_input_shape: tuple,
        original_shape: tuple,
    ) -> np.ndarray:
        h_model, w_model = model_input_shape
        h_orig, w_orig = original_shape
        gain = min(h_model / h_orig, w_model / w_orig)
        pad_w, pad_h = (w_model - w_orig * gain) / 2, (h_model - h_orig * gain) / 2
        top, left = int(round(pad_h - 0.1)), int(round(pad_w - 0.1))
        bottom, right = int(round(h_model - pad_h + 0.1)), int(
            round(w_model - pad_w + 0.1)
        )
        masks_upsampled = F.interpolate(
            low_res_masks.unsqueeze(1),
            size=(h_model, w_model),
            mode="bilinear",
            align_corners=False,
        )
        masks_cropped = masks_upsampled[:, :, top:bottom, left:right]
        final_masks = F.interpolate(
            masks_cropped, size=(h_orig, w_orig), mode="bilinear", align_corners=False
        ).squeeze(1)
        final_masks = torch.sigmoid(final_masks)  # logits -> probabilities
        return (final_masks > 0.5).cpu().numpy().astype(np.uint8)

    def _postprocess(
        self, outputs: tuple, original_shape: tuple, model_input_shape: tuple
    ) -> DetectionResults:

        det_seg_output, mask_prototypes = outputs

        actual_feature_dim = det_seg_output.shape[1]
        expected_feature_dim = 4 + self.num_classes + self.num_coeffs

        if actual_feature_dim != expected_feature_dim:
            raise ValueError(
                f"Dimension mismatch: the model outputs {actual_feature_dim} "
                f"features per prediction, but {expected_feature_dim} were "
                f"expected (4 + {self.num_classes} + {self.num_coeffs})."
            )

        preds = det_seg_output.squeeze(0).T
        mask_prototypes = mask_prototypes.squeeze(0)

        boxes, class_scores, mask_coeffs = torch.split(
            preds, [4, self.num_classes, self.num_coeffs], dim=1
        )

        scores, class_indices = class_scores.max(dim=1)
        keep = scores > self.conf_thresh

        if not torch.any(keep):
            return []

        boxes = boxes[keep]
        scores = scores[keep]
        class_indices = class_indices[keep]
        mask_coeffs = mask_coeffs[keep]

        boxes_xyxy = self._xywh2xyxy(boxes)
        keep_indices = self._non_max_suppression(boxes_xyxy, scores, class_indices)

        if not keep_indices.numel():
            return []

        final_boxes = boxes_xyxy[keep_indices]
        final_scores = scores[keep_indices]
        final_classes = class_indices[keep_indices]
        final_mask_coeffs = mask_coeffs[keep_indices]

        # Combine per-box coefficients with the prototype masks:
        # (N, 32) @ (32, H*W) -> (N, H*W) -> (N, H, W)
        proto_h, proto_w = mask_prototypes.shape[1], mask_prototypes.shape[2]
        low_res_masks = torch.matmul(
            final_mask_coeffs, mask_prototypes.view(self.num_coeffs, -1)
        )
        low_res_masks = low_res_masks.view(-1, proto_h, proto_w)
        final_masks_np = self._process_masks(
            low_res_masks, model_input_shape, original_shape
        )
        self._scale_coords(model_input_shape, final_boxes, original_shape)

        results = []
        for i in range(final_boxes.shape[0]):
            box = final_boxes[i].cpu().numpy().astype(int)
            score = final_scores[i].item()
            class_id = final_classes[i].item()
            label = self.class_names[class_id]
            color = self.colors[label]
            mask = final_masks_np[i]

            results.append(
                DetectionResult(
                    bounding_box=BoundingBox(box[0], box[1], box[2], box[3]),
                    class_id=class_id,
                    label=label,
                    score=score,
                    color=color,
                    mask=mask,
                )
            )
        return results

    @torch.no_grad()
    def __call__(self, image: np.ndarray) -> DetectionResults:
        input_tensor, original_shape, model_input_shape = self._preprocess(image)
        model_outputs = self.model(input_tensor)
        outputs_on_cpu = [o.cpu() for o in model_outputs]
        results = self._postprocess(outputs_on_cpu, original_shape, model_input_shape)
        return results
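

# Example usage sketch. Assumptions: a TorchScript export of a YOLO-style
# segmentation model exists at the hypothetical path "yoloe_seg.torchscript",
# and its head emits (4 + num_classes + 32) features per prediction. A random
# BGR frame stands in for a real camera image.
if __name__ == "__main__":
    class_names = {0: "person", 1: "car"}  # hypothetical two-class model
    detector = YOLOESegDetector("yoloe_seg.torchscript", class_names)
    frame = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
    for det in detector(frame):
        print(det.label, f"{det.score:.2f}", det.mask.shape)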

--------------------------------------------------------------------------------
/seahorse/perception/detection/object_detection/detector.py:
--------------------------------------------------------------------------------

#!/usr/bin/env python

# Copyright 2025 WheelOS. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Created Date: 2025-11-06
# Author: daohu527

import torch
import torch.nn.functional as F
import numpy as np
from typing import Tuple

from .base_detector import ObjectDetector
from .contracts import BoundingBox, DetectionResult, DetectionResults


class Detector(ObjectDetector):
    """
    A detector using native PyTorch for YOLO model inference, without relying on ultralytics or cv2.
    The model must be exported in TorchScript format.
    """

    def __init__(
        self,
        model_path: str,
        img_size: int = 640,
        conf_thresh: float = 0.5,
        iou_thresh: float = 0.45,
        device: torch.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        ),
    ):
        self.model_path = model_path
        self.img_size = img_size
        self.conf_thresh = conf_thresh
        self.iou_thresh = iou_thresh
        super().__init__(device)
        # The 80 COCO class names, indexed by class id.
        self.class_names = [
            "person",
            "bicycle",
            "car",
            "motorcycle",
            "airplane",
            "bus",
            "train",
            "truck",
            "boat",
            "traffic light",
            "fire hydrant",
            "stop sign",
            "parking meter",
            "bench",
            "bird",
            "cat",
            "dog",
            "horse",
            "sheep",
            "cow",
            "elephant",
            "bear",
            "zebra",
            "giraffe",
            "backpack",
            "umbrella",
            "handbag",
            "tie",
            "suitcase",
            "frisbee",
            "skis",
            "snowboard",
            "sports ball",
            "kite",
            "baseball bat",
            "baseball glove",
            "skateboard",
            "surfboard",
            "tennis racket",
            "bottle",
            "wine glass",
            "cup",
            "fork",
            "knife",
            "spoon",
            "bowl",
            "banana",
            "apple",
            "sandwich",
            "orange",
            "broccoli",
            "carrot",
            "hot dog",
            "pizza",
            "donut",
            "cake",
            "chair",
            "couch",
            "potted plant",
            "bed",
            "dining table",
            "toilet",
            "tv",
            "laptop",
            "mouse",
            "remote",
            "keyboard",
            "cell phone",
            "microwave",
            "oven",
            "toaster",
            "sink",
            "refrigerator",
            "book",
            "clock",
            "vase",
            "scissors",
            "teddy bear",
            "hair drier",
            "toothbrush",
        ]

        np.random.seed(42)
        # Generate a random BGR color for each class
        colors = np.random.randint(
            0, 255, size=(len(self.class_names), 3), dtype=np.uint8
        )
        self.colors = {
            name: color.tolist() for name, color in zip(self.class_names, colors)
        }

    def _load_model(self) -> torch.nn.Module:
        """Load the model from a TorchScript file."""
        print(f"INFO: Loading TorchScript model from '{self.model_path}'...")
        model = torch.jit.load(self.model_path, map_location=self.device)
        return model.eval()

    def _preprocess(self, image: np.ndarray) -> Tuple[torch.Tensor, tuple]:
        """Preprocess the NumPy image into model input format, entirely using PyTorch operations."""
        h0, w0 = image.shape[:2]
        tensor = torch.from_numpy(image).to(self.device).float().permute(2, 0, 1)
        tensor = tensor.flip(0).unsqueeze(0)  # BGR -> RGB, add batch dimension
        tensor /= 255.0
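
        # Letterbox: scale the long side to img_size while preserving aspect
        # ratio, then pad the short side with gray (114) to a square input.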
        r = self.img_size / max(h0, w0)
        if r != 1:
            new_h, new_w = int(h0 * r), int(w0 * r)
            # "area" interpolation suits downsampling; bilinear suits upsampling.
            mode = "area" if r < 1 else "bilinear"
            if mode == "area":
                tensor = F.interpolate(tensor, size=(new_h, new_w), mode=mode)
            else:
                tensor = F.interpolate(
                    tensor, size=(new_h, new_w), mode=mode, align_corners=False
                )

        h, w = tensor.shape[2:]
        dh, dw = self.img_size - h, self.img_size - w
        top, bottom = dh // 2, dh - (dh // 2)
        left, right = dw // 2, dw - (dw // 2)
        tensor = F.pad(
            tensor, (left, right, top, bottom), mode="constant", value=114 / 255.0
        )

        return tensor, (h0, w0)

    def _non_max_suppression(
        self, boxes: torch.Tensor, scores: torch.Tensor
    ) -> torch.Tensor:
        """Pure PyTorch implementation of Non-Maximum Suppression (NMS)."""
        x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
        areas = (x2 - x1) * (y2 - y1)
        order = scores.argsort(descending=True)

        keep = []
        while order.numel() > 0:
            i = order[0]
            keep.append(i)
            if order.numel() == 1:
                break

            xx1 = torch.maximum(x1[i], x1[order[1:]])
            yy1 = torch.maximum(y1[i], y1[order[1:]])
            xx2 = torch.minimum(x2[i], x2[order[1:]])
            yy2 = torch.minimum(y2[i], y2[order[1:]])

            inter = torch.clamp(xx2 - xx1, min=0.0) * torch.clamp(yy2 - yy1, min=0.0)
            ovr = inter / (areas[i] + areas[order[1:]] - inter + 1e-6)

            inds = torch.where(ovr <= self.iou_thresh)[0]
            order = order[inds + 1]

        return (
            torch.stack(keep)
            if keep
            else torch.tensor([], dtype=torch.long, device=boxes.device)
        )

    def _postprocess(
        self, preds: torch.Tensor, original_shape: tuple
    ) -> DetectionResults:
        """
        Post-process the model’s raw output.
        This implementation assumes the model outputs a tensor of shape (batch, 4 + num_classes, num_proposals),
        e.g., (1, 84, 8400).
        """
        # 1. Ensure the shape is correct and transpose: (1, 84, 8400) -> (8400, 84)
        preds = preds.squeeze(0).T

        # 2. Separate boxes and class scores from predictions
        boxes, class_scores = preds[:, :4], preds[:, 4:]

        # 3. Find the best class and its confidence for each predicted box
        scores, class_indices = torch.max(class_scores, dim=1)

        # 4. Filter by confidence threshold
        keep = scores > self.conf_thresh
        boxes = boxes[keep]
        scores = scores[keep]
        class_indices = class_indices[keep]

        if not boxes.shape[0]:
            return []

        # 5. Convert box format (center_x, center_y, width, height) -> (x1, y1, x2, y2)
        boxes = self._xywh2xyxy(boxes)

        # 6. Run class-aware (multi-class) NMS: add a large per-class
        # coordinate offset so boxes of different classes never overlap,
        # letting a single NMS pass treat each class independently
        max_coord = boxes.max()
        offsets = class_indices.to(boxes.dtype) * (max_coord + 1)
        boxes_for_nms = boxes + offsets[:, None]

        keep_indices = self._non_max_suppression(boxes_for_nms, scores)

        # 7. Final filtering according to NMS results
        boxes = boxes[keep_indices]
        scores = scores[keep_indices]
        class_indices = class_indices[keep_indices]

        # 8. Scale coordinates from model input size back to original image size
        model_input_shape = (self.img_size, self.img_size)
        self._scale_coords(model_input_shape, boxes, original_shape)

        # 9. Build return results
        results = []
        for box, score, cls in zip(boxes, scores, class_indices):
            class_id = int(cls)
            label = self.class_names[class_id]
            color = self.colors[label]  # get color from mapping

            results.append(
                DetectionResult(
                    bounding_box=BoundingBox(
                        int(box[0]), int(box[1]), int(box[2]), int(box[3])
                    ),
                    class_id=class_id,
                    label=label,
                    score=float(score),
                    color=color,  # add color to the result object
                )
            )
        return results

    def _xywh2xyxy(self, x: torch.Tensor) -> torch.Tensor:
        """Convert (center_x, center_y, width, height) format to (x1, y1, x2, y2) format."""
        y = torch.empty_like(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2
        y[:, 1] = x[:, 1] - x[:, 3] / 2
        y[:, 2] = x[:, 0] + x[:, 2] / 2
        y[:, 3] = x[:, 1] + x[:, 3] / 2
        return y

    def _scale_coords(
        self, model_input_shape: tuple, coords: torch.Tensor, original_shape: tuple
    ):
        """Scale coordinates from the model input size (letter-boxed) back to the original image size."""
        h_model, w_model = model_input_shape
        h_orig, w_orig = original_shape

        gain = min(h_model / h_orig, w_model / w_orig)
        pad_w, pad_h = (w_model - w_orig * gain) / 2, (h_model - h_orig * gain) / 2

        coords[:, [0, 2]] -= pad_w
        coords[:, [1, 3]] -= pad_h
        coords[:, :4] /= gain

        coords[:, [0, 2]] = coords[:, [0, 2]].clamp(0, w_orig)
        coords[:, [1, 3]] = coords[:, [1, 3]].clamp(0, h_orig)

    @torch.no_grad()
    def __call__(self, image: np.ndarray) -> DetectionResults:
        """Implement the standard ‘preprocess -> infer -> postprocess’ pipeline."""
        input_tensor, original_shape = self._preprocess(image)

        model_outputs = self.model(input_tensor)
        if isinstance(model_outputs, tuple):
            model_outputs = model_outputs[0]

        results = self._postprocess(model_outputs, original_shape)

        return results
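

# Example usage sketch. Assumptions: a TorchScript YOLO export exists at the
# hypothetical path "yolov8n.torchscript"; the input is a BGR uint8 image, so
# a random frame stands in for a real camera image.
if __name__ == "__main__":
    detector = Detector(model_path="yolov8n.torchscript")
    frame = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
    for det in detector(frame):
        print(f"{det.label}: {det.score:.2f}", det.bounding_box)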

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2023 daohu527@gmail.com

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------