├── .DS_Store
├── .gitignore
├── LICENSE
├── README.md
├── active_learning
│   ├── images.py
│   └── video.py
├── api
│   └── uploadby_split.py
├── email
│   ├── requirements.txt
│   ├── roboflow_config.json
│   ├── send_email.py
│   └── trigger.py
├── figures
│   ├── ColabNotebook_SaveFile.png
│   ├── ResponseObjectFormat_JSON.png
│   ├── Visualized_ResponseObjectFormat_JSON.png
│   ├── contact_us.png
│   └── roboflow-cv-utilities-header.png
├── images
│   ├── blur_img.py
│   ├── crop_img.py
│   ├── draw_img.py
│   ├── fill_img.py
│   ├── metal_test.jpg
│   ├── roboflow_cv_utils_draw_img.ipynb
│   ├── roboflow_cv_utils_fill_img.ipynb
│   ├── roboflow_cv_utils_writetext_img.ipynb
│   ├── test_box.jpg
│   ├── twoPass.py
│   └── writetext_img.py
├── measureObject.py
├── object_counting.py
├── requirements.txt
├── roboflow_config.json
├── roboflow_config_twopass.json
├── save_vidframes.py
├── stream
│   ├── blur_stream.py
│   ├── draw_stream.py
│   ├── fill_stream.py
│   └── writetext_stream.py
├── streamlit
│   ├── requirements.txt
│   └── streamlit_app.py
├── text_message
│   ├── requirements.txt
│   └── send_text.py
├── trigger_power_automate.py
├── video
│   ├── blur_video.py
│   ├── boxesOnConveyer.mp4
│   ├── draw_vid.py
│   ├── fill_vid.py
│   └── writetext_vid.py
├── video_classification.py
├── video_od.py
├── webcam_classification.py
└── webcam_od.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/.DS_Store
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 | test.json
131 | video/draw_vid copy.py
132 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # roboflow-computer-vision-utilities
2 | Interface with the Roboflow API and Python SDK for running inference (receiving predictions) with AutoML and custom-trained models compatible with Roboflow Deploy.
3 |
4 | 
5 |
6 | 
7 | #### [Website](https://docs.roboflow.com/python) • [Docs](https://docs.roboflow.com) • [Blog](https://blog.roboflow.com) • [Community Forum](https://discuss.roboflow.com) • [Twitter](https://twitter.com/roboflow) • [Linkedin](https://www.linkedin.com/company/roboflow-ai) • [Roboflow Universe](https://universe.roboflow.com) • [Knowledge Base](https://help.roboflow.com)
8 |
9 | ## What is Roboflow?
10 | **Roboflow** makes managing, preprocessing, augmenting, and versioning datasets for computer vision seamless. This repo utilizes the official [Roboflow Python SDK](https://docs.roboflow.com/python) that interfaces with the [Roboflow Hosted API Inference Server](https://docs.roboflow.com/inference/hosted-api) and [REST API](https://docs.roboflow.com/rest-api).
11 |
12 | Key features of Roboflow:
13 |
14 | - Import and Export image datasets into any supported [formats](https://roboflow.com/formats)
15 | - [Preprocess](https://docs.roboflow.com/image-transformations/image-preprocessing)
16 | and [augment](https://docs.roboflow.com/image-transformations/image-augmentation) data using Roboflow's dataset
17 | management tools
18 | - Train computer vision models using [Roboflow Train](https://docs.roboflow.com/train) and deploy
19 | to [production](https://docs.roboflow.com/inference)
20 | - Use [community curated projects](https://universe.roboflow.com/) to start building your own vision-powered products
21 |
22 | #### Available Plans & Features
23 | https://roboflow.com/pricing
24 |
25 | Personal and Research Projects - Applying for Additional Account Features:
26 | * https://roboflow.com/community
27 |
28 | Business Projects and POC's - Requesting Additional Account Features:
29 | * https://roboflow.com/sales
30 |
31 | #### Popular Repositories and Learning Resources:
32 |
33 | • [Roboflow Python SDK](https://github.com/roboflow/roboflow-python) • [Notebooks](https://github.com/roboflow/notebooks) (Custom Model Training Tutorials) • [Learn Computer Vision](https://roboflow.com/learn) • [Supervision](https://github.com/roboflow/supervision) • [PolygonZone](https://polygonzone.roboflow.com) •
34 |
35 | #### Installation (Dependencies):
36 | Python Version: `3.10>=Python>=3.7`.
37 | Install from Source:
38 | ```
39 | git clone https://github.com/roboflow-ai/roboflow-computer-vision-utilities.git
40 | cd roboflow-computer-vision-utilities
41 | python3 -m venv env
42 | source env/bin/activate
43 | pip3 install -r requirements.txt
44 | ```
45 |
46 | #### [Obtaining Your API Key](https://docs.roboflow.com/rest-api#obtaining-your-api-key) | [Workspace ID](https://docs.roboflow.com/roboflow-workspaces#workspace-id) | [Project ID](https://docs.roboflow.com/roboflow-workspaces/projects#project-id) | [Model Version Number](https://docs.roboflow.com/roboflow-workspaces/versions#version-number)
47 |
48 | ## 🤖 📹 Inference Utilities:
49 | | **functionality** | **images** | **video** | **stream** |
50 | |:------------:|:-------------------------------------------------:|:---------------------------:|:---------------------------:|
51 | | Draw Boxes | [code](/images/draw_img.py) [colab](https://colab.research.google.com/drive/13HeLzcMUII4fvEFyMyz4lQBje9R2dvM8?usp=sharing) | [code](/video/draw_vid.py) | [code](/stream/draw_stream.py) |
52 | | Write Text | [code](/images/writetext_img.py) [notebook](/images/roboflow_cv_utils_writetext_img.ipynb) | [code](/video/writetext_vid.py) | [code](/stream/writetext_stream.py) |
53 | | Fill Boxes | [code](/images/fill_img.py) [notebook](/images/roboflow_cv_utils_fill_img.ipynb) | [code](/video/fill_vid.py) | [code](/stream/fill_stream.py) |
54 | | Crop Boxes | [code](/images/crop_img.py) | | |
55 | | Blur Boxes | [code](/images/blur_img.py) | [code](/video/blur_video.py) | [code](/stream/blur_stream.py) |
56 | | Two Stage Detection | Object Detection -> Crop Bounding Box -> Single-Label Classification [code](/images/twoPass.py) | | |
57 | | Object Counting | [code](/object_counting.py) | | |
58 | | Measure Object | [code](/measureObject.py) | | |
59 | | Send Email | [code](/email/send_email.py) | | |
60 | | Send Text | [code](/text_message/send_text.py) | | |
61 |
62 | #### Video Inference (Classification)
63 | * Model predictions on video files for Classification models running via hosted or local deployment supported by Roboflow Deploy.
64 |
65 | [code](/video_classification.py)
66 |
67 | #### Webcam Inference (Classification)
68 | * Model predictions on a live webcam feed for Classification models running via hosted or local deployment supported by Roboflow Deploy.
69 |
70 | [code](/webcam_classification.py)
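For context, the webcam scripts in this repo follow the same loop as `email/send_email.py`: grab a frame with OpenCV, run a prediction, and act on the result. A minimal classification sketch, assuming a model object created with the Roboflow Python SDK (the API key, project ID, and version number below are placeholders; writing each frame to disk before predicting mirrors the pattern in `active_learning/video.py`):

```
import cv2
from roboflow import Roboflow

rf = Roboflow(api_key="INSERT_PRIVATE_API_KEY")
model = rf.workspace().project("INSERT_PROJECT_ID").version(1).model

video = cv2.VideoCapture(0)  # change 0 to 1 or 2 if your webcam is not found

while True:
    ret, frame = video.read()
    if not ret:
        break
    # the SDK predicts from a file path, so save the frame first
    cv2.imwrite("current_frame.jpg", frame)
    print(model.predict("current_frame.jpg").json())
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
        break

video.release()
cv2.destroyAllWindows()
```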
71 |
72 | #### Sample Video Frames
73 | * Extract and save video frames at a specified frames-per-second sampling rate; see the sketch below.
74 |
75 | [code](/save_vidframes.py)
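The sampling logic is the same one used in `active_learning/video.py`: compute how many source frames to skip for the desired rate, then read and save every Nth frame. A minimal sketch, assuming a local `video.mp4` and a `target_fps` of your choosing:

```
import os
import cv2

video_path = "video.mp4"  # placeholder input file
target_fps = 1            # frames to keep per second of video
os.makedirs("./video_frames", exist_ok=True)

video = cv2.VideoCapture(video_path)
# e.g. a 30 fps source sampled at 1 fps keeps every 30th frame
skip_frames = max(1, int(video.get(cv2.CAP_PROP_FPS) // target_fps))
total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

frame_number = 0
while frame_number < total_frames:
    video.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
    ret, frame = video.read()
    if not ret:
        break
    cv2.imwrite(f"./video_frames/frame_{frame_number:04d}.jpg", frame)
    frame_number += skip_frames

video.release()
```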
76 |
77 | ## 💻 🛜 API Utilities:
78 | #### Upload Images to a Specified Dataset Split (Train/Valid/Test)
79 |
80 | [code](/api/uploadby_split.py)
81 | * Upload images from a directory (folder) to a specified dataset split [(train, valid, or test set)](https://blog.roboflow.com/train-test-split/) in your Roboflow project.
82 | * Base functionality also includes the ability to set a specific percentage of images to upload from the specified directory, and the option to name the uploaded [dataset batch](https://docs.roboflow.com/annotate/collaborative-workflow).
83 |
84 | More on the Roboflow REST API: https://docs.roboflow.com/rest-api/
85 |
86 |
87 | ## 🔁 📈 Active Learning Utilities:
88 | Automate improvement of your dataset by using computer vision and conditional upload logic to determine which images should be directly uploaded to your Roboflow workspace.
89 | * [Active Learning](https://roboflow.com/python/active-learning) (Roboflow Documentation)
90 | * [Active Learning with the Roboflow Python SDK](https://blog.roboflow.com/pip-install-roboflow)
91 | * [Strategies for Active Learning Implementation](https://blog.roboflow.com/computer-vision-active-learning-tips/)
92 | * [Active Learning](https://help.roboflow.com/implementing-active-learning) (Knowledge Base guide)
93 |
94 | | **model type** | **images** | **video** |
95 | |:------------:|:-------------------------------------------------:|:---------------------------:|
96 | | Object Detection | [code](/active_learning/images.py) | [code](/active_learning/video.py) |
97 |
98 | Conditionals - Source Code:
99 |
100 | * `roboflow-python/roboflow/util/active_learning_utils.py`: [source](https://github.com/roboflow/roboflow-python/blob/main/roboflow/util/active_learning_utils.py)
101 | * `roboflow-python/roboflow/core/workspace.py`, Line 245: [source](https://github.com/roboflow/roboflow-python/blob/02b717f453e95c5cdef8a817914a46f5fd6771a8/roboflow/core/workspace.py#L245)
102 | ```
103 | # set the conditionals values as necessary for your active learning needs
104 | # NOTE - not all conditional fields are required
105 | conditionals = {
106 |     "required_objects_count" : 1,
107 |     "required_class_count": 1,
108 |     "target_classes": ["class_name"],
109 |     "minimum_size_requirement" : float('-inf'),
110 |     "maximum_size_requirement" : float('inf'),
111 |     "confidence_interval" : [10,90],
112 |     "similarity_confidence_threshold": .3,
113 |     "similarity_timeout_limit": 3
114 | }
115 | ```
116 | * Note: [Filtering out images for upload by similarity](https://blog.roboflow.com/roboflow-inference-server-clip/) is available for paid plans. Please [contact the Roboflow team](https://roboflow.com/sales) for access.
117 |
118 | 
119 |
120 | 
121 |
122 | `twoPass.py`: Code for running inference (model predictions) in "two stages" on images.
123 | ```
124 | # default root save location: './inference_images'
125 | # also moves all images in /roboflow-computer-vision-utilities to /roboflow-computer-vision-utilities/inference_images before inference
126 | cd images
127 | python3 twoPass.py
128 | ```
129 | * e.g. Stage 1: object detection (find faces) --> crop the detected areas --> Stage 2: classification (is the cropped face a real image or an illustration?) (see the sketch after this list)
130 | * To be used after updating the `roboflow_config.json` file in the main directory with your Model Info (Workspace ID, Model/Project ID, Private API Key, and Model Version Number)
131 | * Available in `roboflow-computer-vision-utilities/images`
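A minimal sketch of that two-stage flow, assuming two trained models in your workspace (all IDs and paths below are placeholders; `twoPass.py` is the full implementation):

```
import cv2
from roboflow import Roboflow

# placeholder IDs; both models are assumed to live in your workspace
rf = Roboflow(api_key="INSERT_PRIVATE_API_KEY")
det_model = rf.workspace().project("INSERT-DETECTION-PROJECT-ID").version(1).model
cls_model = rf.workspace().project("INSERT-CLASSIFICATION-PROJECT-ID").version(1).model

img_path = "YOUR_IMAGE.jpg"
img = cv2.imread(img_path)

# Stage 1: object detection (e.g. find faces)
detections = det_model.predict(img_path, confidence=40, overlap=30).json()["predictions"]

for i, box in enumerate(detections):
    # convert center (x, y) + width/height to top-left and bottom-right corners
    x0, y0 = int(box['x'] - box['width'] / 2), int(box['y'] - box['height'] / 2)
    x1, y1 = int(box['x'] + box['width'] / 2), int(box['y'] + box['height'] / 2)

    # crop the detected area and save it for the second pass
    crop_path = f"crop_{i}.jpg"
    cv2.imwrite(crop_path, img[y0:y1, x0:x1])

    # Stage 2: single-label classification on the crop
    print(crop_path, cls_model.predict(crop_path).json())
```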
132 |
133 | `trigger_power_automate.py`: make predictions on images, save the results, and send an email to the email specified in `roboflow_config.json`
134 | ```
135 | # default confidence and overlap for predictions: confidence = 40, overlap = 30
136 | cd images
137 | python3 trigger_power_automate.py
138 | ```
139 | * To be used after updating the `roboflow_config.json` file in the main directory with your Model Info (Workspace ID, Model/Project ID, Private API Key, and Model Version Number), and the email address to send the inference results to.
140 |
141 | ## Streamlit App for Testing Roboflow Object Detection Models
142 |
143 | This app allows you to upload an image and run inference on it with an Object Detection model trained with [Roboflow Train](https://docs.roboflow.com/train):
144 | * [Inference: Hosted API](https://docs.roboflow.com/inference/hosted-api)
145 | * [Response Object Format](https://docs.roboflow.com/inference/hosted-api#response-object-format)
146 | * [Roboflow Python Package](https://docs.roboflow.com/python)
147 |
148 | The example app can be [found here](https://mo-traor3-ai-streamlit-roboflow-model-test-streamlit-app-j8lako.streamlitapp.com/):
149 | * https://mo-traor3-ai-streamlit-roboflow-model-test-streamlit-app-j8lako.streamlitapp.com/
150 | * The code for the app is available in the `streamlit` directory of this repo, in the `streamlit_app.py` file
151 | * The original repo is hosted here: https://github.com/mo-traor3-ai/streamlit-roboflow-model-testing
152 | * [Creating a Basic Streamlit App for Roboflow Inference](https://blog.roboflow.com/how-to-use-roboflow-and-streamlit-to-visualize-object-detection-output/) | [Roboflow Live Video Inference: Streamlit Tutorial](https://www.youtube.com/watch?v=w4fgZg-jb28)
153 |
154 | The app will work as-is for inference on individual image files (png, jpeg, jpg formats). If you want to build your own model, you'll need your own API key. [Create a Roboflow account](https://app.roboflow.com) to get your own API key.
155 |
156 | The app was created using [Roboflow](https://roboflow.com) and [Streamlit](https://streamlit.io/).
157 |
158 | ## Example Code Snippets
159 | #### Receive model predictions from a single image file:
160 | ```
161 | img_path = 'INSERT_PATH_TO_IMG' # .jpg, .jpeg, .png
162 | img = cv2.imread(img_path)
163 |
164 | # perform inference on the selected image
165 | predictions = model.predict(img_path, confidence=40,
166 |     overlap=30)
167 | ```
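These snippets assume `cv2` and `glob` are imported and that `model` is a Roboflow model object. A minimal setup sketch, mirroring the pattern used throughout this repo (the workspace/project IDs and version number are placeholders):

```
import glob
import cv2
from roboflow import Roboflow

# obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
rf = Roboflow(api_key="INSERT_PRIVATE_API_KEY")
project = rf.workspace("INSERT-WORKSPACE-ID").project("INSERT-PROJECT-ID")
model = project.version(1).model  # replace 1 with your trained model version number
```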
168 | #### Receive model predictions from images contained in a folder (directory):
169 | ```
170 | raw_data_location = "INSERT_PATH_TO_DIRECTORY"
171 |
172 | for raw_data_extension in ['.jpg', '.jpeg', '.png']:
173 |     ## uncomment the following line to run inference on one
174 |     ## specified file type only
175 |     # raw_data_extension = ".jpg"  # e.g. .jpg, .jpeg, .png
176 |     globbed_files = glob.glob(raw_data_location + '/*' + raw_data_extension)
177 |     for img_path in globbed_files:
178 |         img = cv2.imread(img_path)
179 |         predictions = model.predict(img_path, confidence=40, overlap=30)
180 | ```
181 | #### Drawing Bounding Boxes
182 | ```
183 | # main bounding box coordinates from JSON response object
184 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
185 | x0 = bounding_box['x'] - bounding_box['width'] / 2
186 | x1 = bounding_box['x'] + bounding_box['width'] / 2
187 | y0 = bounding_box['y'] - bounding_box['height'] / 2
188 | y1 = bounding_box['y'] + bounding_box['height'] / 2
189 |
190 | # position coordinates: start = (x0, y0), end = (x1, y1)
191 | # color = BGR-value for bounding box color (OpenCV uses BGR ordering), (0,0,0) is "black"
192 | # thickness = stroke width/thickness of bounding box
193 | # draw and place bounding boxes
194 | start_point = (int(x0), int(y0))
195 | end_point = (int(x1), int(y1))
196 |
197 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
198 | ```
199 | #### Writing and Placing Text:
200 | ```
201 | # write and place text
202 | cv2.putText(
203 |     img, # cv2 image object (numpy array) to place text on
204 |     'placeholder text', # text to place on image
205 |     (12, 12), # location of text in pixels
206 |     fontFace = cv2.FONT_HERSHEY_SIMPLEX, # text font
207 |     fontScale = 0.6, # font scale
208 |     color = (255, 255, 255), # text color in BGR
209 |     thickness=2 # thickness/"weight" of text
210 | )
211 | ```
212 | #### Drawing "Filled" Bounding Boxes:
213 | ```
214 | # main bounding box coordinates from JSON response object
215 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
216 | x0 = bounding_box['x'] - bounding_box['width'] / 2
217 | x1 = bounding_box['x'] + bounding_box['width'] / 2
218 | y0 = bounding_box['y'] - bounding_box['height'] / 2
219 | y1 = bounding_box['y'] + bounding_box['height'] / 2
220 |
221 | # position coordinates: start = (x0, y0), end = (x1, y1)
222 | # color = BGR-value for bounding box color (OpenCV uses BGR ordering), (0,0,0) is "black"
223 | # thickness = stroke width/thickness of bounding box
224 | # draw and place bounding boxes
225 | start_point = (int(x0), int(y0))
226 | end_point = (int(x1), int(y1))
227 |
228 | # setting thickness to -1 --> filled bounding box with the specified color
229 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=-1)
230 | ```
231 | #### Blurring the Contents of Bounding Boxes:
232 | ```
233 | # extract bounding box coordinates from the current detection
234 | # note: the API returns the center point of the box as (x,y) plus its width and height,
235 | # ----- but cropping requires the top-left and bottom-right points
236 | x0 = prediction['x'] - prediction['width'] / 2
237 | x1 = prediction['x'] + prediction['width'] / 2
238 | y0 = prediction['y'] - prediction['height'] / 2
239 | y1 = prediction['y'] + prediction['height'] / 2
240 | box = [(x0, y0), (x1, y1)]
241 | blur_x = int(prediction['x'] - prediction['width'] / 2)
242 | blur_y = int(prediction['y'] - prediction['height'] / 2)
243 | blur_width = int(prediction['width'])
244 | blur_height = int(prediction['height'])
245 |
246 | # region of interest (ROI), or area to blur
247 | roi = img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width]
248 |
249 | # ADD BLURRED BBOXES
250 | # set blur to (31,31) or (51,51) based on amount of blur desired
251 | blur_image = cv2.GaussianBlur(roi,(51,51),0)
252 | img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width] = blur_image
253 | ```
254 | #### Cropping the Contents of Bounding Boxes:
255 | ```
256 | for bounding_box in predictions:
257 |     # defining crop area [height_of_cropArea:width_of_cropArea]
258 |     # croppedArea = img[start_row:end_row, start_col:end_col]
259 |     x0 = bounding_box['x'] - bounding_box['width'] / 2  # start_column
260 |     x1 = bounding_box['x'] + bounding_box['width'] / 2  # end_column
261 |     y0 = bounding_box['y'] - bounding_box['height'] / 2  # start_row
262 |     y1 = bounding_box['y'] + bounding_box['height'] / 2  # end_row
263 |     class_name = bounding_box['class']
264 |     croppedArea = img[int(y0):int(y1), int(x0):int(x1)]
265 |
266 |     # position coordinates: start = (x0, y0), end = (x1, y1)
267 |     # color = BGR-value for bounding box color (OpenCV uses BGR ordering), (0,0,0) is "black"
268 |     # thickness = stroke width/thickness of the box, -1 = fill box
269 |     cv2.rectangle(croppedArea,
270 |         (int(10), int(10)), (int(80), int(40)), color=(0,0,0),
271 |         thickness=-1)
272 |
273 |     # write class name on image, and print class name
274 |     cv2.putText(
275 |         croppedArea, # cv2 image object to place text on
276 |         class_name, # text to place on image
277 |         (20, 20), # location of text in pixels
278 |         fontFace = cv2.FONT_HERSHEY_SIMPLEX, # text font
279 |         fontScale = 0.4, # font scale
280 |         color = (255, 255, 255), # text color in BGR
281 |         thickness=2 # thickness/"weight" of text
282 |     )
283 |
284 |     # SAVE CROPPED IMAGES
285 |     cv2.imwrite(f'crop{i}_' + os.path.basename(img_path), croppedArea)
286 |     i += 1
287 | ```
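Note that the crop snippet assumes the surrounding context from the earlier snippets: a loaded `img`/`img_path` pair, a `predictions` object, and a counter `i` initialized before the loop. A minimal sketch of that setup (paths are placeholders):

```
import os
import cv2

img_path = "YOUR_IMAGE.jpg"
img = cv2.imread(img_path)

# model is a Roboflow model object (see the setup sketch above)
predictions = model.predict(img_path, confidence=40, overlap=30)

i = 0  # crop counter used in the saved filenames
# ...the "Cropping the Contents of Bounding Boxes" loop goes here
```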
--------------------------------------------------------------------------------
/active_learning/images.py:
--------------------------------------------------------------------------------
1 | from roboflow import Roboflow
2 |
3 |
4 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
5 | rf = Roboflow(api_key="INSERT_PRIVATE_API_KEY")
6 | workspace = rf.workspace()
7 |
8 | raw_data_location = "INSERT_PATH_TO_IMAGES"
9 | raw_data_extension = ".jpg"
10 |
11 | # replace 1 with your model version number for inference
12 | inference_endpoint = ["INSERT_MODEL_ID", 1]
13 | upload_destination = "INSERT_MODEL_ID"
14 |
15 | # set the conditionals values as necessary for your active learning needs
16 | # NOTE - not all conditional fields are required
17 | conditionals = {
18 |     "required_objects_count" : 1,
19 |     "required_class_count": 1,
20 |     "target_classes": [],
21 |     "minimum_size_requirement" : float('-inf'),
22 |     "maximum_size_requirement" : float('inf'),
23 |     "confidence_interval" : [10,90],
24 | }
25 |
26 | ## filtering out images for upload by similarity is available for paid plans
27 | ## contact the Roboflow team for access: https://roboflow.com/sales
28 | # conditionals = {
29 | #     "required_objects_count" : 1,
30 | #     "required_class_count": 1,
31 | #     "target_classes": [],
32 | #     "minimum_size_requirement" : float('-inf'),
33 | #     "maximum_size_requirement" : float('inf'),
34 | #     "confidence_interval" : [10,90],
35 | #     "similarity_confidence_threshold": .3,
36 | #     "similarity_timeout_limit": 3
37 | # }
38 |
39 | workspace.active_learning(raw_data_location=raw_data_location,
40 |                           raw_data_extension=raw_data_extension,
41 |                           inference_endpoint=inference_endpoint,
42 |                           upload_destination=upload_destination,
43 |                           conditionals=conditionals)
44 |
--------------------------------------------------------------------------------
/active_learning/video.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import json
4 | import argparse
5 | from roboflow import Roboflow
6 |
7 | def process_frame(frame_path, model):
8 |     predictions = model.predict(frame_path).json()["predictions"]
9 |
10 |     return predictions
11 |
12 | def main(
13 |     video_path: str,
14 |     api_key: str,
15 |     project_name: str,
16 |     model_version: int,
17 |     confidence: int,
18 |     overlap: int,
19 |     active_learning: bool,
20 |     raw_data_location: str = ".",
21 |     raw_data_extension: str = ".jpg",
22 |     upload_destination: str = "",
23 |     conditionals: dict = {},
24 |     fps: int = 1,
25 | ):
26 |     rf = Roboflow(api_key=api_key)
27 |     inference_project = rf.workspace().project(project_name)
28 |     model = inference_project.version(model_version).model
29 |
30 |     model.confidence = confidence
31 |     model.overlap = overlap
32 |
33 |     video = cv2.VideoCapture(video_path)
34 |     frame_number = 0
35 |     total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
36 |     skip_frames = int(video.get(cv2.CAP_PROP_FPS) // fps)
37 |     sampled_frames = []
38 |
39 |     while frame_number < total_frames:
40 |         video.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
41 |         ret, frame = video.read()
42 |
43 |         if not ret:
44 |             break
45 |
46 |         if active_learning:
47 |             active_learning_frames = os.path.join(raw_data_location, "sampled_frames")
48 |             if os.path.exists(active_learning_frames) is False:
49 |                 os.mkdir(active_learning_frames)
50 |
51 |             frame_path = os.path.abspath(active_learning_frames + f"/frame_{frame_number:04d}{raw_data_extension}")
52 |             sampled_frames.append(frame_path)
53 |             print(frame_path)
54 |             if os.path.exists(frame_path) is False:
55 |                 cv2.imwrite(frame_path, frame)
56 |
57 |         predictions = process_frame(frame_path, model)
58 |         frame_number += skip_frames
59 |
60 |         # Press 'q' to exit the loop
61 |         if cv2.waitKey(1) & 0xFF == ord('q'):
62 |             break
63 |
64 |     video.release()
65 |     cv2.destroyAllWindows()
66 |
67 |     if active_learning:
68 |         workspace = rf.workspace()
69 |
70 |         if "minimum_size_requirement" in conditionals:
71 |             conditionals["minimum_size_requirement"] = float(
72 |                 conditionals["minimum_size_requirement"]) if not isinstance(
73 |                 conditionals["minimum_size_requirement"], float) else conditionals["minimum_size_requirement"]
74 |         if "maximum_size_requirement" in conditionals:
75 |             conditionals["maximum_size_requirement"] = float(
76 |                 conditionals["maximum_size_requirement"]) if not isinstance(
77 |                 conditionals["maximum_size_requirement"], float) else conditionals["maximum_size_requirement"]
78 |         for i in range(len(sampled_frames)):
79 |             workspace.active_learning(
80 |                 raw_data_location=sampled_frames[i],
81 |                 raw_data_extension=raw_data_extension,
82 |                 inference_endpoint=[project_name, model_version],
83 |                 upload_destination=upload_destination,
84 |                 conditionals=conditionals
85 |             )
86 |
87 |
88 | if __name__ == "__main__":
89 |     parser = argparse.ArgumentParser(description="Video object detection with Roboflow and optional Active Learning.")
90 |     parser.add_argument("video_path", type=str, help="Path to the video file.")
91 |     parser.add_argument("--api_key", type=str, help="Your Roboflow API key.")
92 |     parser.add_argument("--project_name", type=str, help="Your Roboflow project name.")
93 |     parser.add_argument("--model_version", type=int, help="Model version number.")
94 |     parser.add_argument("--confidence", default=40, type=int, help="Confidence threshold.")
95 |     parser.add_argument("--overlap", default=30, type=int, help="Overlap threshold.")
96 |     parser.add_argument("--active_learning", default=True, action="store_true", help="Enable Active Learning.")
97 |     parser.add_argument("--raw_data_location", type=str, default=f"{os.curdir}", help="Location to save frames for Active Learning.")
98 |     parser.add_argument("--raw_data_extension", type=str, default=".jpg", help="Image extension for saved frames.")
99 |     parser.add_argument("--upload_destination", type=str, help="Upload destination (model ID), e.g. project_name (str)")
100 |     parser.add_argument("--conditionals", type=str, default="{}", help="Conditionals for Active Learning (JSON string).")
101 |     parser.add_argument("--fps", type=int, default=1, help="Frames per second to sample from the video.")
102 |
103 |     args = parser.parse_args()
104 |
105 |     conditionals = json.loads(args.conditionals)
106 |
107 |     main(
108 |         args.video_path,
109 |         args.api_key,
110 |         args.project_name,
111 |         args.model_version,
112 |         args.confidence,
113 |         args.overlap,
114 |         args.active_learning,
115 |         args.raw_data_location,
116 |         args.raw_data_extension,
117 |         args.upload_destination,
118 |         conditionals,
119 |         args.fps,
120 |     )
121 | ## Example below of how to run the file (remove the comment
122 | ## from each line below, prior to copy/pasting to your Terminal;
123 | ## note that video_path is positional, so it is passed without a flag)
124 | # python3 video.py "/test-1920x1080.mp4" \
125 | #     --api_key="PRIVATE_API_KEY" \
126 | #     --project_name="face-detection-mik1i" \
127 | #     --model_version=18 \
128 | #     --raw_data_location="/active_learning_infer" \
129 | #     --upload_destination="face-detection-mik1i" \
130 | #     --conditionals='{"required_objects_count": 1, "required_class_count": 1, "target_classes": ["face"], "confidence_interval": [1,75], "minimum_size_requirement": "-inf", "maximum_size_requirement": "inf"}'
--------------------------------------------------------------------------------
/api/uploadby_split.py:
--------------------------------------------------------------------------------
1 | import os
2 | import random
3 | import requests
4 | import base64
5 | import io
6 | from PIL import Image
7 |
8 |
9 | def upload_image(image_path: str, api_key: str, project_id: str, split: str, batch_name: str):
10 |     """
11 |     Upload a single image to the Roboflow Upload API with the given parameters.
12 |
13 |     Args:
14 |         image_path (str): Path to the image file.
15 |         api_key (str): Roboflow API key.
16 |         project_id (str): Roboflow project ID.
17 |         split (str): Dataset split, can be 'train', 'valid', or 'test'.
18 |         batch_name (str): Batch name for the uploaded images.
19 |     Returns:
20 |         dict: JSON response from the Roboflow API.
21 |     """
22 |     image = Image.open(image_path).convert("RGB")
23 |     buffered = io.BytesIO()
24 |     image.save(buffered, quality=90, format="JPEG")
25 |
26 |     img_str = base64.b64encode(buffered.getvalue())
27 |     img_str = img_str.decode("ascii")
28 |
29 |     upload_url = "".join([
30 |         f"https://api.roboflow.com/dataset/{project_id}/upload",
31 |         f"?api_key={api_key}",
32 |         f"&name={os.path.basename(image_path)}",
33 |         f"&split={split}",
34 |         f"&batch={batch_name}"
35 |     ])
36 |
37 |     r = requests.post(upload_url, data=img_str, headers={
38 |         "Content-Type": "application/x-www-form-urlencoded"
39 |     })
40 |
41 |     return r.json()
42 |
43 | def get_image_paths(directory: str):
44 |     """
45 |     Get a list of image file paths from a directory.
46 |
47 |     Args:
48 |         directory (str): Path to the directory containing images.
49 |     Returns:
50 |         list: A list of image file paths.
51 |     """
52 |     image_extensions = {'.jpeg', '.jpg', '.png'}
53 |     image_paths = []
54 |
55 |     for file in os.listdir(directory):
56 |         file_extension = os.path.splitext(file)[1].lower()
57 |         if file_extension in image_extensions:
58 |             image_paths.append(os.path.join(directory, file))
59 |
60 |     return image_paths
61 |
62 | def upload_images(directory: str, api_key: str, project_id: str, split: str, percentage: int, batch_name: str):
63 |     """
64 |     Upload a specified percentage of images from a directory to a given dataset split.
65 |
66 |     Args:
67 |         directory (str): Path to the directory containing images.
68 |         api_key (str): Roboflow API key.
69 |         project_id (str): Roboflow project ID.
70 |         split (str): Dataset split, can be 'train', 'valid', or 'test'.
71 |         percentage (int): The percentage of images to upload (1-100).
72 |         batch_name (str): Batch name for the uploaded images.
73 |     """
74 |     image_paths = get_image_paths(directory)
75 |     num_images_to_upload = int(len(image_paths) * percentage / 100)
76 |     print(f"Uploading {num_images_to_upload} images to the {split} split...")
77 |     sampled_image_paths = random.sample(image_paths, num_images_to_upload)
78 |
79 |     for image_path in sampled_image_paths:
80 |         result = upload_image(image_path, api_key, project_id, split, batch_name)
81 |         print(result)
82 |
83 | if __name__ == '__main__':
84 |     # Example usage:
85 |     image_directory = 'path/to/your/image/directory'
86 |     api_key = 'YOUR_API_KEY'
87 |     project_id = 'YOUR_PROJECT_ID'
88 |     split = 'train'  # can be 'train', 'valid', or 'test'
89 |     percentage = 50  # value between 1 and 100
90 |     batch_name = 'YOUR_BATCH_NAME'
91 |
92 |     print("Uploading images to Roboflow...This may take a few moments.\n")
93 |     print(f"Uploading from directory: {image_directory} | Project ID: {project_id} | Dataset Split for Upload: {split}")
94 |     print(f"Percent of images in the directory to be uploaded to the {split} split: {percentage} | Upload Batch Name: {batch_name}")
95 |     upload_images(image_directory, api_key, project_id, split, percentage, batch_name)
96 |
--------------------------------------------------------------------------------
/email/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2022.12.7
2 | chardet==4.0.0
3 | cycler==0.10.0
4 | idna==2.10
5 | kiwisolver>=1.3.1
6 | matplotlib
7 | numpy>=1.18.5
8 | opencv-python>=4.1.2
9 | Pillow>=7.1.2
10 | pyparsing==2.4.7
11 | python-dateutil
12 | python-dotenv
13 | requests
14 | roboflow>=1.0.5
15 | six
16 | urllib3>=1.26.6
17 | wget
18 | tqdm>=4.41.0
19 | PyYAML>=5.3.1
20 | requests_toolbelt
21 | beautifulsoup4==4.12.2
22 | google-api-core==2.11.0
23 | google-api-python-client==2.86.0
24 | google-auth==2.17.3
25 | google-auth-httplib2==0.1.0
26 | googleapis-common-protos==1.59.0
27 | httplib2==0.22.0
28 | lxml==4.9.2
29 | oauth2client==4.1.3
30 | pyasn1==0.5.0
31 | pyasn1-modules==0.3.0
32 | rsa==4.9
33 | simplegmail==4.1.1
34 | soupsieve==2.4.1
35 | uritemplate==4.1.1
36 | supervision>=0.6.0
--------------------------------------------------------------------------------
/email/roboflow_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "ROBOFLOW_API_KEY": "API_KEY",
3 | "ROBOFLOW_MODEL": "package_detection-x2qlm/1",
4 | "ROBOFLOW_SIZE": 640,
5 |
6 | "simplegmail_config": {
7 | "to": "EMAIL_TO",
8 | "sender": "EMAIL_FROM",
9 | "subject": "Alert! Your Model has Detected Objects of Interest!",
10 |     "msg_html": "Hey [Name], your alert was triggered. Check out the attached image ASAP.",
11 | "attachments": ["overlayed_image.jpg", "raw_image.jpg"]
12 | }
13 | }
--------------------------------------------------------------------------------
/email/send_email.py:
--------------------------------------------------------------------------------
1 | import json
2 | import random
3 | from trigger import execute_trigger
4 |
5 |
6 |
7 | # load config file, /roboflow_config.json
8 | with open('roboflow_config.json') as f:
9 |     config = json.load(f)
10 |
11 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
12 | ROBOFLOW_MODEL = config["ROBOFLOW_MODEL"]
13 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
14 |
15 | import cv2
16 | import base64
17 | import numpy as np
18 | import requests
19 | import time
20 |
21 | # Construct the Roboflow Infer URL
22 | # (if running locally replace https://detect.roboflow.com/ with e.g. http://127.0.0.1:9001/)
23 | upload_url = "".join([
24 |     "https://detect.roboflow.com/",
25 |     ROBOFLOW_MODEL,
26 |     "?api_key=",
27 |     ROBOFLOW_API_KEY,
28 |     "&format=json"
29 | ])
30 |
31 | # Get webcam interface via opencv-python
32 | # Change '0' to '1' or '2' if it cannot find your webcam
33 | video = cv2.VideoCapture(0)
34 |
35 | # Given an array of predictions, check if there are any
36 | # predictions that seem to be "stacked" on top of each other.
37 | # If any stacks have 3 or more boxes, increment a counter, which
38 | # keeps track of how many frames so far have been detected for having
39 | # a stack of three or more boxes.
40 | # If 5 consecutive frames are flagged, return True, and reset our counter.
41 | past_frames_where_stacked = 0
42 | def process_preds(preds):
43 |     def check_stacks(pred, stacks):
44 |         for stack in stacks:
45 |             for box in stack:
46 |                 if(pred['x'] > (box['x'] - box['width'] / 2) and
47 |                         pred['x'] < (box['x'] + box['width'] / 2)):
48 |                     stack.append(pred)
49 |                     return True
50 |         return False
51 |
52 |     stacks = []
53 |
54 |     # iterate over each detected box. If it's found to be part of an
55 |     # existing stack, add it to that list. If it's not in any stack, add
56 |     # it as a new, separate stack.
57 |
58 |     for pred in preds:
59 |         if not check_stacks(pred, stacks):
60 |             stacks.append([pred])
61 |
62 |     print("========================")
63 |     print("Detected " + str(len(stacks)) + " stacks from " + str(len(preds)) + " packages.")
64 |     for i, stack in enumerate(stacks):
65 |         print(f'Stack {i+1} has {len(stack)} packages stacked.')
66 |
67 |
68 |     def check_if_any_stacks_over(stacks, threshold):
69 |         for stack in stacks:
70 |             if len(stack) > threshold-1:
71 |                 return True
72 |         return False
73 |
74 |     global past_frames_where_stacked
75 |     if check_if_any_stacks_over(stacks, 3):
76 |         past_frames_where_stacked += 1
77 |     else:
78 |         past_frames_where_stacked = 0
79 |
80 |     if past_frames_where_stacked > 5:
81 |         past_frames_where_stacked = 0
82 |         return True, stacks
83 |     else:
84 |         return False, stacks
85 |
86 | # Infer via the Roboflow Infer API and return the result
87 | colors = []
88 | def infer():
89 |     # Get the current image from the webcam
90 |     ret, img = video.read()
91 |
92 |     # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
93 |     height, width, channels = img.shape
94 |     scale = ROBOFLOW_SIZE / max(height, width)
95 |     img = cv2.resize(img, (round(scale * width), round(scale * height)))
96 |
97 |     # Encode image to base64 string
98 |     retval, buffer = cv2.imencode('.jpg', img)
99 |     img_str = base64.b64encode(buffer)
100 |
101 |     # Get prediction from Roboflow Infer API
102 |     resp = requests.post(upload_url, data=img_str, headers={
103 |         "Content-Type": "application/x-www-form-urlencoded"
104 |     }, stream=True).raw
105 |
106 |     try:
107 |         resp = json.loads(resp.read())
108 |     except:
109 |         print("Could not parse response.")
110 |
111 |     print(resp)
112 |
113 |     preds = resp["predictions"]
114 |     stacked, stacks = process_preds(preds)
115 |
116 |     original_img = img.copy()
117 |
118 |     global colors
119 |     while (len(colors)) < len(stacks):
120 |         colors.append((random.randrange(255), random.randrange(255), random.randrange(255)))
121 |
122 |     # Parse result image
123 |     for idx, stack in enumerate(stacks):
124 |         for box in stack:
125 |             x1 = round(box["x"] - box["width"] / 2)
126 |             x2 = round(box["x"] + box["width"] / 2)
127 |             y1 = round(box["y"] - box["height"] / 2)
128 |             y2 = round(box["y"] + box["height"] / 2)
129 |             cv2.rectangle(img, (x1, y1), (x2, y2), colors[idx], 5)
130 |
131 |     if stacked:
132 |         execute_trigger(img, original_img)
133 |
134 |     return img
135 |
136 | cv2.namedWindow('image', cv2.WINDOW_NORMAL)
137 | # Main loop; infers sequentially until you press "q"
138 | while 1:
139 |     # On "q" keypress, exit
140 |     if(cv2.waitKey(1) == ord('q')):
141 |         break
142 |
143 |     # Capture start time to calculate fps
144 |     start = time.time()
145 |
146 |     # Synchronously get a prediction from the Roboflow Infer API
147 |     image = infer()
148 |     # And display the inference results
149 |     cv2.imshow('image', image)
150 |
151 |     # Print frames per second
152 |     print((1/(time.time()-start)), " fps")
153 |
154 | # Release resources when finished
155 | video.release()
156 | cv2.destroyAllWindows()
157 |
--------------------------------------------------------------------------------
/email/trigger.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | from simplegmail import Gmail  # remember to pip install simplegmail
3 | import json
4 |
5 | gmail = Gmail()  # will open a browser window to ask you to log in and authenticate
6 |
7 | # load config file, /roboflow_config.json
8 | with open('roboflow_config.json') as f:
9 |     params = json.load(f)['simplegmail_config']
10 |
11 | # Defines a custom action to be taken when enough boxes are stacked.
12 | # We'll be sending an email with a photo of the stacked items.
13 | def execute_trigger(overlayed_image, raw_image):
14 |     cv2.imwrite("overlayed_image.jpg", overlayed_image)
15 |     cv2.imwrite("raw_image.jpg", raw_image)
16 |     print("Image successfully saved! Attempting to send email.")
17 |
18 |     message = gmail.send_message(**params)
--------------------------------------------------------------------------------
/figures/ColabNotebook_SaveFile.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/figures/ColabNotebook_SaveFile.png
--------------------------------------------------------------------------------
/figures/ResponseObjectFormat_JSON.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/figures/ResponseObjectFormat_JSON.png
--------------------------------------------------------------------------------
/figures/Visualized_ResponseObjectFormat_JSON.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/figures/Visualized_ResponseObjectFormat_JSON.png
--------------------------------------------------------------------------------
/figures/contact_us.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/figures/contact_us.png
--------------------------------------------------------------------------------
/figures/roboflow-cv-utilities-header.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/figures/roboflow-cv-utilities-header.png
--------------------------------------------------------------------------------
/images/blur_img.py:
--------------------------------------------------------------------------------
1 | from roboflow import Roboflow
2 | import cv2
3 | import os
4 |
5 |
6 | rf = Roboflow(api_key="INSERT_PRIVATE_API_KEY")
7 | project = rf.workspace("INSERT-WORKSPACE-ID").project("INSERT-PROJECT-ID")
8 | # REPLACE 1 with your (trained) model version number
9 | version = project.version(1)
10 | model = version.model
11 |
12 | # perform inference on the selected local image file
13 | file_location = "YOUR_IMAGE.jpg"
14 | predictions = model.predict(file_location, confidence=40, overlap=30)
15 | ## save prediction image - roboflow python sdk
16 | # predictions.save(f'inferenceResult_{os.path.basename(file_location)}')
17 | predictions_json = predictions.json()
18 | print(predictions_json)
19 |
20 | # blurring predicted bounding box areas with the OpenCV library
21 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
22 | img = cv2.imread(file_location)
23 | for bounding_box in predictions:
24 |     x0 = bounding_box['x'] - bounding_box['width'] / 2
25 |     x1 = bounding_box['x'] + bounding_box['width'] / 2
26 |     y0 = bounding_box['y'] - bounding_box['height'] / 2
27 |     y1 = bounding_box['y'] + bounding_box['height'] / 2
28 |     class_name = bounding_box['class']
29 |     confidence = bounding_box['confidence']
30 |     # position coordinates: start = (x0, y0), end = (x1, y1)
31 |     # color = BGR-value for bounding box color (OpenCV uses BGR ordering), (0,0,0) is "black"
32 |     # thickness = stroke width/thickness of bounding box
33 |     box = [(x0, y0), (x1, y1)]
34 |     blur_x = int(bounding_box['x'] - bounding_box['width'] / 2)
35 |     blur_y = int(bounding_box['y'] - bounding_box['height'] / 2)
36 |     blur_width = int(bounding_box['width'])
37 |     blur_height = int(bounding_box['height'])
38 |     ## region of interest (ROI), or area to blur
39 |     roi = img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width]
40 |
41 |     # ADD BLURRED BBOXES
42 |     # set blur to (31,31) or (51,51) based on amount of blur desired
43 |     blur_image = cv2.GaussianBlur(roi, (51,51), 0)
44 |     img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width] = blur_image
45 |     ## draw/place bounding boxes on image
46 |     # start_point = (int(x0), int(y0))
47 |     # end_point = (int(x1), int(y1))
48 |     # cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
49 |
50 |     (text_width, text_height), _ = cv2.getTextSize(
51 |         f"{class_name} | {confidence}",
52 |         fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
53 |
54 |     cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
55 |         thickness=-1)
56 |
57 |     text_location = (int(x0), int(y0))
58 |
59 |     cv2.putText(img, f"{class_name} | {confidence}",
60 |         text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
61 |         color=(255,255,255), thickness=2)
62 |
63 | cv2.imwrite(f'inferenceResult_{os.path.basename(file_location)}', img)
64 |
--------------------------------------------------------------------------------
/images/crop_img.py:
--------------------------------------------------------------------------------
1 | import os, glob, shutil
2 | from roboflow import Roboflow
3 | import json
4 | import cv2
5 |
6 |
7 | def cropBoxes(model, img_path, printJson = True, save_img = True, confidence = 40, overlap = 30):
8 | """
9 | This functio fills the bounding boxes on images passed for prediction to your Roboflow model endpoint and also saves the "unfilled" prediction image\n
10 | :param model1: Roboflow model object for the first inference pass\n
11 | :param img_path: string, path to the image for inference\n
12 | :param printJson: bool, default: True - prints the JSON values of the predictions from the second inference pass\n
13 | :param save_img: bool, default: True - saves the prediction image [crop] from the second inference pass\n
14 | :param confidence: int, default: 40 - minimum confidence level (%) to return predictions\n
15 | :param overlap: int, default: 30 - maximum prediction overlap (% bounding boxes can overlap prior to being considered the same box/detection)
16 | """
17 | ## for a directory (folder) of images
18 | if os.path.isdir(img_path):
19 | raw_data_location = img_path
20 | for raw_data_ext in ['.jpg', '.jpeg', 'png']:
21 | globbed_files = glob.glob(raw_data_location + '/*' + raw_data_ext)
22 | for img_file in globbed_files:
23 | crop_number = 0
24 | img = cv2.imread(img_file)
25 | ## perform inference on the selected image
26 | predictions = model.predict(img_file, confidence=confidence,
27 | overlap=overlap)
28 | predictions_json = predictions.json()
29 | if predictions_json['predictions'] == []:
30 |                     print(f"No predictions for {img_file} at confidence: {confidence} and overlap {overlap}")
31 |                 else:
32 |                     original_file = os.path.basename(img_file)
33 | predictions.save(os.curdir + f"/inference_images/inferred/{original_file}")
34 |                     ## cropping each detection from the image with NumPy array slicing
35 | ## https://docs.roboflow.com/inference/hosted-api#response-object-format
36 | for bounding_box in predictions:
37 | # defining crop area [height_of_cropArea:width_of_cropArea]
38 | # croppedArea = img[start_row:end_row, start_col:end_col]
39 | x0 = bounding_box['x'] - bounding_box['width'] / 2#start_column
40 | x1 = bounding_box['x'] + bounding_box['width'] / 2#end_column
41 | y0 = bounding_box['y'] - bounding_box['height'] / 2#start row
42 | y1 = bounding_box['y'] + bounding_box['height'] / 2#end_row
43 | class_name = bounding_box['class']
44 | croppedArea = img[int(y0):int(y1), int(x0):int(x1)]
45 | #confidence_score = bounding_box['confidence']#confidence score of prediction
46 |
47 | if save_img:
48 | if os.path.exists(os.curdir + f"/inference_images/cropBoxes/DetectedAs_{class_name}"
49 | ) is False:
50 | os.mkdir(os.curdir + f"/inference_images/cropBoxes/DetectedAs_{class_name}")
51 |
52 | filename = f"Crop{crop_number}_{original_file}"
53 | save_loc = f'./inference_images/cropBoxes/DetectedAs_{class_name}/' + filename
54 | print(filename, save_loc)
55 | print(img_file)
56 | cv2.imwrite(save_loc, croppedArea)
57 |
58 | print(f'Success! Saved to {save_loc}')
59 | crop_number+=1
60 |
61 | if printJson:
62 | print(f'\n{bounding_box}')
63 |
64 | ## runs if there is only 1 image file in the ./inference_images directory
65 | elif os.path.isfile(img_path):
66 | crop_number = 0
67 | img = cv2.imread(img_path)
68 | # perform inference on the selected image
69 | predictions = model.predict(img_path, confidence=confidence,
70 | overlap=overlap)
71 | predictions_json = predictions.json()
72 | if predictions_json['predictions'] == []:
73 | print(f"No predictions for {img_path} at confidence: {confidence} and overlap {overlap}")
74 | else:
75 | original_file = os.path.basename(img_path).split('/')[-1]
76 | predictions.save(os.curdir + f"/inference_images/inferred/{original_file}")
77 |             # cropping each detection from the image with NumPy array slicing
78 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
79 | for bounding_box in predictions:
80 | # defining crop area [height_of_cropArea:width_of_cropArea]
81 | # croppedArea = img[start_row:end_row, start_col:end_col]
82 | x0 = bounding_box['x'] - bounding_box['width'] / 2#start_column
83 | x1 = bounding_box['x'] + bounding_box['width'] / 2#end_column
84 | y0 = bounding_box['y'] - bounding_box['height'] / 2#start row
85 | y1 = bounding_box['y'] + bounding_box['height'] / 2#end_row
86 | class_name = bounding_box['class']
87 | croppedArea = img[int(y0):int(y1), int(x0):int(x1)]
88 | #confidence_score = bounding_box['confidence']#confidence score of prediction
89 |
90 | if save_img:
91 | if os.path.exists(os.curdir + f"/inference_images/cropBoxes/DetectedAs_{class_name}"
92 | ) is False:
93 | os.mkdir(os.curdir + f"/inference_images/cropBoxes/DetectedAs_{class_name}")
94 |
95 | filename = f"Crop{crop_number}_{original_file}"
96 | save_loc = f'./inference_images/cropBoxes/DetectedAs_{class_name}/' + filename
97 | print(filename, save_loc)
98 | cv2.imwrite(save_loc, croppedArea)
99 |
100 | print(f'Success! Saved to {save_loc}')
101 | crop_number+=1
102 |
103 | if printJson:
104 | print(f'\n{bounding_box}')
105 |
106 | else:
107 | return print('Please input a valid path to an image or directory (folder)')
108 |
109 |
110 | ## load config file for the models
111 | with open(os.pardir + '/roboflow_config.json') as f:
112 | config = json.load(f)
113 |
114 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
115 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
116 | ROBOFLOW_PROJECT_ID = config["ROBOFLOW_PROJECT_ID"]
117 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
118 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
119 |
120 | f.close()
121 |
122 | ## obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
123 | ## create Roboflow object: https://docs.roboflow.com/python
124 | rf = Roboflow(api_key=ROBOFLOW_API_KEY)
125 | workspace = rf.workspace(ROBOFLOW_WORKSPACE_ID)
126 | project = workspace.project(ROBOFLOW_PROJECT_ID)
127 | version = project.version(ROBOFLOW_VERSION_NUMBER)
128 | model = version.model
129 |
130 | ## creating a directory to add images we wish to infer
131 | if os.path.exists(os.curdir + '/inference_images') is False:
132 | os.mkdir(os.curdir + '/inference_images')
133 |
134 | ## creating directory to place Roboflow prediction images
135 | if os.path.exists(os.curdir + '/inference_images/inferred') is False:
136 | os.mkdir(os.curdir + '/inference_images/inferred')
137 |
138 | ## creating directory to place cropped bounding box images
139 | if os.path.exists(os.curdir + '/inference_images/cropBoxes') is False:
140 | os.mkdir(os.curdir + '/inference_images/cropBoxes')
141 |
142 | for raw_data_ext in ['.jpg', '.jpeg', '.png']:
143 | globbed_files = glob.glob(os.curdir + '/*' + raw_data_ext)
144 | for img_file in globbed_files:
145 | shutil.move(img_file, os.curdir + '/inference_images')
146 |
147 | cropBoxes(model, './inference_images', confidence = 40, overlap = 30)
148 |
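149 | ## Minimal usage sketch for a single image (hypothetical filename), assuming the
150 | ## model object loaded above; uncomment to run on one file instead of the folder:
151 | # cropBoxes(model, './inference_images/example.jpg', printJson=False, save_img=True)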
--------------------------------------------------------------------------------
/images/draw_img.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import cv2
4 | import numpy as np
5 | from roboflow import Roboflow
6 |
7 | import supervision as sv
8 | from supervision.draw.color import Color
9 | from supervision.draw.color import ColorPalette
10 | from supervision import Detections, BoxAnnotator
11 |
12 |
13 | def load_roboflow_model(api_key, workspace_id, project_id, version_number):
14 |
15 | # authenticate to your Roboflow account and load your model
16 | rf = Roboflow(api_key=api_key)
17 | project = rf.workspace(workspace_id).project(project_id)
18 | version = project.version(version_number)
19 | model = version.model
20 |
21 | return project, model
22 |
23 | def make_prediction(project, model, image_path, confidence, overlap):
24 |
25 | # load the image and make predictions with your model
26 | img = cv2.imread(image_path)
27 | predictions = model.predict(image_path, confidence=confidence, overlap=overlap)
28 | predictions_json = predictions.json()
29 | roboflow_xyxy = np.empty((0, 4))
30 | predicted_classes = []
31 | for bounding_box in predictions:
32 | x1 = bounding_box['x'] - bounding_box['width'] / 2
33 | x2 = bounding_box['x'] + bounding_box['width'] / 2
34 | y1 = bounding_box['y'] - bounding_box['height'] / 2
35 | y2 = bounding_box['y'] + bounding_box['height'] / 2
36 |         roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))
37 | predicted_classes.append(bounding_box['class'])
38 |
39 | # class_name = bounding_box['class']
40 | # confidence = bounding_box['confidence']
41 |     sv_xyxy = Detections.from_roboflow(
42 |         predictions_json, class_list=list(project.classes.keys()))
43 |
44 | return img, predictions_json, sv_xyxy, predicted_classes
45 |
46 | def draw_bounding_boxes(image, sv_xyxy, class_ids, add_labels):
47 |
48 |     # set add_labels to True to show the label for each object
49 |     image_with_boxes = BoxAnnotator(
50 |         color=ColorPalette.default(), thickness=2).annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)
51 |
52 | return image_with_boxes
53 |
54 | def save_image(image, original_image_path, output_directory="results"):
55 |
56 | os.makedirs(output_directory, exist_ok=True)
57 | filename = os.path.basename(original_image_path)
58 | output_path = os.path.join(output_directory, f"result_{filename}")
59 | cv2.imwrite(output_path, image)
60 |
61 | return output_path
62 |
63 | def main():
64 | ## Authentication info to load the model. The config file is located at ../roboflow_config.json
65 | ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25
66 | ## Workspace ID: "roboflow-universe-projects", Project ID: "construction-site-safety", Version Number: 25
67 | with open(os.pardir + '/roboflow_config.json') as f:
68 | config = json.load(f)
69 |
70 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
71 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
72 | ROBOFLOW_PROJECT_ID = config["ROBOFLOW_PROJECT_ID"]
73 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
74 |
75 | f.close()
76 |
77 | api_key = ROBOFLOW_API_KEY
78 | workspace_id = ROBOFLOW_WORKSPACE_ID
79 | project_id = ROBOFLOW_PROJECT_ID
80 | version_number = ROBOFLOW_VERSION_NUMBER
81 | project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)
82 |
83 | # Make a prediction on the specified image file
84 | image_path = "/path/to/image.jpg"
85 | confidence = 40
86 | overlap = 30
87 | image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(
88 | project, model, image_path, confidence, overlap)
89 |
90 | print(predictions_json)
91 |
92 |     ## Set add_labels to True to draw class labels on the bounding boxes
93 |     add_labels = False
94 |     # annotate all detections in one call; a loop here would leave image_with_boxes undefined when there are no detections
95 |     image_with_boxes = draw_bounding_boxes(image, pred_sv_xyxy, predicted_classes, add_labels)
96 |
97 | # Save the image with bounding boxes for the detected objects drawn on them
98 | output_path = save_image(image_with_boxes, image_path)
99 |
100 | print(f"The image has been processed and saved to {output_path}")
101 |
102 | if __name__ == "__main__":
103 | main()
104 |
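105 | ## Note: supervision's Detections store boxes as corner-format (x1, y1, x2, y2)
106 | ## coordinates, while the Roboflow response reports center x/y plus width/height;
107 | ## hence the conversion in make_prediction() above.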
--------------------------------------------------------------------------------
/images/fill_img.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import cv2
4 | import numpy as np
5 | from roboflow import Roboflow
6 |
7 | import supervision as sv
8 | from supervision.draw.color import Color
9 | from supervision.draw.color import ColorPalette
10 | from supervision import Detections, BoxAnnotator
11 |
12 |
13 | def load_roboflow_model(api_key, workspace_id, project_id, version_number):
14 |
15 | # authenticate to your Roboflow account and load your model
16 | rf = Roboflow(api_key=api_key)
17 | project = rf.workspace(workspace_id).project(project_id)
18 | version = project.version(version_number)
19 | model = version.model
20 |
21 | return project, model
22 |
23 | def make_prediction(project, model, image_path, confidence, overlap):
24 |
25 | # load the image and make predictions with your model
26 | img = cv2.imread(image_path)
27 | predictions = model.predict(image_path, confidence=confidence, overlap=overlap)
28 | predictions_json = predictions.json()
29 | roboflow_xyxy = np.empty((0, 4))
30 | predicted_classes = []
31 | for bounding_box in predictions:
32 | x1 = bounding_box['x'] - bounding_box['width'] / 2
33 | x2 = bounding_box['x'] + bounding_box['width'] / 2
34 | y1 = bounding_box['y'] - bounding_box['height'] / 2
35 | y2 = bounding_box['y'] + bounding_box['height'] / 2
36 |         roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))
37 | predicted_classes.append(bounding_box['class'])
38 |
39 | # class_name = bounding_box['class']
40 | # confidence = bounding_box['confidence']
41 |     sv_xyxy = Detections.from_roboflow(
42 |         predictions_json, class_list=list(project.classes.keys()))
43 |
44 | return img, predictions_json, sv_xyxy, predicted_classes
45 |
46 | def fill_bounding_boxes(image, sv_xyxy, class_ids, add_labels):
47 |
48 |     # set add_labels to True to show the label for each object
49 |     image_with_boxes = BoxAnnotator(
50 |         color=Color.black(), thickness=-1).annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)
51 |
52 | return image_with_boxes
53 |
54 | def save_image(image, original_image_path, output_directory="results"):
55 |
56 | os.makedirs(output_directory, exist_ok=True)
57 | filename = os.path.basename(original_image_path)
58 | output_path = os.path.join(output_directory, f"result_{filename}")
59 | cv2.imwrite(output_path, image)
60 |
61 | return output_path
62 |
63 | def main():
64 | ## Authentication info to load the model. The config file is located at ../roboflow_config.json
65 | ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25
66 | ## Workspace ID: "roboflow-universe-projects", Project ID: "construction-site-safety", Version Number: 25
67 | with open(os.pardir + '/roboflow_config.json') as f:
68 | config = json.load(f)
69 |
70 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
71 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
72 | ROBOFLOW_PROJECT_ID = config["ROBOFLOW_PROJECT_ID"]
73 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
74 |
75 | f.close()
76 |
77 | api_key = ROBOFLOW_API_KEY
78 | workspace_id = ROBOFLOW_WORKSPACE_ID
79 | project_id = ROBOFLOW_PROJECT_ID
80 | version_number = ROBOFLOW_VERSION_NUMBER
81 | project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)
82 |
83 | # Make a prediction on the specified image file
84 | image_path = "/path/to/image.jpg"
85 | confidence = 40
86 | overlap = 30
87 | image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(
88 | project, model, image_path, confidence, overlap)
89 |
90 | print(predictions_json)
91 |
92 |     ## Set add_labels to True to draw class labels on the bounding boxes
93 |     add_labels = False
94 |     # annotate all detections in one call; a loop here would leave image_with_boxes undefined when there are no detections
95 |     image_with_boxes = fill_bounding_boxes(image, pred_sv_xyxy, predicted_classes, add_labels)
96 |
97 | # Save the image with bounding boxes for the detected objects drawn on them
98 | output_path = save_image(image_with_boxes, image_path)
99 |
100 | print(f"The image has been processed and saved to {output_path}")
101 |
102 | if __name__ == "__main__":
103 | main()
104 |
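105 | ## Note: thickness=-1 follows the OpenCV filled-rectangle convention, so each
106 | ## detection is redacted with a solid box rather than an outline; swapping in
107 | ## another supervision Color (e.g. Color.white()) changes the fill color.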
--------------------------------------------------------------------------------
/images/metal_test.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/images/metal_test.jpg
--------------------------------------------------------------------------------
/images/roboflow_cv_utils_draw_img.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": []
7 | },
8 | "kernelspec": {
9 | "name": "python3",
10 | "display_name": "Python 3"
11 | },
12 | "language_info": {
13 | "name": "python"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "source": [
20 | "## Draw Bounding Boxes on Images with the Roboflow Python SDK and Supervision\n",
21 | "\n",
22 | "(header image omitted)"
29 | ],
30 | "metadata": {
31 | "id": "cfsYWzUw55Iy"
32 | }
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "source": [
37 | "## Installing Dependencies"
38 | ],
39 | "metadata": {
40 | "id": "pjMiL0i0lZLF"
41 | }
42 | },
43 | {
44 | "cell_type": "code",
45 | "source": [
46 | "!pip install -q \"roboflow>=1.0.5\"\n",
47 | "!pip install -q \"supervision>=0.6.0\""
48 | ],
49 | "metadata": {
50 | "id": "XTCBgNLMlYhI"
51 | },
52 | "execution_count": null,
53 | "outputs": []
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "source": [
58 | "### Setting up our utility functions"
59 | ],
60 | "metadata": {
61 | "id": "ej5qGDcnlZox"
62 | }
63 | },
64 | {
65 | "cell_type": "code",
66 | "source": [
67 | "import os\n",
68 | "import json\n",
69 | "import cv2\n",
70 | "import numpy as np\n",
71 | "import supervision as sv\n",
72 | "from roboflow import Roboflow\n",
73 | "\n",
74 | "\n",
75 | "def load_roboflow_model(api_key, workspace_id, project_id, version_number):\n",
76 | "\n",
77 | " # authenticate to your Roboflow account and load your model\n",
78 | " rf = Roboflow(api_key=api_key)\n",
79 | " project = rf.workspace(workspace_id).project(project_id)\n",
80 | " version = project.version(version_number)\n",
81 | " model = version.model\n",
82 | " \n",
83 | " return project, model"
84 | ],
85 | "metadata": {
86 | "id": "9_e5mcsTlTuY"
87 | },
88 | "execution_count": null,
89 | "outputs": []
90 | },
91 | {
92 | "cell_type": "markdown",
93 | "source": [
94 | "Receiving Model Predictions"
95 | ],
96 | "metadata": {
97 | "id": "d2Pj0IJLlZ_J"
98 | }
99 | },
100 | {
101 | "cell_type": "code",
102 | "source": [
103 | "def make_prediction(project, model, image_path, confidence, overlap):\n",
104 | "\n",
105 | " # load the image and make predictions with your model\n",
106 | " img = cv2.imread(image_path)\n",
107 | " predictions = model.predict(image_path, confidence=confidence, overlap=overlap)\n",
108 | " predictions_json = predictions.json()\n",
109 | " roboflow_xyxy = np.empty((0, 4))\n",
110 | " predicted_classes = []\n",
111 | " for bounding_box in predictions:\n",
112 | " x1 = bounding_box['x'] - bounding_box['width'] / 2\n",
113 | " x2 = bounding_box['x'] + bounding_box['width'] / 2\n",
114 | " y1 = bounding_box['y'] - bounding_box['height'] / 2\n",
115 | " y2 = bounding_box['y'] + bounding_box['height'] / 2\n",
116 | "        roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))\n",
117 | " predicted_classes.append(bounding_box['class'])\n",
118 | " \n",
119 | " # class_name = bounding_box['class']\n",
120 | " # confidence = bounding_box['confidence']\n",
121 | "    sv_xyxy = sv.Detections.from_roboflow(\n",
122 | "        predictions_json, class_list=list(project.classes.keys()))\n",
123 | "\n",
124 | " return img, predictions_json, sv_xyxy, predicted_classes"
125 | ],
126 | "metadata": {
127 | "id": "OYWgcM3SlElH"
128 | },
129 | "execution_count": null,
130 | "outputs": []
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "source": [
135 | "#### [Supervision](https://github.com/roboflow/supervision/)\n",
136 | "A set of easy-to-use utils that will come in handy in any computer vision project. Supervision is still in pre-release stage. 🚧 Keep your eyes open for potential bugs and be aware that at this stage our API is still fluid and may change.\n",
137 | "\n",
138 | "(image omitted)\n",
145 | "\n",
146 | "#### [Roboflow Response Object Format](https://docs.roboflow.com/inference/hosted-api#response-object-format) (Object Detection)\n",
147 | "Draw Bounding Boxes on Images with the Roboflow Python SDK and Supervision\n",
148 | "\n",
149 | "\n",
150 | "(response format image omitted)"
157 | ],
158 | "metadata": {
159 | "id": "LPCP26FplcPs"
160 | }
161 | },
162 | {
163 | "cell_type": "code",
164 | "source": [
165 | "def draw_bounding_boxes(image, sv_xyxy, class_ids, add_labels):\n",
166 | "\n",
167 | "    # set add_labels to True to show the label for each object\n",
168 | "    image_with_boxes = sv.BoxAnnotator().annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)\n",
169 | "\n",
170 | " return image_with_boxes"
171 | ],
172 | "metadata": {
173 | "id": "-sHP4xnklI80"
174 | },
175 | "execution_count": null,
176 | "outputs": []
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "source": [
181 | "Saving the Image with Bounding Boxes"
182 | ],
183 | "metadata": {
184 | "id": "7WWr6O35lbAj"
185 | }
186 | },
187 | {
188 | "cell_type": "code",
189 | "source": [
190 | "def save_image(image, original_image_path, output_directory=\"results\"):\n",
191 | "\n",
192 | " os.makedirs(output_directory, exist_ok=True)\n",
193 | " filename = os.path.basename(original_image_path)\n",
194 | " output_path = os.path.join(output_directory, f\"result_{filename}\")\n",
195 | " cv2.imwrite(output_path, image)\n",
196 | " return output_path"
197 | ],
198 | "metadata": {
199 | "id": "2Fap9bCBlMEx"
200 | },
201 | "execution_count": null,
202 | "outputs": []
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "source": [
207 | "#### Don't forget to add your image file(s)!"
208 | ],
209 | "metadata": {
210 | "id": "E3m00687mrP-"
211 | }
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "source": [
216 | "### Running the whole thing!"
217 | ],
218 | "metadata": {
219 | "id": "Ac53DIQEli_w"
220 | }
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {
226 | "colab": {
227 | "base_uri": "https://localhost:8080/"
228 | },
229 | "id": "lg98goeWlCrQ",
230 | "outputId": "39f5052d-b49c-4a5f-f652-241df0d10130"
231 | },
232 | "outputs": [
233 | {
234 | "output_type": "stream",
235 | "name": "stdout",
236 | "text": [
237 | "loading Roboflow workspace...\n",
238 | "loading Roboflow project...\n",
239 | "{'predictions': [{'x': 794, 'y': 469, 'width': 668, 'height': 433, 'confidence': 0.9207110404968262, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 460, 'y': 23, 'width': 60, 'height': 21, 'confidence': 0.6908699870109558, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 56, 'y': 323, 'width': 112, 'height': 376, 'confidence': 0.5661516189575195, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 584, 'y': 180, 'width': 28, 'height': 31, 'confidence': 0.4035103917121887, 'class': 'Person', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}], 'image': {'width': '1280', 'height': '720'}}\n",
240 | "The image has been processed and saved to results/result_construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg\n"
241 | ]
242 | }
243 | ],
244 | "source": [
245 | "def main():\n",
246 | "\n",
247 | " ## Authentication info to load the model\n",
248 | " ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25\n",
249 | " ## Workspace ID: \"roboflow-universe-projects\", Project ID: \"construction-site-safety\", Version Number: 25\n",
250 | " api_key = \"ROBOFLOW_PRIVATE_API_KEY\"\n",
251 | " workspace_id = \"ROBOFLOW_WORKSPACE_ID\"\n",
252 | "    project_id = \"ROBOFLOW_PROJECT_ID\"\n",
253 | " version_number = \"ROBOFLOW_VERSION_NUMBER\"\n",
254 | " project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)\n",
255 | "\n",
256 | " # Make a prediction on the specified image file\n",
257 | " image_path = \"/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg\"\n",
258 | " confidence = 40\n",
259 | " overlap = 30\n",
260 | " image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(\n",
261 | " project, model, image_path, confidence, overlap)\n",
262 | "\n",
263 | " print(predictions_json)\n",
264 | "\n",
265 | "    ## Set add_labels to True to draw class labels on the bounding boxes\n",
266 | "    add_labels = False\n",
267 | "    # annotate all detections in one call; a loop would leave image_with_boxes undefined when there are no detections\n",
268 | "    image_with_boxes = draw_bounding_boxes(image, pred_sv_xyxy, predicted_classes, add_labels)\n",
269 | "\n",
270 | " # Save the image with bounding boxes for the detected objects drawn on them\n",
271 | " output_path = save_image(image_with_boxes, image_path)\n",
272 | "\n",
273 | " print(f\"The image has been processed and saved to {output_path}\")\n",
274 | "\n",
275 | "if __name__ == \"__main__\":\n",
276 | " main()\n"
277 | ]
278 | }
279 | ]
280 | }
--------------------------------------------------------------------------------
/images/roboflow_cv_utils_fill_img.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": []
7 | },
8 | "kernelspec": {
9 | "name": "python3",
10 | "display_name": "Python 3"
11 | },
12 | "language_info": {
13 | "name": "python"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "source": [
20 | "## Fill Bounding Boxes on Images with the Roboflow Python SDK and Supervision\n",
21 | "\n",
22 | "(header image omitted)"
29 | ],
30 | "metadata": {
31 | "id": "cfsYWzUw55Iy"
32 | }
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "source": [
37 | "## Installing Dependencies"
38 | ],
39 | "metadata": {
40 | "id": "pjMiL0i0lZLF"
41 | }
42 | },
43 | {
44 | "cell_type": "code",
45 | "source": [
46 | "!pip install -q \"roboflow>=1.0.5\"\n",
47 | "!pip install -q \"supervision>=0.6.0\""
48 | ],
49 | "metadata": {
50 | "id": "XTCBgNLMlYhI"
51 | },
52 | "execution_count": 1,
53 | "outputs": []
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "source": [
58 | "### Setting up our utility functions"
59 | ],
60 | "metadata": {
61 | "id": "ej5qGDcnlZox"
62 | }
63 | },
64 | {
65 | "cell_type": "code",
66 | "source": [
67 | "import os\n",
68 | "import json\n",
69 | "import cv2\n",
70 | "import numpy as np\n",
71 | "from roboflow import Roboflow\n",
72 | "\n",
73 | "import supervision as sv\n",
74 | "from supervision.draw.color import Color\n",
75 | "from supervision.draw.color import ColorPalette\n",
76 | "from supervision import Detections, BoxAnnotator\n",
77 | "\n",
78 | "\n",
79 | "def load_roboflow_model(api_key, workspace_id, project_id, version_number):\n",
80 | "\n",
81 | " # authenticate to your Roboflow account and load your model\n",
82 | " rf = Roboflow(api_key=api_key)\n",
83 | " project = rf.workspace(workspace_id).project(project_id)\n",
84 | " version = project.version(version_number)\n",
85 | " model = version.model\n",
86 | " \n",
87 | " return project, model"
88 | ],
89 | "metadata": {
90 | "id": "9_e5mcsTlTuY"
91 | },
92 | "execution_count": 8,
93 | "outputs": []
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "source": [
98 | "Receiving Model Predictions"
99 | ],
100 | "metadata": {
101 | "id": "d2Pj0IJLlZ_J"
102 | }
103 | },
104 | {
105 | "cell_type": "code",
106 | "source": [
107 | "def make_prediction(project, model, image_path, confidence, overlap):\n",
108 | "\n",
109 | " # load the image and make predictions with your model\n",
110 | " img = cv2.imread(image_path)\n",
111 | " predictions = model.predict(image_path, confidence=confidence, overlap=overlap)\n",
112 | " predictions_json = predictions.json()\n",
113 | " roboflow_xyxy = np.empty((0, 4))\n",
114 | " predicted_classes = []\n",
115 | " for bounding_box in predictions:\n",
116 | " x1 = bounding_box['x'] - bounding_box['width'] / 2\n",
117 | " x2 = bounding_box['x'] + bounding_box['width'] / 2\n",
118 | " y1 = bounding_box['y'] - bounding_box['height'] / 2\n",
119 | " y2 = bounding_box['y'] + bounding_box['height'] / 2\n",
120 | "        roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))\n",
121 | " predicted_classes.append(bounding_box['class'])\n",
122 | " \n",
123 | " # class_name = bounding_box['class']\n",
124 | " # confidence = bounding_box['confidence']\n",
125 | "    sv_xyxy = Detections.from_roboflow(\n",
126 | "        predictions_json, class_list=list(project.classes.keys()))\n",
127 | "\n",
128 | " return img, predictions_json, sv_xyxy, predicted_classes"
129 | ],
130 | "metadata": {
131 | "id": "OYWgcM3SlElH"
132 | },
133 | "execution_count": 9,
134 | "outputs": []
135 | },
136 | {
137 | "cell_type": "markdown",
138 | "source": [
139 | "#### [Supervision](https://github.com/roboflow/supervision/)\n",
140 | "A set of easy-to-use utils that will come in handy in any computer vision project. Supervision is still in pre-release stage. 🚧 Keep your eyes open for potential bugs and be aware that at this stage our API is still fluid and may change.\n",
141 | "\n",
142 | "(image omitted)\n",
149 | "\n",
150 | "#### [Roboflow Response Object Format](https://docs.roboflow.com/inference/hosted-api#response-object-format) (Object Detection)\n",
151 | "Draw Bounding Boxes on Images with the Roboflow Python SDK and Supervision\n",
152 | "\n",
153 | "\n",
154 | "(response format image omitted)"
161 | ],
162 | "metadata": {
163 | "id": "LPCP26FplcPs"
164 | }
165 | },
166 | {
167 | "cell_type": "code",
168 | "source": [
169 | "def fill_bounding_boxes(image, sv_xyxy, class_ids, add_labels):\n",
170 | "\n",
171 | "    # set add_labels to True to show the label for each object\n",
172 | "    image_with_boxes = BoxAnnotator(\n",
173 | "        color=Color.black(), thickness=-1).annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)\n",
174 | "\n",
175 | " return image_with_boxes"
176 | ],
177 | "metadata": {
178 | "id": "-sHP4xnklI80"
179 | },
180 | "execution_count": 21,
181 | "outputs": []
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "source": [
186 | "Saving the Image with Bounding Boxes"
187 | ],
188 | "metadata": {
189 | "id": "7WWr6O35lbAj"
190 | }
191 | },
192 | {
193 | "cell_type": "code",
194 | "source": [
195 | "def save_image(image, original_image_path, output_directory=\"results\"):\n",
196 | "\n",
197 | " os.makedirs(output_directory, exist_ok=True)\n",
198 | " filename = os.path.basename(original_image_path)\n",
199 | " output_path = os.path.join(output_directory, f\"result_{filename}\")\n",
200 | " cv2.imwrite(output_path, image)\n",
201 | " return output_path"
202 | ],
203 | "metadata": {
204 | "id": "2Fap9bCBlMEx"
205 | },
206 | "execution_count": 11,
207 | "outputs": []
208 | },
209 | {
210 | "cell_type": "markdown",
211 | "source": [
212 | "#### Don't forget to add your image file(s)!"
213 | ],
214 | "metadata": {
215 | "id": "E3m00687mrP-"
216 | }
217 | },
218 | {
219 | "cell_type": "markdown",
220 | "source": [
221 | "### Running the whole thing!"
222 | ],
223 | "metadata": {
224 | "id": "Ac53DIQEli_w"
225 | }
226 | },
227 | {
228 | "cell_type": "code",
229 | "execution_count": 20,
230 | "metadata": {
231 | "colab": {
232 | "base_uri": "https://localhost:8080/"
233 | },
234 | "id": "lg98goeWlCrQ",
235 | "outputId": "c41a08e4-9ed8-45d3-dd46-3e98927c5ee7"
236 | },
237 | "outputs": [
238 | {
239 | "output_type": "stream",
240 | "name": "stdout",
241 | "text": [
242 | "loading Roboflow workspace...\n",
243 | "loading Roboflow project...\n",
244 | "{'predictions': [{'x': 1113, 'y': 363, 'width': 334, 'height': 280, 'confidence': 0.9350578784942627, 'class': 'machinery', 'image_path': '/content/construction-2-_mp4-162_jpg.rf.edc08e7528429b4315f59637aaf65cf0.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 307, 'y': 314, 'width': 582, 'height': 460, 'confidence': 0.8053478002548218, 'class': 'machinery', 'image_path': '/content/construction-2-_mp4-162_jpg.rf.edc08e7528429b4315f59637aaf65cf0.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 905, 'y': 272, 'width': 722, 'height': 481, 'confidence': 0.7803330421447754, 'class': 'machinery', 'image_path': '/content/construction-2-_mp4-162_jpg.rf.edc08e7528429b4315f59637aaf65cf0.jpg', 'prediction_type': 'ObjectDetectionModel'}], 'image': {'width': '1280', 'height': '720'}}\n",
245 | "The image has been processed and saved to results/result_construction-2-_mp4-162_jpg.rf.edc08e7528429b4315f59637aaf65cf0.jpg\n"
246 | ]
247 | }
248 | ],
249 | "source": [
250 | "def main():\n",
251 | "\n",
252 | " ## Authentication info to load the model\n",
253 | " ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25\n",
254 | " ## Workspace ID: \"roboflow-universe-projects\", Project ID: \"construction-site-safety\", Version Number: 25\n",
255 | " api_key = \"ROBOFLOW_PRIVATE_API_KEY\"\n",
256 | " workspace_id = \"ROBOFLOW_WORKSPACE_ID\"\n",
257 | "    project_id = \"ROBOFLOW_PROJECT_ID\"\n",
258 | " version_number = \"ROBOFLOW_VERSION_NUMBER\"\n",
259 | " project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)\n",
260 | "\n",
261 | " # Make a prediction on the specified image file\n",
262 | " image_path = \"/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg\"\n",
263 | " confidence = 40\n",
264 | " overlap = 30\n",
265 | " image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(\n",
266 | " project, model, image_path, confidence, overlap)\n",
267 | "\n",
268 | " print(predictions_json)\n",
269 | "\n",
270 | "    ## Set add_labels to True to draw class labels on the bounding boxes\n",
271 | "    add_labels = False\n",
272 | "    # annotate all detections in one call; a loop would leave image_with_boxes undefined when there are no detections\n",
273 | "    image_with_boxes = fill_bounding_boxes(image, pred_sv_xyxy, predicted_classes, add_labels)\n",
274 | "\n",
275 | " # Save the image with bounding boxes for the detected objects drawn on them\n",
276 | " output_path = save_image(image_with_boxes, image_path)\n",
277 | "\n",
278 | " print(f\"The image has been processed and saved to {output_path}\")\n",
279 | "\n",
280 | "if __name__ == \"__main__\":\n",
281 | " main()\n"
282 | ]
283 | }
284 | ]
285 | }
--------------------------------------------------------------------------------
/images/roboflow_cv_utils_writetext_img.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": []
7 | },
8 | "kernelspec": {
9 | "name": "python3",
10 | "display_name": "Python 3"
11 | },
12 | "language_info": {
13 | "name": "python"
14 | }
15 | },
16 | "cells": [
17 | {
18 | "cell_type": "markdown",
19 | "source": [
20 | "## Draw Bounding Boxes on Images and Write Class Labels with the Roboflow Python SDK and Supervision\n",
21 | "\n",
22 | "(header image omitted)"
29 | ],
30 | "metadata": {
31 | "id": "cfsYWzUw55Iy"
32 | }
33 | },
34 | {
35 | "cell_type": "markdown",
36 | "source": [
37 | "## Installing Dependencies"
38 | ],
39 | "metadata": {
40 | "id": "pjMiL0i0lZLF"
41 | }
42 | },
43 | {
44 | "cell_type": "code",
45 | "source": [
46 | "!pip install -q \"roboflow>=1.0.5\"\n",
47 | "!pip install -q \"supervision>=0.6.0\""
48 | ],
49 | "metadata": {
50 | "id": "XTCBgNLMlYhI"
51 | },
52 | "execution_count": null,
53 | "outputs": []
54 | },
55 | {
56 | "cell_type": "markdown",
57 | "source": [
58 | "### Setting up our utility functions"
59 | ],
60 | "metadata": {
61 | "id": "ej5qGDcnlZox"
62 | }
63 | },
64 | {
65 | "cell_type": "code",
66 | "source": [
67 | "import os\n",
68 | "import json\n",
69 | "import cv2\n",
70 | "import numpy as np\n",
71 | "import supervision as sv\n",
72 | "from roboflow import Roboflow\n",
73 | "\n",
74 | "\n",
75 | "def load_roboflow_model(api_key, workspace_id, project_id, version_number):\n",
76 | "\n",
77 | " # authenticate to your Roboflow account and load your model\n",
78 | " rf = Roboflow(api_key=api_key)\n",
79 | " project = rf.workspace(workspace_id).project(project_id)\n",
80 | " version = project.version(version_number)\n",
81 | " model = version.model\n",
82 | " \n",
83 | " return project, model"
84 | ],
85 | "metadata": {
86 | "id": "9_e5mcsTlTuY"
87 | },
88 | "execution_count": null,
89 | "outputs": []
90 | },
91 | {
92 | "cell_type": "markdown",
93 | "source": [
94 | "Receiving Model Predictions"
95 | ],
96 | "metadata": {
97 | "id": "d2Pj0IJLlZ_J"
98 | }
99 | },
100 | {
101 | "cell_type": "code",
102 | "source": [
103 | "def make_prediction(project, model, image_path, confidence, overlap):\n",
104 | "\n",
105 | " # load the image and make predictions with your model\n",
106 | " img = cv2.imread(image_path)\n",
107 | " predictions = model.predict(image_path, confidence=confidence, overlap=overlap)\n",
108 | " predictions_json = predictions.json()\n",
109 | " roboflow_xyxy = np.empty((0, 4))\n",
110 | " predicted_classes = []\n",
111 | " for bounding_box in predictions:\n",
112 | " x1 = bounding_box['x'] - bounding_box['width'] / 2\n",
113 | " x2 = bounding_box['x'] + bounding_box['width'] / 2\n",
114 | " y1 = bounding_box['y'] - bounding_box['height'] / 2\n",
115 | " y2 = bounding_box['y'] + bounding_box['height'] / 2\n",
116 | "        roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))\n",
117 | " predicted_classes.append(bounding_box['class'])\n",
118 | " \n",
119 | " # class_name = bounding_box['class']\n",
120 | " # confidence = bounding_box['confidence']\n",
121 | "    sv_xyxy = sv.Detections.from_roboflow(\n",
122 | "        predictions_json, class_list=list(project.classes.keys()))\n",
123 | "\n",
124 | " return img, predictions_json, sv_xyxy, predicted_classes"
125 | ],
126 | "metadata": {
127 | "id": "OYWgcM3SlElH"
128 | },
129 | "execution_count": null,
130 | "outputs": []
131 | },
132 | {
133 | "cell_type": "markdown",
134 | "source": [
135 | "#### [Supervision](https://github.com/roboflow/supervision/)\n",
136 | "A set of easy-to-use utils that will come in handy in any computer vision project. Supervision is still in pre-release stage. 🚧 Keep your eyes open for potential bugs and be aware that at this stage our API is still fluid and may change.\n",
137 | "\n",
138 | "(image omitted)\n",
145 | "\n",
146 | "#### [Roboflow Response Object Format](https://docs.roboflow.com/inference/hosted-api#response-object-format) (Object Detection)\n",
147 | "Draw Bounding Boxes on Images with the Roboflow Python SDK and Supervision\n",
148 | "\n",
149 | "\n",
150 | "(response format image omitted)"
157 | ],
158 | "metadata": {
159 | "id": "LPCP26FplcPs"
160 | }
161 | },
162 | {
163 | "cell_type": "code",
164 | "source": [
165 | "def draw_boxes_and_labels(image, sv_xyxy, class_ids, add_labels):\n",
166 | "\n",
167 | "    # set add_labels to True to show the label for each object\n",
168 | "    image_with_boxes = sv.BoxAnnotator().annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)\n",
169 | "\n",
170 | " return image_with_boxes"
171 | ],
172 | "metadata": {
173 | "id": "-sHP4xnklI80"
174 | },
175 | "execution_count": null,
176 | "outputs": []
177 | },
178 | {
179 | "cell_type": "markdown",
180 | "source": [
181 | "Saving the Image with Bounding Boxes"
182 | ],
183 | "metadata": {
184 | "id": "7WWr6O35lbAj"
185 | }
186 | },
187 | {
188 | "cell_type": "code",
189 | "source": [
190 | "def save_image(image, original_image_path, output_directory=\"results\"):\n",
191 | "\n",
192 | " os.makedirs(output_directory, exist_ok=True)\n",
193 | " filename = os.path.basename(original_image_path)\n",
194 | " output_path = os.path.join(output_directory, f\"result_{filename}\")\n",
195 | " cv2.imwrite(output_path, image)\n",
196 | " return output_path"
197 | ],
198 | "metadata": {
199 | "id": "2Fap9bCBlMEx"
200 | },
201 | "execution_count": null,
202 | "outputs": []
203 | },
204 | {
205 | "cell_type": "markdown",
206 | "source": [
207 | "#### Don't forget to add your image file(s)!"
208 | ],
209 | "metadata": {
210 | "id": "E3m00687mrP-"
211 | }
212 | },
213 | {
214 | "cell_type": "markdown",
215 | "source": [
216 | "### Running the whole thing!"
217 | ],
218 | "metadata": {
219 | "id": "Ac53DIQEli_w"
220 | }
221 | },
222 | {
223 | "cell_type": "code",
224 | "execution_count": null,
225 | "metadata": {
226 | "colab": {
227 | "base_uri": "https://localhost:8080/"
228 | },
229 | "id": "lg98goeWlCrQ",
230 | "outputId": "39f5052d-b49c-4a5f-f652-241df0d10130"
231 | },
232 | "outputs": [
233 | {
234 | "output_type": "stream",
235 | "name": "stdout",
236 | "text": [
237 | "loading Roboflow workspace...\n",
238 | "loading Roboflow project...\n",
239 | "{'predictions': [{'x': 794, 'y': 469, 'width': 668, 'height': 433, 'confidence': 0.9207110404968262, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 460, 'y': 23, 'width': 60, 'height': 21, 'confidence': 0.6908699870109558, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 56, 'y': 323, 'width': 112, 'height': 376, 'confidence': 0.5661516189575195, 'class': 'machinery', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}, {'x': 584, 'y': 180, 'width': 28, 'height': 31, 'confidence': 0.4035103917121887, 'class': 'Person', 'image_path': '/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg', 'prediction_type': 'ObjectDetectionModel'}], 'image': {'width': '1280', 'height': '720'}}\n",
240 | "The image has been processed and saved to results/result_construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg\n"
241 | ]
242 | }
243 | ],
244 | "source": [
245 | "def main():\n",
246 | "\n",
247 | " ## Authentication info to load the model\n",
248 | " ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25\n",
249 | " ## Workspace ID: \"roboflow-universe-projects\", Project ID: \"construction-site-safety\", Version Number: 25\n",
250 | " api_key = \"ROBOFLOW_PRIVATE_API_KEY\"\n",
251 | " workspace_id = \"ROBOFLOW_WORKSPACE_ID\"\n",
252 | "    project_id = \"ROBOFLOW_PROJECT_ID\"\n",
253 | " version_number = \"ROBOFLOW_VERSION_NUMBER\"\n",
254 | " project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)\n",
255 | "\n",
256 | " # Make a prediction on the specified image file\n",
257 | " image_path = \"/content/test_images/construction-2-_mp4-13_jpg.rf.dcfc8cd7a7c9f6109ff18cab2b50f6bc.jpg\"\n",
258 | " confidence = 40\n",
259 | " overlap = 30\n",
260 | " image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(\n",
261 | " project, model, image_path, confidence, overlap)\n",
262 | "\n",
263 | " print(predictions_json)\n",
264 | "\n",
265 | "    ## Set add_labels to True to draw class labels on the bounding boxes\n",
266 | "    add_labels = True\n",
267 | "    # annotate all detections in one call; a loop would leave image_with_boxes undefined when there are no detections\n",
268 | "    image_with_boxes = draw_boxes_and_labels(image, pred_sv_xyxy, predicted_classes, add_labels)\n",
269 | "\n",
270 | " # Save the image with bounding boxes for the detected objects drawn on them\n",
271 | " output_path = save_image(image_with_boxes, image_path)\n",
272 | "\n",
273 | " print(f\"The image has been processed and saved to {output_path}\")\n",
274 | "\n",
275 | "if __name__ == \"__main__\":\n",
276 | " main()\n"
277 | ]
278 | }
279 | ]
280 | }
--------------------------------------------------------------------------------
/images/test_box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/images/test_box.jpg
--------------------------------------------------------------------------------
/images/twoPass.py:
--------------------------------------------------------------------------------
1 | import os, glob, shutil
2 | from roboflow import Roboflow
3 | import json
4 | import cv2
5 |
6 |
7 | def secondModel(model2, cropped_det_path: str, printJson = True, save_img = True):
8 | """
9 | :param model2: Roboflow model object for the second inference pass\n
10 | :param cropped_det_path: string, path to the cropped detection for inference passed from cropBoxes()\n
11 | :param printJson: bool, default: True - prints the JSON values of the predictions from the second inference pass\n
12 | :param save_img: bool, default: True - saves the prediction image [crop] from the second inference pass
13 | """
14 |     # load the cropped detection as an OpenCV object (array)
15 | cropped_img = cv2.imread(cropped_det_path)
16 |
17 | # perform inference on the selected image
18 | predictions_MODEL2 = model2.predict(cropped_det_path)
19 | predictions_json_MODEL2 = predictions_MODEL2.json()['predictions'][0]
20 |
21 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
22 | top_class = predictions_json_MODEL2['top']
23 | #top_confidence = predictions_json_MODEL2['confidence']
24 |
25 | if save_img:
26 | original_file = os.path.basename(cropped_det_path).split('/')[-1]
27 | if os.path.exists(os.curdir + f"/inferred_secondPassResult/ClassifiedAs_{top_class}"
28 | ) is False:
29 | os.mkdir(os.curdir + f"/inferred_secondPassResult/ClassifiedAs_{top_class}")
30 |
31 | filename = original_file
32 | save_loc = f'./inferred_secondPassResult/ClassifiedAs_{top_class}/' + filename
33 | cv2.imwrite(save_loc, cropped_img)
34 |
35 | print(f'Success! Second Pass (classification) RESIZED IMAGE FOR INFERENCE saved to {save_loc}')
36 |
37 | if printJson:
38 | print(f'\n{predictions_json_MODEL2}')
39 |
40 |
41 | def cropBoxes(model1, model2, img_path, printJson = True, save_img = True, confidence = 40, overlap = 30):
42 | """
43 | :param model1: Roboflow model object for the first inference pass\n
44 | :param model2: Roboflow model object for the second inference pass\n
45 |     :param img_path: string, path to the image or directory (folder) of images for inference\n
46 | :param printJson: bool, default: True - prints the JSON values of the predictions from the second inference pass\n
47 | :param save_img: bool, default: True - saves the prediction image [crop] from the second inference pass
48 | """
49 | # for a directory (folder) of images
50 | if os.path.isdir(img_path):
51 | raw_data_location = img_path
52 | for raw_data_ext in ['.jpg', '.jpeg', '.png']:
53 | globbed_files = glob.glob(raw_data_location + '/*' + raw_data_ext)
54 | for img_file in globbed_files:
55 | crop_number = 0
56 | img = cv2.imread(img_file)
57 | # perform inference on the selected image
58 | predictions = model1.predict(img_file, confidence=confidence,
59 | overlap=overlap)
60 | #predictions_json = predictions.json()
61 | original_file = os.path.basename(img_file).split('/')[-1]
62 | predictions.save(os.curdir + f"/inference_images/inferred/{original_file}")
63 |                 # cropping each detection from the image with NumPy array slicing
64 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
65 | for bounding_box in predictions:
66 | # defining crop area [height_of_cropArea:width_of_cropArea]
67 | # croppedArea = img[start_row:end_row, start_col:end_col]
68 | x0 = bounding_box['x'] - bounding_box['width'] / 2#start_column
69 | x1 = bounding_box['x'] + bounding_box['width'] / 2#end_column
70 | y0 = bounding_box['y'] - bounding_box['height'] / 2#start row
71 | y1 = bounding_box['y'] + bounding_box['height'] / 2#end_row
72 | class_name = bounding_box['class']
73 | croppedArea = img[int(y0):int(y1), int(x0):int(x1)]
74 | #confidence_score = bounding_box['confidence']#confidence score of prediction
75 |
76 | if save_img:
77 | if os.path.exists(os.curdir + f"/inferred_cropBoxes/DetectedAs_{class_name}"
78 | ) is False:
79 | os.mkdir(os.curdir + f"/inferred_cropBoxes/DetectedAs_{class_name}")
80 |
81 | filename = f"Crop{crop_number}_{original_file}"
82 | save_loc = f'./inferred_cropBoxes/DetectedAs_{class_name}/' + filename
83 | cv2.imwrite(save_loc, croppedArea)
84 |
85 | print(f'Success! First Pass (object detection) saved to {save_loc}')
86 | crop_number+=1
87 |
88 | if printJson:
89 | print(f'\n{bounding_box}')
90 |
91 | secondModel(model2, save_loc)
92 |
93 | # runs if there is only 1 image file in the ./inference_images directory
94 | elif os.path.isfile(img_path):
95 | crop_number = 0
96 | img = cv2.imread(img_path)
97 | # perform inference on the selected image
98 |         predictions = model1.predict(img_path, confidence=confidence,
99 | overlap=overlap)
100 | #predictions_json = predictions.json()
101 |         original_file = os.path.basename(img_path)
102 | predictions.save(os.curdir + f"/inference_images/inferred/{original_file}")
103 |         # cropping each detection from the image with NumPy array slicing
104 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
105 | for bounding_box in predictions:
106 | # defining crop area [height_of_cropArea:width_of_cropArea]
107 | # croppedArea = img[start_row:end_row, start_col:end_col]
108 | x0 = bounding_box['x'] - bounding_box['width'] / 2#start_column
109 | x1 = bounding_box['x'] + bounding_box['width'] / 2#end_column
110 | y0 = bounding_box['y'] - bounding_box['height'] / 2#start row
111 | y1 = bounding_box['y'] + bounding_box['height'] / 2#end_row
112 | class_name = bounding_box['class']
113 | croppedArea = img[int(y0):int(y1), int(x0):int(x1)]
114 | #confidence_score = bounding_box['confidence']#confidence score of prediction
115 |
116 | if save_img:
117 | original_file = os.path.basename(img_path).split('/')[-1]
118 | if os.path.exists(os.curdir + f"/inferred_cropBoxes/DetectedAs_{class_name}"
119 | ) is False:
120 | os.mkdir(os.curdir + f"/inferred_cropBoxes/DetectedAs_{class_name}")
121 |
122 | filename = original_file
123 | save_loc = f'./inferred_cropBoxes/DetectedAs_{class_name}/' + filename
124 | cv2.imwrite(save_loc, croppedArea)
125 |
126 | print(f'Success! First Pass (object detection) saved to {save_loc}')
127 | crop_number+=1
128 |
129 | if printJson:
130 | print(f'\n{bounding_box}')
131 |
132 |             secondModel(model2, save_loc)
133 |
134 | else:
135 | return print('Please input a valid path to an image or directory (folder)')
136 |
137 |
138 | # load config file for the models
139 | with open(os.pardir + '/roboflow_config_twopass.json') as f:
140 | config = json.load(f)
141 |
142 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
143 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
144 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
145 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
146 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
147 | ROBOFLOW_API_KEY_MODEL2 = config["ROBOFLOW_API_KEY_MODEL2"]
148 | ROBOFLOW_WORKSPACE_ID_MODEL2 = config["ROBOFLOW_WORKSPACE_ID_MODEL2"]
149 | ROBOFLOW_MODEL_ID_MODEL2 = config["ROBOFLOW_MODEL_ID_MODEL2"]
150 | ROBOFLOW_VERSION_NUMBER_MODEL2 = config["ROBOFLOW_VERSION_NUMBER_MODEL2"]
151 | ROBOFLOW_SIZE_MODEL2 = config["ROBOFLOW_SIZE_MODEL2"]
152 |
153 | f.close()
154 |
155 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
156 | # create Roboflow object: https://docs.roboflow.com/python
157 | rf = Roboflow(api_key=ROBOFLOW_API_KEY)
158 | workspace = rf.workspace(ROBOFLOW_WORKSPACE_ID)
159 | project = workspace.project(ROBOFLOW_MODEL_ID)
160 | version = project.version(ROBOFLOW_VERSION_NUMBER)
161 | model = version.model
162 |
163 | rf_MODEL2 = Roboflow(api_key=ROBOFLOW_API_KEY_MODEL2)
164 | workspace_MODEL2 = rf_MODEL2.workspace(ROBOFLOW_WORKSPACE_ID_MODEL2)
165 | project_MODEL2 = workspace_MODEL2.project(ROBOFLOW_MODEL_ID_MODEL2)
166 | version_MODEL2 = project_MODEL2.version(ROBOFLOW_VERSION_NUMBER_MODEL2)
167 | model_MODEL2 = version_MODEL2.model
168 |
169 | # creating a directory to add images we wish to infer
170 | if os.path.exists(os.curdir + '/inference_images') is False:
171 | os.mkdir(os.curdir + '/inference_images')
172 |
173 | if os.path.exists(os.curdir + '/inference_images/inferred') is False:
174 | os.mkdir(os.curdir + '/inference_images/inferred')
175 |
176 | for raw_data_ext in ['.jpg', '.jpeg', '.png']:
177 | globbed_files = glob.glob(os.curdir + '/*' + raw_data_ext)
178 | for img_file in globbed_files:
179 | shutil.move(img_file, os.curdir + '/inference_images')
180 |
181 | # creating directories to save inference results
182 | for directory in ['/inferred_cropBoxes', '/inferred_secondPassResult']:
183 | if os.path.exists(os.curdir + directory) is False:
184 | os.mkdir(os.curdir + directory)
185 |
186 | cropBoxes(model, model_MODEL2, './inference_images', confidence = 40, overlap = 30)
187 |
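188 | ## Two-pass recap: the detection model crops each detection to
189 | ## ./inferred_cropBoxes/DetectedAs_<class>/, then secondModel() classifies each
190 | ## saved crop and files it under ./inferred_secondPassResult/ClassifiedAs_<top class>/.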
--------------------------------------------------------------------------------
/images/writetext_img.py:
--------------------------------------------------------------------------------
1 | import os
2 | import json
3 | import cv2
4 | import numpy as np
5 | from roboflow import Roboflow
6 |
7 | import supervision as sv
8 | from supervision.draw.color import Color
9 | from supervision.draw.color import ColorPalette
10 | from supervision import Detections, BoxAnnotator
11 |
12 |
13 | def load_roboflow_model(api_key, workspace_id, project_id, version_number):
14 |
15 | # authenticate to your Roboflow account and load your model
16 | rf = Roboflow(api_key=api_key)
17 | project = rf.workspace(workspace_id).project(project_id)
18 | version = project.version(version_number)
19 | model = version.model
20 |
21 | return project, model
22 |
23 | def make_prediction(project, model, image_path, confidence, overlap):
24 |
25 | # load the image and make predictions with your model
26 | img = cv2.imread(image_path)
27 | predictions = model.predict(image_path, confidence=confidence, overlap=overlap)
28 | predictions_json = predictions.json()
29 | roboflow_xyxy = np.empty((0, 4))
30 | predicted_classes = []
31 | for bounding_box in predictions:
32 | x1 = bounding_box['x'] - bounding_box['width'] / 2
33 | x2 = bounding_box['x'] + bounding_box['width'] / 2
34 | y1 = bounding_box['y'] - bounding_box['height'] / 2
35 | y2 = bounding_box['y'] + bounding_box['height'] / 2
36 |         roboflow_xyxy = np.vstack((roboflow_xyxy, [x1, y1, x2, y2]))
37 | predicted_classes.append(bounding_box['class'])
38 |
39 | # class_name = bounding_box['class']
40 | # confidence = bounding_box['confidence']
41 |     sv_xyxy = Detections.from_roboflow(
42 |         predictions_json, class_list=list(project.classes.keys()))
43 |
44 | return img, predictions_json, sv_xyxy, predicted_classes
45 |
46 | def draw_boxes_and_labels(image, sv_xyxy, class_ids, add_labels):
47 |
48 |     # set add_labels to True to show the label for each object
49 |     image_with_boxes = BoxAnnotator(
50 |         color=ColorPalette.default(), thickness=2).annotate(image, sv_xyxy, labels=class_ids, skip_label=not add_labels)
51 |
52 | return image_with_boxes
53 |
54 | def save_image(image, original_image_path, output_directory="results"):
55 |
56 | os.makedirs(output_directory, exist_ok=True)
57 | filename = os.path.basename(original_image_path)
58 | output_path = os.path.join(output_directory, f"result_{filename}")
59 | cv2.imwrite(output_path, image)
60 |
61 | return output_path
62 |
63 | def main():
64 | ## Authentication info to load the model. The config file is located at ../roboflow_config.json
65 | ## Sample project: https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety/model/25
66 | ## Workspace ID: "roboflow-universe-projects", Project ID: "construction-site-safety", Version Number: 25
67 | with open(os.pardir + '/roboflow_config.json') as f:
68 | config = json.load(f)
69 |
70 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
71 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
72 | ROBOFLOW_PROJECT_ID = config["ROBOFLOW_PROJECT_ID"]
73 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
74 |
75 | f.close()
76 |
77 | api_key = ROBOFLOW_API_KEY
78 | workspace_id = ROBOFLOW_WORKSPACE_ID
79 | project_id = ROBOFLOW_PROJECT_ID
80 | version_number = ROBOFLOW_VERSION_NUMBER
81 | project, model = load_roboflow_model(api_key, workspace_id, project_id, version_number)
82 |
83 | # Make a prediction on the specified image file
84 | image_path = "/path/to/image.jpg"
85 | confidence = 40
86 | overlap = 30
87 | image, predictions_json, pred_sv_xyxy, predicted_classes = make_prediction(
88 | project, model, image_path, confidence, overlap)
89 |
90 | print(predictions_json)
91 |
92 |     ## Set add_labels to True to draw class labels on the bounding boxes
93 |     add_labels = True
94 |     # annotate all detections in one call; a loop here would leave image_with_boxes undefined when there are no detections
95 |     image_with_boxes = draw_boxes_and_labels(image, pred_sv_xyxy, predicted_classes, add_labels)
96 |
97 | # Save the image with bounding boxes for the detected objects drawn on them
98 | output_path = save_image(image_with_boxes, image_path)
99 |
100 | print(f"The image has been processed and saved to {output_path}")
101 |
102 | if __name__ == "__main__":
103 | main()
104 |
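105 | ## A rough raw-OpenCV equivalent of the labeling step (hypothetical text and
106 | ## position values), should you need labels without supervision:
107 | # cv2.putText(image, "class_name", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)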
--------------------------------------------------------------------------------
/measureObject.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os, glob
3 | import time
4 | from roboflow import Roboflow
5 | from numpy import mean
6 |
7 | rf = Roboflow(api_key="API_KEY")
8 | project = rf.workspace().project("measure-drill-holes")
9 | model = project.version(5).model
10 |
11 | # grab all the .jpg files
12 | extension_images = ".jpg"
13 | get_images = sorted(glob.glob('images/' + '*' + extension_images))
14 |
15 | print(get_images)
16 |
17 | # font
18 | font = cv2.FONT_HERSHEY_COMPLEX_SMALL
19 | org = (25, 25)
20 | fontScale = 2
21 | color = (255, 0, 0)
22 | thickness = 2
23 |
24 | box_color = (125, 0, 125)
25 | box_thickness = 3
26 | box_scale = 4
27 |
28 | fpsArray = []
29 | averageFPS = 0
30 |
31 | pixel_ratio_array = []
32 | averagePR = []
33 |
34 | try:
35 | for image_paths in get_images:
36 |
37 | print(image_paths)
38 |
39 | response = model.predict(image_paths, confidence=40, overlap=30).json()
40 |
41 | frame = cv2.imread(image_paths)
42 |
43 | t0 = time.time()
44 |
45 | pixel_ratio_array = []
46 | averagePR = []
47 |
48 | for objects in response['predictions']:
49 | # get prediction_name and confidence of each object
50 | object_class = str(objects['class'])
51 |
52 | # pull bbox coordinate points
53 | x0 = objects['x'] - objects['width'] / 2
54 | y0 = objects['y'] - objects['height'] / 2
55 | x1 = objects['x'] + objects['width'] / 2
56 | y1 = objects['y'] + objects['height'] / 2
57 | box = (x0, y0, x1, y1)
58 |
59 | box_start_point = (int(x0), int(y0))
60 | box_end_point = (int(x1), int(y1))
61 |
62 | if object_class == "Reference":
63 |
64 | object_class_text_size = cv2.getTextSize(object_class, font, fontScale, thickness)
65 | object_confidence = str(round(objects['confidence']*100 , 2)) + "%"
66 |
67 | reference_inches = 1
68 |
69 | reference_height = objects['height']
70 | reference_width = objects['width']
71 |
72 | pixel_to_inches = reference_height / reference_inches
73 | pixel_ratio_array.append(pixel_to_inches)
74 | averagePR = mean(pixel_ratio_array)
75 |
76 | object_Inches = reference_height / averagePR
77 |
78 | inches_ORG = (int(x0), int(y0-10))
79 |
80 | frame = cv2.putText(frame, 'Inches: ' + str(object_Inches)[:5], inches_ORG, font, fontScale, (255,255,255), thickness, cv2.LINE_AA)
81 |
82 | # draw ground truth boxes
83 | frame = cv2.rectangle(frame, box_start_point, box_end_point, box_color, box_thickness)
84 |
85 | ratio_weight = 1.10
86 | averagePR = averagePR * ratio_weight
87 |
88 | target_size = 0.15625
89 |
90 | target_max = target_size * 1.10
91 | target_min = target_size * 0.9
92 |
93 | for objects in response['predictions']:
94 | # get prediction_name and confidence of each object
95 | object_class = str(objects['class'])
96 |
97 | # pull bbox coordinate points
98 | x0 = objects['x'] - objects['width'] / 2
99 | y0 = objects['y'] - objects['height'] / 2
100 | x1 = objects['x'] + objects['width'] / 2
101 | y1 = objects['y'] + objects['height'] / 2
102 | box = (x0, y0, x1, y1)
103 |
104 | box_start_point = (int(x0), int(y0))
105 | box_end_point = (int(x1), int(y1))
106 |
107 | anomaly_detected = False
108 |
109 | box_color = (0, 0, 255)
110 |
111 | if object_class == "Drill Hole":
112 |
113 | object_class_text_size = cv2.getTextSize(object_class, font, fontScale, thickness)
114 | object_confidence = str(round(objects['confidence']*100 , 2)) + "%"
115 |
116 | hole_inches = 1
117 |
118 | hole_height = objects['height']
119 | hole_height_THRESHOLD = hole_height * 1.25
120 |
121 | hole_width = objects['width']
122 | hole_width_THRESHOLD = hole_width * 1.25
123 |
124 | object_Inches = hole_height / averagePR
125 |
126 | if object_Inches < target_max and object_Inches > target_min:
127 | box_color = (0, 200, 0)
128 |
129 | if hole_height > hole_width_THRESHOLD:
130 | anomaly_detected = True
131 | box_color = (0, 200, 255)
132 |
133 | if hole_width > hole_height_THRESHOLD:
134 | anomaly_detected = True
135 | box_color = (0, 200, 255)
136 |
137 | inches_ORG = (int(x0), int(y0-10))
138 |
139 | frame = cv2.putText(frame, 'Inches: ' + str(object_Inches)[:5], inches_ORG, font, fontScale, (255,255,255), thickness, cv2.LINE_AA)
140 |
141 | # draw ground truth boxes
142 | frame = cv2.rectangle(frame, box_start_point, box_end_point, box_color, box_thickness)
143 |
144 | # timing: for benchmarking purposes
145 | t = time.time()-t0
146 |
147 | fpsArray.append(1/t)
148 | averageFPS = mean(fpsArray)
149 | averagePR = mean(pixel_ratio_array)
150 |
151 | print("IMAGE CONFIRMED")
152 | print("PIXEL RATIO: " + str(averagePR) + "\n")
153 |
154 | cv2.imwrite(image_paths[:-3]+"prediction.jpg", frame)
155 | except Exception as e:
156 |     # log the per-image failure and continue instead of crashing the batch
157 |     print("IMAGE ERROR:", e)
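158 |
159 | ## How the measurement works (a sketch, assuming the "Reference" object is 1 inch tall):
160 | ## pixels_per_inch = reference_height_px / reference_inches
161 | ## object_inches   = object_height_px / pixels_per_inch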
--------------------------------------------------------------------------------
/object_counting.py:
--------------------------------------------------------------------------------
1 | from roboflow import Roboflow
2 | import os, sys, shutil
3 | import json
4 | import re
5 | import glob
6 |
7 |
8 | def count_objects(predictions, target_classes):
9 | """
10 | Helper method to count the number of objects in an image for a given class
11 | :param predictions: predictions returned from calling the predict method
12 |     :param target_classes: iterable of class names to count
13 | :return: dictionary with target class and total count of occurrences in image
14 | """
15 | object_counts = {x:0 for x in target_classes}
16 | for prediction in predictions:
17 | if prediction['class'] in target_classes:
18 | object_counts[prediction['class']] += 1
19 |         else:
20 |             object_counts[prediction['class']] = object_counts.get(prediction['class'], 0) + 1
21 |
22 | present_objects = object_counts.copy()
23 |
24 | for i in object_counts:
25 | if object_counts[i] < 1:
26 | present_objects.pop(i)
27 |
28 | return present_objects
29 |
30 |
31 | ## load config file for the models
32 | with open(os.curdir + '/roboflow_config.json') as f:
33 | config = json.load(f)
34 |
35 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
36 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
37 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
38 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
39 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
40 |
41 | # (file closed automatically by the "with" block above)
42 |
43 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
44 | rf = Roboflow(api_key=ROBOFLOW_API_KEY)
45 | workspace = rf.workspace(ROBOFLOW_WORKSPACE_ID)
46 | project = workspace.project(ROBOFLOW_MODEL_ID)
47 | version = project.version(ROBOFLOW_VERSION_NUMBER)
48 | model = version.model
49 |
50 | ## creating a directory to add images we wish to infer
51 | if os.path.exists(os.curdir + '/images_to_infer') is False:
52 | os.mkdir(os.curdir + '/images_to_infer')
53 |
54 | for data_ext in ['.jpg', '.jpeg', '.png']:
55 | globbed_files = glob.glob(os.curdir + '/*' + data_ext)
56 | for img_file in globbed_files:
57 | shutil.move(img_file, os.curdir + '/images_to_infer')
58 |
59 | file_location = os.curdir + '/images_to_infer'
60 | file_extension = ".jpg" # e.g. ".jpg", ".jpeg", ".png"
61 |
62 | globbed_files = glob.glob(file_location + '/*' + file_extension)
63 | ## Uncomment the following line to print all class labels in the project
64 | # print(project.classes)
65 |
66 | for img_file in globbed_files:
67 | # perform inference on the selected image
68 | predictions = model.predict(img_file)
69 | class_counts = count_objects(predictions, project.classes)
70 | ## Uncomment the following line to print the individual JSON Predictions
71 | # print(predictions)
72 | print('\n', "Class Counts:", '\n')
73 | print(class_counts)
74 |
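75 | ## Example output shape (hypothetical classes and counts for a safety model):
76 | ## {'Hardhat': 2, 'Person': 3, 'Safety Vest': 1}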
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2022.12.7
2 | chardet==4.0.0
3 | cycler==0.10.0
4 | idna==2.10
5 | kiwisolver>=1.3.1
6 | matplotlib
7 | numpy>=1.18.5
8 | opencv-python>=4.1.2
9 | Pillow>=7.1.2
10 | pyparsing==2.4.7
11 | python-dateutil
12 | python-dotenv
13 | requests
14 | roboflow>=1.0.5
15 | six
16 | urllib3>=1.26.6
17 | wget
18 | tqdm>=4.41.0
19 | PyYAML>=5.3.1
20 | wget
21 | requests_toolbelt
22 | supervision>=0.6.0
23 | aiohttp==3.8.4
24 | aiohttp-retry==2.8.3
25 | aiosignal==1.3.1
26 | async-timeout==4.0.2
27 | frozenlist==1.3.3
28 | multidict==6.0.4
29 | twilio==8.1.0
30 | PyJWT==2.6.0
31 | yarl==1.9.1
32 | beautifulsoup4==4.12.2
33 | google-api-core==2.11.0
34 | google-api-python-client==2.86.0
35 | google-auth==2.17.3
36 | google-auth-httplib2==0.1.0
37 | googleapis-common-protos==1.59.0
38 | httplib2==0.22.0
39 | lxml==4.9.2
40 | oauth2client==4.1.3
41 | pyasn1==0.5.0
42 | pyasn1-modules==0.3.0
43 | rsa==4.9
44 | simplegmail==4.1.1
45 | soupsieve==2.4.1
46 | uritemplate==4.1.1
--------------------------------------------------------------------------------
/roboflow_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "__comment1": "Obtain these values via Roboflow",
3 | "ROBOFLOW_API_KEY": "private-api-key",
4 | "ROBOFLOW_WORKSPACE_ID": "workspace-id",
5 | "ROBOFLOW_PROJECT_ID": "project-id",
6 | "ROBOFLOW_MODEL_ID": "model-id",
7 | "ROBOFLOW_VERSION_NUMBER": "#",
8 |   "ROBOFLOW_SIZE": 640,
9 | "EMAIL": "your@email.com",
10 |
11 | "__comment2": "The following are only needed for infer-async.py",
12 | "FRAMERATE": 24,
13 | "BUFFER": 0.5
14 | }
--------------------------------------------------------------------------------
/roboflow_config_twopass.json:
--------------------------------------------------------------------------------
1 | {
2 | "__comment1": "Obtain these values via Roboflow",
3 | "ROBOFLOW_API_KEY": "private-api-key",
4 | "ROBOFLOW_WORKSPACE_ID": "workspace-id",
5 | "ROBOFLOW_PROJECT_ID": "project-id",
6 | "ROBOFLOW_MODEL_ID": "model-id",
7 | "ROBOFLOW_VERSION_NUMBER": "#",
8 | "ROBOFLOW_SIZE": 640,
9 | "ANNOTATION_FORMAT": "yolov5",
10 |
11 | "__comment2": "Obtain these values via Roboflow",
12 | "ROBOFLOW_API_KEY_MODEL2": "private-api-key",
13 | "ROBOFLOW_WORKSPACE_ID_MODEL2": "workspace-id",
14 | "ROBOFLOW_PROJECT_ID_MODEL2": "project-id",
15 | "ROBOFLOW_MODEL_ID_MODEL2": "model-id",
16 | "ROBOFLOW_VERSION_NUMBER_MODEL2": "#",
17 | "ROBOFLOW_SIZE_MODEL2": 512,
18 | "ANNOTATION_FORMAT_MODEL2": "yolov5",
19 |
20 | "__comment3": "The following are only needed for infer-async.py",
21 | "FRAMERATE": 24,
22 | "BUFFER": 0.5
23 | }
--------------------------------------------------------------------------------
/save_vidframes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import cv2
3 | import argparse
4 |
5 | def main(
6 | video_path: str,
7 | active_learning: bool,
8 | raw_data_location: str = ".",
9 | raw_data_extension: str = ".jpg",
10 | fps: int = 1,
11 | ):
12 | video = cv2.VideoCapture(video_path)
13 | frame_number = 0
14 | total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
15 |     skip_frames = max(1, int(video.get(cv2.CAP_PROP_FPS) // fps))  # never 0, so the loop always advances
16 | sampled_frames = []
17 |
18 | while frame_number < total_frames:
19 | video.set(cv2.CAP_PROP_POS_FRAMES, frame_number)
20 | ret, frame = video.read()
21 |
22 | if not ret:
23 | break
24 |
25 | if active_learning:
26 | active_learning_frames = os.path.join(raw_data_location + "/sampled_frames")
27 | if os.path.exists(active_learning_frames) is False:
28 | os.mkdir(active_learning_frames)
29 |
30 | frame_path = os.path.abspath(active_learning_frames + f"/frame_{frame_number:04d}{raw_data_extension}")
31 | sampled_frames.append(frame_path)
32 | print(frame_path)
33 | if os.path.exists(frame_path) is False:
34 | cv2.imwrite(frame_path, frame)
35 |
36 | frame_number += skip_frames
37 |
38 | # Press 'q' to exit the loop
39 | if cv2.waitKey(1) & 0xFF == ord('q'):
40 | break
41 |
42 | video.release()
43 | cv2.destroyAllWindows()
44 |
45 |
46 | if __name__ == "__main__":
47 |     parser = argparse.ArgumentParser(description="Sample frames from a video at a specified frames-per-second rate.")
48 | parser.add_argument("--video_path", type=str, help="Path to the video file.")
49 | parser.add_argument("--active_learning", type=str, help="Path to the video file.")
50 | parser.add_argument("--raw_data_location", type=str, default=f"{os.curdir}", help="Location to save frames.")
51 | parser.add_argument("--raw_data_extension", type=str, default=".jpg", help="Image extension for saved frames.")
52 | parser.add_argument("--fps", type=int, default=1, help="Frames per second to sample from the video.")
53 |
54 | args = parser.parse_args()
55 |
56 | main(
57 | args.video_path,
58 | args.active_learning,
59 | args.raw_data_location,
60 | args.raw_data_extension,
61 | args.fps,
62 | )
63 | ## Example below for how to run the file (remove the comment from each line below, prior to copy/paste to your Terminal)
64 | # python3 save_vidframes.py --video_path="/path/to/video.mp4" \
65 | # --active_learning=True \
66 | # --raw_data_location="/path/to/save/video/frames" \
67 | # --raw_data_extension=".jpg" \
68 | # --fps=5
69 |
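70 | ## Note: frames are sampled every max(1, int(native_fps // fps)) frames, so an --fps value
71 | ## at or above the video's native frame rate simply saves every frame.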
--------------------------------------------------------------------------------
/stream/blur_stream.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Get webcam interface via opencv-python
35 | # Change '0' to '1' or '2' if it cannot find your webcam
36 | video = cv2.VideoCapture(0)
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 | # Get the current image from the webcam
41 | ret, img = video.read()
42 |
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR value (OpenCV convention) for the bounding box; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | box = [(x0, y0), (x1, y1)]
76 | blur_x = int(bounding_box['x'] - bounding_box['width'] / 2)
77 | blur_y = int(bounding_box['y'] - bounding_box['height'] / 2)
78 | blur_width = int(bounding_box['width'])
79 | blur_height = int(bounding_box['height'])
80 | ## region of interest (ROI), or area to blur
81 | roi = img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width]
82 |
83 | # ADD BLURRED BBOXES
84 | # set blur to (31,31) or (51,51) based on amount of blur desired
85 | blur_image = cv2.GaussianBlur(roi,(51,51),0)
86 | img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width] = blur_image
87 | ## draw/place bounding boxes on image
88 | #start_point = (int(x0), int(y0))
89 | #end_point = (int(x1), int(y1))
90 | #cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
91 |
92 | (text_width, text_height), _ = cv2.getTextSize(
93 | f"{class_name} | {confidence}",
94 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
95 |
96 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
97 | thickness=-1)
98 |
99 | text_location = (int(x0), int(y0))
100 |
101 | cv2.putText(img, f"{class_name} | {confidence}",
102 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
103 | color=(255,255,255), thickness=2)
104 |
105 | return img, detections
106 |
107 |
108 | # Main loop; infers sequentially until you press "q"
109 | while 1:
110 | # On "q" keypress, exit
111 | if(cv2.waitKey(1) == ord('q')):
112 | break
113 |
114 | # Capture start time to calculate fps
115 | start = time.time()
116 |
117 | # Synchronously get a prediction from the Roboflow Infer API
118 | image, detections = infer()
119 | # And display the inference results
120 | cv2.imshow('image', image)
121 |
122 | # Print frames per second
123 | print((1/(time.time()-start)), " fps")
124 | print(detections)
125 |
126 | # Release resources when finished
127 | video.release()
128 | cv2.destroyAllWindows()
129 |
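130 | ## Note: cv2.GaussianBlur kernel sizes must be positive odd integers, e.g. (31,31) or
131 | ## (51,51); the larger the kernel, the stronger the blur inside each bounding box.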
--------------------------------------------------------------------------------
/stream/draw_stream.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Get webcam interface via opencv-python
35 | # Change '0' to '1' or '2' if it cannot find your webcam
36 | video = cv2.VideoCapture(0)
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 | # Get the current image from the webcam
41 | ret, img = video.read()
42 |
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR value (OpenCV convention) for the bounding box; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
79 |
80 | (text_width, text_height), _ = cv2.getTextSize(
81 | f"{class_name} | {confidence}",
82 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
83 |
84 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
85 | thickness=-1)
86 |
87 | text_location = (int(x0), int(y0))
88 |
89 | cv2.putText(img, f"{class_name} | {confidence}",
90 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
91 | color=(255,255,255), thickness=2)
92 |
93 | return img, detections
94 |
95 |
96 | # Main loop; infers sequentially until you press "q"
97 | while 1:
98 | # On "q" keypress, exit
99 | if(cv2.waitKey(1) == ord('q')):
100 | break
101 |
102 | # Capture start time to calculate fps
103 | start = time.time()
104 |
105 | # Synchronously get a prediction from the Roboflow Infer API
106 | image, detections = infer()
107 | # And display the inference results
108 | cv2.imshow('image', image)
109 |
110 | # Print frames per second
111 | print((1/(time.time()-start)), " fps")
112 | print(detections)
113 |
114 | # Release resources when finished
115 | video.release()
116 | cv2.destroyAllWindows()
117 |
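118 | ## Note: the Roboflow JSON gives each box as a center point plus width/height, so the
119 | ## corner points are recovered as x0 = x - width/2, y0 = y - height/2 (x1, y1 by adding).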
--------------------------------------------------------------------------------
/stream/fill_stream.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Get webcam interface via opencv-python
35 | # Change '0' to '1' or '2' if it cannot find your webcam
36 | video = cv2.VideoCapture(0)
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 | # Get the current image from the webcam
41 | ret, img = video.read()
42 |
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR value (OpenCV convention) for the bounding box; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | # setting thickness to -1 --> filled bounding box with the specified color
79 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=-1)
80 |
81 | (text_width, text_height), _ = cv2.getTextSize(
82 | f"{class_name} | {confidence}",
83 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
84 |
85 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
86 | thickness=-1)
87 |
88 | text_location = (int(x0), int(y0))
89 |
90 | cv2.putText(img, f"{class_name} | {confidence}",
91 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
92 | color=(255,255,255), thickness=2)
93 |
94 | return img, detections
95 |
96 |
97 | # Main loop; infers sequentially until you press "q"
98 | while 1:
99 | # On "q" keypress, exit
100 | if(cv2.waitKey(1) == ord('q')):
101 | break
102 |
103 | # Capture start time to calculate fps
104 | start = time.time()
105 |
106 | # Synchronously get a prediction from the Roboflow Infer API
107 | image, detections = infer()
108 | # And display the inference results
109 | cv2.imshow('image', image)
110 |
111 | # Print frames per second
112 | print((1/(time.time()-start)), " fps")
113 | print(detections)
114 |
115 | # Release resources when finished
116 | video.release()
117 | cv2.destroyAllWindows()
118 |
--------------------------------------------------------------------------------
/stream/writetext_stream.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Get webcam interface via opencv-python
35 | # Change '0' to '1' or '2' if it cannot find your webcam
36 | video = cv2.VideoCapture(0)
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 | # Get the current image from the webcam
41 | ret, img = video.read()
42 |
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 |
73 | (text_width, text_height), _ = cv2.getTextSize(
74 | f"{class_name} | {confidence}",
75 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
76 |
77 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
78 | thickness=-1)
79 |
80 | text_location = (int(x0), int(y0))
81 |
82 | cv2.putText(img, f"{class_name} | {confidence}",
83 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
84 | color=(255,255,255), thickness=2)
85 |
86 | return img, detections
87 |
88 |
89 | # Main loop; infers sequentially until you press "q"
90 | while 1:
91 | # On "q" keypress, exit
92 | if(cv2.waitKey(1) == ord('q')):
93 | break
94 |
95 | # Capture start time to calculate fps
96 | start = time.time()
97 |
98 | # Synchronously get a prediction from the Roboflow Infer API
99 | image, detections = infer()
100 | # And display the inference results
101 | cv2.imshow('image', image)
102 |
103 | # Print frames per second
104 | print((1/(time.time()-start)), " fps")
105 | print(detections)
106 |
107 | # Release resources when finished
108 | video.release()
109 | cv2.destroyAllWindows()
110 |
--------------------------------------------------------------------------------
/streamlit/requirements.txt:
--------------------------------------------------------------------------------
1 | aioice==0.9.0
2 | aiortc==1.5.0
3 | altair==4.2.2
4 | attrs==23.1.0
5 | av==10.0.0
6 | blinker==1.6.2
7 | cachetools==5.3.0
8 | certifi==2022.12.7
9 | cffi==1.15.1
10 | chardet==4.0.0
11 | charset-normalizer==3.1.0
12 | click==8.1.3
13 | contourpy==1.0.7
14 | cryptography==40.0.2
15 | cycler==0.10.0
16 | decorator==5.1.1
17 | dnspython==2.3.0
18 | entrypoints==0.4
19 | fonttools==4.39.3
20 | gitdb==4.0.10
21 | GitPython==3.1.31
22 | google-crc32c==1.5.0
23 | idna==2.10
24 | ifaddr==0.2.0
25 | importlib-metadata==6.6.0
26 | Jinja2==3.1.2
27 | jsonschema==4.17.3
28 | kiwisolver==1.4.4
29 | markdown-it-py==2.2.0
30 | MarkupSafe==2.1.2
31 | matplotlib==3.7.1
32 | mdurl==0.1.2
33 | numpy==1.24.3
34 | opencv-python==4.7.0.72
35 | opencv-python-headless==4.7.0.72
36 | packaging==23.1
37 | pandas==1.5.3
38 | Pillow==9.5.0
39 | protobuf==3.20.3
40 | pyarrow==11.0.0
41 | pycparser==2.21
42 | pydeck==0.8.1b0
43 | pyee==9.0.4
44 | Pygments==2.15.1
45 | pylibsrtp==0.8.0
46 | Pympler==1.0.1
47 | pyOpenSSL==23.1.1
48 | pyparsing==2.4.7
49 | pyrsistent==0.19.3
50 | python-dateutil==2.8.2
51 | python-dotenv==1.0.0
52 | pytz==2023.3
53 | pytz-deprecation-shim==0.1.0.post0
54 | PyYAML==6.0
55 | requests==2.28.2
56 | requests-toolbelt==0.10.1
57 | rich==13.3.4
58 | roboflow==1.0.5
59 | six==1.16.0
60 | smmap==5.0.0
61 | streamlit==1.21.0
62 | streamlit-webrtc==0.45.0
63 | supervision==0.6.0
64 | toml==0.10.2
65 | toolz==0.12.0
66 | tornado==6.3.1
67 | tqdm==4.65.0
68 | typing_extensions==4.5.0
69 | tzdata==2023.3
70 | tzlocal==4.3
71 | urllib3==1.26.15
72 | validators==0.20.0
73 | wget==3.2
74 | zipp==3.15.0
--------------------------------------------------------------------------------
/streamlit/streamlit_app.py:
--------------------------------------------------------------------------------
1 | import streamlit as st
2 | import requests
3 | import base64
4 | import io
5 | from PIL import Image, ImageDraw, ImageFont
6 | import glob
7 | import cv2
8 | from base64 import decodebytes
9 | from io import BytesIO
10 | import numpy as np
11 | import pandas as pd
12 | import matplotlib.pyplot as plt
13 | from roboflow import Roboflow
14 |
15 |
16 | ## store initial session state values
17 | workspace_id, model_id, version_number, private_api_key = ('', '', '', '')
18 | if 'confidence_threshold' not in st.session_state:
19 |     st.session_state['confidence_threshold'] = 40
20 | if 'overlap_threshold' not in st.session_state:
21 |     st.session_state['overlap_threshold'] = 30
22 | if 'workspace_id' not in st.session_state:
23 | st.session_state['workspace_id'] = ''
24 | if 'model_id' not in st.session_state:
25 | st.session_state['model_id'] = ''
26 | if 'version_number' not in st.session_state:
27 | st.session_state['version_number'] = ''
28 | if 'private_api_key' not in st.session_state:
29 | st.session_state['private_api_key'] = ''
30 | if 'include_bbox' not in st.session_state:
31 | st.session_state['include_bbox'] = 'Yes'
32 | if 'include_class' not in st.session_state:
33 | st.session_state['include_class'] = 'Show Labels'
34 | if 'box_type' not in st.session_state:
35 | st.session_state['box_type'] = 'regular'
36 |
37 | ##########
38 | #### Set up main app logic
39 | ##########
40 | def drawBoxes(model_object, img_object, uploaded_file, show_bbox, show_class_label,
41 | show_box_type, font = cv2.FONT_HERSHEY_SIMPLEX):
42 |
43 | collected_predictions = pd.DataFrame(columns=['class', 'confidence', 'x0', 'x1', 'y0', 'y1', 'box area'])
44 |
45 | if isinstance(uploaded_file, str):
46 | img = cv2.imread(uploaded_file)
47 | # perform inference on the selected image
48 |         predictions = model_object.predict(uploaded_file, confidence=int(st.session_state['confidence_threshold']),
49 |             overlap=int(st.session_state['overlap_threshold']))
50 |     else:
51 |         img = img_object  # use the pre-converted OpenCV image passed in by the caller
52 |         predictions = model_object.predict(uploaded_file, confidence=int(st.session_state['confidence_threshold']), overlap=int(st.session_state['overlap_threshold']))
53 |
54 | predictions_json = predictions.json()
55 | # drawing bounding boxes with the Pillow library
56 | # https://docs.roboflow.com/inference/hosted-api#response-object-format
57 | for bounding_box in predictions:
58 | x0 = bounding_box['x'] - bounding_box['width'] / 2
59 | x1 = bounding_box['x'] + bounding_box['width'] / 2
60 | y0 = bounding_box['y'] - bounding_box['height'] / 2
61 | y1 = bounding_box['y'] + bounding_box['height'] / 2
62 | class_name = bounding_box['class']
63 | confidence_score = bounding_box['confidence']
64 | box = (x0, x1, y0, y1)
65 |         collected_predictions = pd.concat([collected_predictions, pd.DataFrame([{'class':class_name,
66 |             'confidence':confidence_score, 'x0':int(x0), 'x1':int(x1), 'y0':int(y0), 'y1':int(y1),
67 |             'box area':box}])], ignore_index=True)
68 | # position coordinates: start = (x0, y0), end = (x1, y1)
69 |         # color = BGR value (OpenCV convention) for the bounding box; (0,0,0) is black
70 | # thickness = stroke width/thickness of bounding box
71 | start_point = (int(x0), int(y0))
72 | end_point = (int(x1), int(y1))
73 | if show_box_type == 'regular':
74 | if show_bbox == 'Yes':
75 | # draw/place bounding boxes on image
76 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
77 |
78 | if show_class_label == 'Show Labels':
79 | # add class name with filled background
80 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + 40, int(y0) - 20), color=(0,0,0),
81 | thickness=-1)
82 | cv2.putText(img,
83 | class_name,#text to place on image
84 | (int(x0), int(y0) - 5),#location of text
85 | font,#font
86 | 0.4,#font scale
87 | (255,255,255),#text color
88 | thickness=1#thickness/"weight" of text
89 | )
90 |
91 | if show_box_type == 'fill':
92 | if show_bbox == 'Yes':
93 | # draw/place bounding boxes on image
94 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=-1)
95 |
96 | if show_class_label == 'Show Labels':
97 | # add class name with filled background
98 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + 40, int(y0) - 20), color=(0,0,0),
99 | thickness=-1)
100 | cv2.putText(img,
101 | class_name,#text to place on image
102 | (int(x0), int(y0) - 5),#location of text
103 | font,#font
104 | 0.4,#font scale
105 | (255,255,255),#text color
106 | thickness=1#thickness/"weight" of text
107 | )
108 |
109 | if show_box_type == 'blur':
110 | if show_bbox == 'Yes':
111 | # draw/place bounding boxes on image
112 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
113 |
114 | box = [(x0, y0), (x1, y1)]
115 | blur_x = int(bounding_box['x'] - bounding_box['width'] / 2)
116 | blur_y = int(bounding_box['y'] - bounding_box['height'] / 2)
117 | blur_width = int(bounding_box['width'])
118 | blur_height = int(bounding_box['height'])
119 | ## region of interest (ROI), or area to blur
120 | roi = img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width]
121 |
122 | # ADD BLURRED BBOXES
123 | # set blur to (31,31) or (51,51) based on amount of blur desired
124 | blur_image = cv2.GaussianBlur(roi,(51,51),0)
125 | img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width] = blur_image
126 |
127 | if show_class_label == 'Show Labels':
128 | # add class name with filled background
129 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + 40, int(y0) - 20), color=(0,0,0),
130 | thickness=-1)
131 | cv2.putText(img,
132 | class_name,#text to place on image
133 | (int(x0), int(y0) - 5),#location of text
134 | font,#font
135 | 0.4,#font scale
136 | (255,255,255),#text color
137 | thickness=1#thickness/"weight" of text
138 | )
139 |
140 | # convert from openCV2 to PIL. Notice the COLOR_BGR2RGB which means that
141 | # the color is converted from BGR to RGB when going from OpenCV image to PIL image
142 | color_converted = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
143 | pil_image = Image.fromarray(color_converted)
144 |
145 | return pil_image, collected_predictions, predictions_json
146 |
147 |
148 | def run_inference():
149 | rf = Roboflow(api_key=st.session_state['private_api_key'])
150 | project = rf.workspace(st.session_state['workspace_id']).project(st.session_state['model_id'])
151 | project_metadata = project.get_version_information()
152 | # dataset = project.version(st.session_state['version_number']).download("yolov5")
153 | version = project.version(st.session_state['version_number'])
154 | model = version.model
155 |
156 | project_type = st.write(f"#### Project Type: {project.type}")
157 |
158 | for version_number in range(len(project_metadata)):
159 | try:
160 | if int(project_metadata[version_number]['model']['id'].split('/')[1]) == int(version.version):
161 | project_endpoint = st.write(f"#### Inference Endpoint: {project_metadata[version_number]['model']['endpoint']}")
162 | model_id = st.write(f"#### Model ID: {project_metadata[version_number]['model']['id']}")
163 | version_name = st.write(f"#### Version Name: {project_metadata[version_number]['name']}")
164 | input_img_size = st.write(f"Input Image Size for Model Training (pixels, px):")
165 | width_metric, height_metric = st.columns(2)
166 | width_metric.metric(label='Pixel Width', value=project_metadata[version_number]['preprocessing']['resize']['width'])
167 | height_metric.metric(label='Pixel Height', value=project_metadata[version_number]['preprocessing']['resize']['height'])
168 |
169 |                 if project_metadata[version_number]['model']['fromScratch']:
170 |                     train_checkpoint = 'Scratch'
171 |                     st.write(f"#### Version trained from {train_checkpoint}")
172 |                 elif project_metadata[version_number]['model']['fromScratch'] is False:
173 |                     train_checkpoint = 'Checkpoint'
174 |                     st.write(f"#### Version trained from {train_checkpoint}")
175 |                 else:
176 |                     train_checkpoint = 'Not Yet Trained'
177 |                     st.write(f"#### Version is {train_checkpoint}")
178 | except KeyError:
179 | continue
180 |
181 | ## Subtitle.
182 | st.write('### Inferenced/Prediction Image')
183 |
184 | ## Pull in default image or user-selected image.
185 | if uploaded_file is None:
186 | # Default image.
187 | default_img_path = "images/test_box.jpg"
188 | image = Image.open(default_img_path)
189 | original_image = image
190 | open_cv_image = cv2.imread(default_img_path)
191 | original_opencv_image = open_cv_image
192 | # Display response image.
193 | pil_image_drawBoxes, df_drawBoxes, json_values = drawBoxes(model, default_img_path, default_img_path,
194 | st.session_state['include_bbox'],
195 | st.session_state['include_class'],
196 | st.session_state['box_type'])
197 |
198 | else:
199 | # User-selected image.
200 | image = Image.open(uploaded_file)
201 | original_image = image
202 | opencv_convert = image.convert('RGB')
203 | open_cv_image = np.array(opencv_convert)
204 | # Convert RGB to BGR: OpenCV deals with BGR images rather than RGB
205 | open_cv_image = open_cv_image[:, :, ::-1].copy()
206 | # Convert PIL image to byte-string so it can be sent for prediction to the Roboflow Python Package
207 | b = io.BytesIO()
208 | image.save(b, format='JPEG')
209 | im_bytes = b.getvalue()
210 | # Display response image.
211 | pil_image_drawBoxes, df_drawBoxes, json_values = drawBoxes(model, open_cv_image, im_bytes,
212 | st.session_state['include_bbox'],
213 | st.session_state['include_class'],
214 | st.session_state['box_type'])
215 |
216 | st.image(pil_image_drawBoxes,
217 | use_column_width=True)
218 | # Display original image.
219 | st.write("#### Original Image")
220 | st.image(original_image,
221 | use_column_width=True)
222 |
223 | json_tab, statistics_tab, project_tab = st.tabs(["Results & JSON Output", "Prediction Statistics", "Project Info"])
224 |
225 | with json_tab:
226 | ## Display results dataframe in main app.
227 | st.write('### Prediction Results (Pandas DataFrame)')
228 | st.dataframe(df_drawBoxes)
229 | ## Display the JSON in main app.
230 | st.write('### JSON Output')
231 | st.write(json_values)
232 |
233 | with statistics_tab:
234 | ## Summary statistics section in main app.
235 | st.write('### Summary Statistics')
236 | st.metric(label='Number of Bounding Boxes (ignoring overlap thresholds)', value=f"{len(df_drawBoxes.index)}")
237 | st.metric(label='Average Confidence Level of Bounding Boxes:', value=f"{(np.round(np.mean(df_drawBoxes['confidence'].to_numpy()),4))}")
238 |
239 | ## Histogram in main app.
240 | st.write('### Histogram of Confidence Levels')
241 | fig, ax = plt.subplots()
242 | ax.hist(df_drawBoxes['confidence'], bins=10, range=(0.0,1.0))
243 | st.pyplot(fig)
244 |
245 | with project_tab:
246 | st.write(f"Annotation Group Name: {project.annotation}")
247 | col1, col2, col3 = st.columns(3)
248 | for version_number in range(len(project_metadata)):
249 | try:
250 | if int(project_metadata[version_number]['model']['id'].split('/')[1]) == int(version.version):
251 | col1.write(f'Total images in the version: {version.images}')
252 | col1.metric(label='Augmented Train Set Image Count', value=version.splits['train'])
253 | col2.metric(label='mean Average Precision (mAP)', value=f"{float(project_metadata[version_number]['model']['map'])}%")
254 | col2.metric(label='Precision', value=f"{float(project_metadata[version_number]['model']['precision'])}%")
255 | col2.metric(label='Recall', value=f"{float(project_metadata[version_number]['model']['recall'])}%")
256 | col3.metric(label='Train Set Image Count', value=project.splits['train'])
257 | col3.metric(label='Valid Set Image Count', value=project.splits['valid'])
258 | col3.metric(label='Test Set Image Count', value=project.splits['test'])
259 | except KeyError:
260 | continue
261 |
262 | col4, col5, col6 = st.columns(3)
263 | col4.write('Preprocessing steps applied:')
264 | col4.json(version.preprocessing)
265 | col5.write('Augmentation steps applied:')
266 | col5.json(version.augmentation)
267 | col6.metric(label='Train Set', value=version.splits['train'], delta=f"Increased by Factor of {(version.splits['train'] / project.splits['train'])}")
268 | col6.metric(label='Valid Set', value=version.splits['valid'], delta="No Change")
269 | col6.metric(label='Test Set', value=version.splits['test'], delta="No Change")
270 |
271 | ##########
272 | ##### Set up sidebar.
273 | ##########
274 | # Add in location to select image.
275 | st.sidebar.write("#### Select an image to upload.")
276 | uploaded_file = st.sidebar.file_uploader("",
277 | type=['png', 'jpg', 'jpeg'],
278 | accept_multiple_files=False)
279 |
280 | st.sidebar.write("[Find additional images on Roboflow Universe.](https://universe.roboflow.com/)")
281 | st.sidebar.write("[Improving Your Model with Active Learning](https://help.roboflow.com/implementing-active-learning)")
282 |
283 | ## Add in sliders.
284 | show_bbox = st.sidebar.radio("Show Bounding Boxes:",
285 | options=['Yes', 'No'],
286 | key='include_bbox')
287 |
288 | show_class_label = st.sidebar.radio("Show Class Labels:",
289 | options=['Show Labels', 'Hide Labels'],
290 | key='include_class')
291 |
292 | show_box_type = st.sidebar.selectbox("Display Bounding Boxes As:",
293 | options=('regular', 'fill', 'blur'),
294 | key='box_type')
295 |
296 | st.session_state['confidence_threshold'] = st.sidebar.slider("Confidence threshold (%): What is the minimum acceptable confidence level for displaying a bounding box?", 0, 100, 40, 1)
297 | st.session_state['overlap_threshold'] = st.sidebar.slider("Overlap threshold (%): What is the maximum amount of overlap permitted between visible bounding boxes?", 0, 100, 30, 1)
298 |
299 | image = Image.open("./images/roboflow_logo.png")
300 | st.sidebar.image(image,
301 | use_column_width=True)
302 |
303 | image = Image.open("./images/streamlit_logo.png")
304 | st.sidebar.image(image,
305 | use_column_width=True)
306 |
307 | ##########
308 | ##### Set up project access.
309 | ##########
310 |
311 | ## Title.
312 | st.write("# Roboflow Object Detection Tests")
313 |
314 | with st.form("project_access"):
315 | workspace_id = st.text_input('Workspace ID', key='workspace_id',
316 | help='Finding Your Project Information: https://docs.roboflow.com/python#finding-your-project-information-manually',
317 | placeholder='Input Workspace ID')
318 | model_id = st.text_input('Model ID', key='model_id', placeholder='Input Model ID')
319 | version_number = st.text_input('Trained Model Version Number', key='version_number', placeholder='Input Trained Model Version Number')
320 | private_api_key = st.text_input('Private API Key', key='private_api_key', type='password',placeholder='Input Private API Key')
321 | submitted = st.form_submit_button("Verify and Load Model")
322 | if submitted:
323 | st.write("Loading model...")
324 | run_inference()
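325 |
326 | ## To launch the app locally (a sketch; assumes streamlit is installed and the repo root is the working directory):
327 | ## streamlit run streamlit/streamlit_app.py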
--------------------------------------------------------------------------------
/text_message/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2022.12.7
2 | chardet==4.0.0
3 | cycler==0.10.0
4 | idna==2.10
5 | kiwisolver>=1.3.1
6 | matplotlib
7 | numpy>=1.18.5
8 | opencv-python>=4.1.2
9 | Pillow>=7.1.2
10 | pyparsing==2.4.7
11 | python-dateutil
12 | python-dotenv
13 | requests
14 | roboflow>=1.0.5
15 | six
16 | urllib3>=1.26.6
17 | wget
18 | tqdm>=4.41.0
19 | PyYAML>=5.3.1
20 | wget
21 | requests_toolbelt
22 | aiohttp==3.8.4
23 | aiohttp-retry==2.8.3
24 | aiosignal==1.3.1
25 | async-timeout==4.0.2
26 | frozenlist==1.3.3
27 | multidict==6.0.4
28 | twilio==8.1.0
29 | PyJWT==2.6.0
30 | yarl==1.9.1
31 | supervision>=0.6.0
--------------------------------------------------------------------------------
/text_message/send_text.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import time
3 | import io
4 | import math
5 | import requests
6 | from PIL import Image, ImageOps
7 | from requests_toolbelt.multipart.encoder import MultipartEncoder
8 | from twilio.rest import Client  # remember to first pip install twilio
9 |
10 |
11 |
12 | # Your Account SID from twilio.com/console
13 | account_sid = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
14 | # Your Auth Token from twilio.com/console
15 | auth_token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
16 | photos_to_take = 5
17 |
18 | # Taking and saving images from a webcam stream
19 | for x in range(photos_to_take):
20 | # Change '0' to '1' or '2' if it cannot find your webcam
21 | video = cv2.VideoCapture(0)
22 | ret, frame = video.read()
23 | photo_path = ''.join(['RF_project/webcamphoto',str(x+1),'.jpg'])
24 | cv2.imwrite(photo_path, frame)
25 |     video.release()
26 | time.sleep(3)
27 |
28 |
29 | # mirror the images
30 | # Flip the images to match the video format of my labeled images.
31 | for x in range(photos_to_take):
32 | im = Image.open(''.join(['RF_project/webcamphoto',str(x+1),'.jpg']))
33 | mirror_image = ImageOps.mirror(im)
34 | mirror_image.save(''.join(['RF_project/webcamphoto',str(x+1),'.jpg']))
35 |
36 | # Load Image with PIL
37 | response = [None] * photos_to_take
38 | for x in range (photos_to_take):
39 | photo_path = ''.join(['RF_project/webcamphoto',str(x+1),'.jpg'])
40 | img = cv2.imread(photo_path)
41 | image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
42 | pilImage = Image.fromarray(image)
43 |
44 | # Convert to JPEG Buffer
45 | buffered = io.BytesIO()
46 | pilImage.save(buffered, quality=100, format="JPEG")
47 |
48 | # Build multipart form and post request
49 | m = MultipartEncoder(fields={'file': ("imageToUpload", buffered.getvalue(), "image/jpeg")})
50 |
51 | response[x] = requests.post(
52 | "https://detect.roboflow.com/YOUR_MODEL/YOUR_MODEL_ID?api_key=YOUR_API_KEY&confidence=.40",
53 | data=m, headers={'Content-Type': m.content_type}).json()
54 |
55 | # See inference results
56 | print(response)
57 |
58 | def post_process(response):
59 |     # Post processing - average count of objects across the images, rounding up.
60 |     response_str = str(response)
61 |     player_count = math.ceil(response_str.count("tennis")/photos_to_take)
62 |     court_count = math.ceil(response_str.count("court")/photos_to_take)
63 |     return player_count, court_count
64 |
65 |
66 | # Compute the average object counts from the collected responses
67 | player_count, court_count = post_process(response)
68 |
69 |
70 | # Model used in this example: https://universe.roboflow.com/mixed-sports-area/tennis-court-checker/
71 |
72 | def send_text(player_count, court_count):
73 | court_phrase = "courts."
74 | if court_count == 1:
75 | court_phrase = " court."
76 |
77 | player_phrase = " tennis players detected on "
78 | if player_count == 1:
79 | player_phrase = " tennis player detected on "
80 |
81 | message_body = str(
82 | "There are " + str(player_count) + player_phrase + str(court_count) + court_phrase)
83 |
84 | print(message_body)
85 |
86 | client = Client(account_sid, auth_token)
87 |
88 | message = client.messages.create(to="+XXXXXXXXXX", from_="+XXXXXXXXXX",body=message_body)
89 |
90 | print(message.sid, court_phrase, player_phrase)
91 |
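92 | # Send the SMS summarizing the counts computed above
93 | send_text(player_count, court_count)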
--------------------------------------------------------------------------------
/trigger_power_automate.py:
--------------------------------------------------------------------------------
1 | from roboflow import Roboflow
2 | import argparse
3 | from datetime import datetime
4 | import json
5 | import requests
6 | import base64
7 | import cv2
8 | import os, glob
9 | import time
10 |
11 |
12 | def load_config(email_address=''):
13 | ## load config file for the models
14 | with open(os.pardir + '/roboflow_config.json') as f:
15 | config = json.load(f)
16 |
17 | global ROBOFLOW_API_KEY
18 | global ROBOFLOW_WORKSPACE_ID
19 | global ROBOFLOW_MODEL_ID
20 | global ROBOFLOW_VERSION_NUMBER
21 |
22 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
23 | ROBOFLOW_WORKSPACE_ID = config["ROBOFLOW_WORKSPACE_ID"]
24 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
25 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
26 | if config["EMAIL"]:
27 | EMAIL = config["EMAIL"]
28 | elif email_address != '':
29 | EMAIL = email_address
30 | else:
31 | print('Please Enter a Valid Email Address to send your prediction results to.')
32 |
33 |     # (file closed automatically by the "with" block above)
34 |
35 | return EMAIL
36 |
37 |
38 | def run_inference(send_address):
39 | ## obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
40 | ## create Roboflow object: https://docs.roboflow.com/python
41 | rf = Roboflow(api_key=ROBOFLOW_API_KEY)
42 | workspace = rf.workspace(ROBOFLOW_WORKSPACE_ID)
43 | project = workspace.project(ROBOFLOW_MODEL_ID)
44 | version = project.version(ROBOFLOW_VERSION_NUMBER)
45 | model = version.model
46 |
47 | # email to send the results to
48 | email = send_address
49 |
50 | # grab all the .jpg files
51 |     extension_images = ".jpg"
52 |     get_images = sorted(glob.glob('*' + extension_images))
53 |
54 | # box color and thickness
55 | box_color = (125, 0, 125)
56 | box_thickness = 3
57 | box_scale = 4
58 |
59 | # font settings
60 | font = cv2.FONT_HERSHEY_COMPLEX_SMALL
61 | org = (25, 25)
62 | fontScale = 1
63 | color = (255, 0, 0)
64 | thickness = 2
65 |
66 | try:
67 | for image_paths in get_images:
68 |
69 | object_count = 0
70 |
71 | now = datetime.now() # current date and time
72 | date_time = now.strftime("%m-%d-%Y %H-%M-%S") # generate timestamp
73 |
74 | frame = cv2.imread(image_paths)
75 |
76 | response = model.predict(image_paths, confidence=40, overlap=30).json()
77 |
78 | t0 = time.time()
79 |
80 | for objects in response['predictions']:
81 | # get prediction_name and confidence of each object
82 | object_class = str(objects['class'])
83 | object_confidence = str(round(objects['confidence']*100 , 2)) + "%"
84 |
85 | # pull bbox coordinate points
86 | x0 = objects['x'] - objects['width'] / 2
87 | y0 = objects['y'] - objects['height'] / 2
88 | x1 = objects['x'] + objects['width'] / 2
89 | y1 = objects['y'] + objects['height'] / 2
90 | box = (x0, y0, x1, y1)
91 |
92 | box_start_point = (int(x0), int(y0))
93 | box_end_point = (int(x1), int(y1))
94 |
95 | object_count += 1
96 |
97 | inches_ORG = (int(x0), int(y0-10))
98 |
99 | frame = cv2.putText(frame, 'Class: ' + str(object_class), inches_ORG, font, fontScale, (255,255,255), thickness, cv2.LINE_AA)
100 |
101 | # draw ground truth boxes
102 | frame = cv2.rectangle(frame, box_start_point, box_end_point, box_color, box_thickness)
103 |
104 | # timing: for benchmarking purposes
105 | t = time.time()-t0
106 |
107 | cv2.imwrite(image_paths[:-3]+"prediction.jpg", frame)
108 |
109 | print("IMAGE CONFIRMED")
110 |
111 | with open(image_paths[:-3]+"prediction.jpg", "rb") as image_prediction_file:
112 | encoded_string_prediction = base64.b64encode(image_prediction_file.read())
113 | encoded_string_prediction = encoded_string_prediction.decode('utf-8')
114 | # print(encoded_string_prediction)
115 |
116 | with open(image_paths, "rb") as image_file:
117 | encoded_string = base64.b64encode(image_file.read())
118 | encoded_string = encoded_string.decode('utf-8')
119 | # print(encoded_string)
120 |
121 | url = "https://prod-66.westus.logic.azure.com:443/workflows/42007a30a5954e2ab0af95ac083d58f3/triggers/manual/paths/invoke?api-version=2016-06-01&sp=%2Ftriggers%2Fmanual%2Frun&sv=1.0&sig=F-3LJPi8ocpH49SM_9sI4ESU-KwDXsYFauvpJztQYXI"
122 | myobj = {'email': str(email), 'last_class': str(object_class), 'most_class': str(object_class), 'average_confidence': str(object_confidence), 'number_of_objects': str(object_count), 'timestamp': str(date_time), 'source_base_64': str(encoded_string), 'tested_base_64': str(encoded_string_prediction)}
123 |
124 | x = requests.post(url, json = myobj)
125 |
126 | print(x.text)
127 |
128 |     except Exception as e:
129 |         # log the failure for this image and continue with the rest
130 |         print("IMAGE ERROR:", e)
131 |
132 |
133 | confirm_send_address = load_config()
134 | run_inference(confirm_send_address)
135 |
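136 | ## The POST payload keys (email, last_class, most_class, average_confidence,
137 | ## number_of_objects, timestamp, source_base_64, tested_base_64) must match the schema
138 | ## expected by the Power Automate flow's HTTP trigger at the URL above.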
--------------------------------------------------------------------------------
/video/blur_video.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Open the video file via opencv-python
35 | # Replace with path to video file
36 | video = cv2.VideoCapture("path/to/video.mp4")
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 |     # Get the next frame from the video
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # end of video reached
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR tuple for the bounding box color; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 |         # compute the pixel region to blur, clamped to the frame bounds
76 |         blur_x = max(int(bounding_box['x'] - bounding_box['width'] / 2), 0)
77 |         blur_y = max(int(bounding_box['y'] - bounding_box['height'] / 2), 0)
78 |         blur_width = int(bounding_box['width'])
79 |         blur_height = int(bounding_box['height'])
80 |         ## region of interest (ROI), or area to blur
81 |         roi = img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width]
82 |
83 | # ADD BLURRED BBOXES
84 | # set blur to (31,31) or (51,51) based on amount of blur desired
85 | blur_image = cv2.GaussianBlur(roi,(51,51),0)
86 | img[blur_y:blur_y+blur_height, blur_x:blur_x+blur_width] = blur_image
87 | ## draw/place bounding boxes on image
88 | #start_point = (int(x0), int(y0))
89 | #end_point = (int(x1), int(y1))
90 | #cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
91 |
92 | (text_width, text_height), _ = cv2.getTextSize(
93 | f"{class_name} | {confidence}",
94 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
95 |
96 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
97 | thickness=-1)
98 |
99 | text_location = (int(x0), int(y0))
100 |
101 | cv2.putText(img, f"{class_name} | {confidence}",
102 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
103 | color=(255,255,255), thickness=2)
104 |
105 | return img, detections
106 |
107 |
108 | # Main loop; infers sequentially until you press "q"
109 | while 1:
110 | # On "q" keypress, exit
111 | if(cv2.waitKey(1) == ord('q')):
112 | break
113 |
114 | # Capture start time to calculate fps
115 | start = time.time()
116 |
117 | # Synchronously get a prediction from the Roboflow Infer API
118 |     image, detections = infer()
119 |     if image is None: break  # no more frames in the video
120 |     cv2.imshow('image', image)  # display the inference results
121 |
122 | # Print frames per second
123 | print((1/(time.time()-start)), " fps")
124 | print(detections)
125 |
126 | # Release resources when finished
127 | video.release()
128 | cv2.destroyAllWindows()
129 |
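130 | # A minimal standalone sketch of the blur step above: the same Gaussian blur as
131 | # a reusable helper, with the ROI clamped to the frame bounds so a partially
132 | # off-screen box cannot produce an empty slice. The helper name and demo values
133 | # are illustrative.
134 | def blur_region(frame, x, y, w, h, ksize=(51, 51)):
135 |     x0, y0 = max(int(x - w / 2), 0), max(int(y - h / 2), 0)
136 |     x1 = min(int(x + w / 2), frame.shape[1])
137 |     y1 = min(int(y + h / 2), frame.shape[0])
138 |     if x1 > x0 and y1 > y0:  # skip boxes that fall fully outside the frame
139 |         frame[y0:y1, x0:x1] = cv2.GaussianBlur(frame[y0:y1, x0:x1], ksize, 0)
140 |     return frame
141 | 
142 | # quick self-check on a synthetic 100x100 frame
143 | blur_region(np.zeros((100, 100, 3), dtype=np.uint8), x=50, y=50, w=40, h=40)
144 | 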
--------------------------------------------------------------------------------
/video/boxesOnConveyer.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/roboflow/roboflow-computer-vision-utilities/9a1af868d77d8add09633e35b0cef1b458239a05/video/boxesOnConveyer.mp4
--------------------------------------------------------------------------------
/video/draw_vid.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Open the video file via opencv-python
35 | # Replace with the path to your video file
36 | video = cv2.VideoCapture("path/to/video.mp4")
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 |     # Read the next frame from the video
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # stop once the video is exhausted
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR tuple for the bounding box color; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
79 |
80 | (text_width, text_height), _ = cv2.getTextSize(
81 | f"{class_name} | {confidence}",
82 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
83 |
84 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
85 | thickness=-1)
86 |
87 | text_location = (int(x0), int(y0))
88 |
89 | cv2.putText(img, f"{class_name} | {confidence}",
90 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
91 | color=(255,255,255), thickness=2)
92 |
93 | return img, detections
94 |
95 |
96 | # Main loop; infers sequentially until you press "q"
97 | while 1:
98 | # On "q" keypress, exit
99 | if(cv2.waitKey(1) == ord('q')):
100 | break
101 |
102 | # Capture start time to calculate fps
103 | start = time.time()
104 |
105 | # Synchronously get a prediction from the Roboflow Infer API
106 |     image, detections = infer()
107 |     if image is None: break  # no more frames in the video
108 |     cv2.imshow('image', image)  # display the inference results
109 |
110 | # Print frames per second
111 | print((1/(time.time()-start)), " fps")
112 | print(detections)
113 |
114 | # Release resources when finished
115 | video.release()
116 | cv2.destroyAllWindows()
117 |
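118 | # A sketch of one possible extension: persisting the annotated frames with
119 | # cv2.VideoWriter rather than only displaying them. The output path, fps and
120 | # frame size are illustrative and codec availability depends on the local
121 | # OpenCV build; in the loop above you would create the writer once and call
122 | # writer.write(image) after cv2.imshow().
123 | writer = cv2.VideoWriter("annotated_demo.mp4",
124 |                          cv2.VideoWriter_fourcc(*"mp4v"), 24, (640, 480))
125 | for _ in range(3):  # three synthetic black frames, just to show the call pattern
126 |     writer.write(np.zeros((480, 640, 3), dtype=np.uint8))
127 | writer.release()
128 | 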
--------------------------------------------------------------------------------
/video/fill_vid.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Open the video file via opencv-python
35 | # Replace with the path to your video file
36 | video = cv2.VideoCapture("path/to/video.mp4")
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 |     # Read the next frame from the video
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # stop once the video is exhausted
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR tuple for the bounding box color; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | # setting thickness to -1 --> filled bounding box with the specified color
79 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=-1)
80 |
81 | (text_width, text_height), _ = cv2.getTextSize(
82 | f"{class_name} | {confidence}",
83 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
84 |
85 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
86 | thickness=-1)
87 |
88 | text_location = (int(x0), int(y0))
89 |
90 | cv2.putText(img, f"{class_name} | {confidence}",
91 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
92 | color=(255,255,255), thickness=2)
93 |
94 | return img, detections
95 |
96 |
97 | # Main loop; infers sequentially until you press "q"
98 | while 1:
99 | # On "q" keypress, exit
100 | if(cv2.waitKey(1) == ord('q')):
101 | break
102 |
103 | # Capture start time to calculate fps
104 | start = time.time()
105 |
106 | # Synchronously get a prediction from the Roboflow Infer API
107 |     image, detections = infer()
108 |     if image is None: break  # no more frames in the video
109 |     cv2.imshow('image', image)  # display the inference results
110 |
111 | # Print frames per second
112 | print((1/(time.time()-start)), " fps")
113 | print(detections)
114 |
115 | # Release resources when finished
116 | video.release()
117 | cv2.destroyAllWindows()
118 |
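119 | # A sketch of an alternative to the solid thickness=-1 fill above: a
120 | # semi-transparent fill via cv2.addWeighted on a copy of the frame. The helper
121 | # name, alpha and demo frame are illustrative.
122 | def fill_translucent(frame, start_point, end_point, color=(0, 0, 0), alpha=0.4):
123 |     overlay = frame.copy()
124 |     cv2.rectangle(overlay, start_point, end_point, color=color, thickness=-1)
125 |     # blend: alpha * filled overlay + (1 - alpha) * original frame
126 |     return cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0)
127 | 
128 | fill_translucent(np.zeros((100, 100, 3), dtype=np.uint8), (10, 10), (60, 60))
129 | 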
--------------------------------------------------------------------------------
/video/writetext_vid.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('../roboflow_config.json') as f:
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Open the video file via opencv-python
35 | # Replace with the path to your video file
36 | video = cv2.VideoCapture("path/to/video.mp4")
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 |     # Read the next frame from the video
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # stop once the video is exhausted
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 |
73 | (text_width, text_height), _ = cv2.getTextSize(
74 | f"{class_name} | {confidence}",
75 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
76 |
77 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
78 | thickness=-1)
79 |
80 | text_location = (int(x0), int(y0))
81 |
82 | cv2.putText(img, f"{class_name} | {confidence}",
83 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
84 | color=(255,255,255), thickness=2)
85 |
86 | return img, detections
87 |
88 |
89 | # Main loop; infers sequentially until you press "q"
90 | while 1:
91 | # On "q" keypress, exit
92 | if(cv2.waitKey(1) == ord('q')):
93 | break
94 |
95 | # Capture start time to calculate fps
96 | start = time.time()
97 |
98 | # Synchronously get a prediction from the Roboflow Infer API
99 |     image, detections = infer()
100 |     if image is None: break  # no more frames in the video
101 |     cv2.imshow('image', image)  # display the inference results
102 |
103 | # Print frames per second
104 | print((1/(time.time()-start)), " fps")
105 | print(detections)
106 |
107 | # Release resources when finished
108 | video.release()
109 | cv2.destroyAllWindows()
110 |
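111 | # A refactoring sketch: the label pattern above (measure the text, draw a filled
112 | # background, then the text) as one helper, rounding the confidence so the
113 | # overlay stays short. The helper name and defaults are illustrative.
114 | def draw_label(img, text, x0, y0, font_scale=0.7, thickness=2):
115 |     (tw, th), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, font_scale, thickness)
116 |     cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + tw, int(y0) - th), (0, 0, 0), -1)
117 |     cv2.putText(img, text, (int(x0), int(y0)), cv2.FONT_HERSHEY_SIMPLEX,
118 |                 font_scale, (255, 255, 255), thickness)
119 |     return img
120 | 
121 | # usage: define this above the loop, then draw_label(img, f"{class_name} | {confidence:.2f}", x0, y0)
122 | 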
--------------------------------------------------------------------------------
/video_classification.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import requests
5 | import time
6 |
7 | # Check to be sure your config file contains details for a Classification Model trained with Roboflow Train
8 | # https://docs.roboflow.com/train | https://docs.roboflow.com/inference-classification/hosted-api
9 | # load config file:
10 | with open('roboflow_config.json') as f:  # this script lives in the repo root, next to the config
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # (if running locally replace https://classify.roboflow.com/ with eg http://127.0.0.1:9001/)
23 | upload_url = "".join([
24 | "https://classify.roboflow.com/",
25 | ROBOFLOW_MODEL_ID, "/",
26 | ROBOFLOW_VERSION_NUMBER,
27 | "?api_key=",
28 | ROBOFLOW_API_KEY,
29 | "&format=json",
30 | ])
31 |
32 | # Open the video file via opencv-python
33 | # Replace with the path to your video file
34 | video = cv2.VideoCapture("path/to/video.mp4")
35 |
36 | # Infer via the Roboflow Infer API and return the result
37 | def infer():
38 |     # Read the next frame from the video
39 |     ret, img = video.read()
40 |     if not ret: return None, None  # stop once the video is exhausted
41 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
42 | height, width, channels = img.shape
43 | scale = ROBOFLOW_SIZE / max(height, width)
44 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
45 |
46 | # Encode image to base64 string
47 | retval, buffer = cv2.imencode('.jpg', img)
48 | img_str = base64.b64encode(buffer)
49 |
50 | # Get prediction from Roboflow Infer API
51 | resp = requests.post(upload_url, data=img_str, headers={
52 | "Content-Type": "application/x-www-form-urlencoded"
53 | }, stream=True)
54 | # Convert Response to JSON
55 | predictions = resp.json()
56 |
57 | # Add predictions (class label and confidence score) to image
58 | (text_width, text_height), _ = cv2.getTextSize(
59 | f"{predictions['top']} Confidence: {predictions['confidence']}",
60 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, thickness=2)
61 |
62 | text_location = (5, text_height)
63 |
64 | cv2.putText(img, f"{predictions['top']} | Confidence: {predictions['confidence']}",
65 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,
66 | color=(255,255,255), thickness=2)
67 |
68 | return img, predictions
69 |
70 |
71 | # Main loop; infers sequentially until you press "q"
72 | while 1:
73 | # On "q" keypress, exit
74 | if(cv2.waitKey(1) == ord('q')):
75 | break
76 |
77 | # Capture start time to calculate fps
78 | start = time.time()
79 |
80 | # Synchronously get a prediction from the Roboflow Infer API
81 |     image, detections = infer()
82 |     if image is None: break  # no more frames in the video
83 |     cv2.imshow('image', image)  # display the inference results
84 |
85 | # Print frames per second
86 | print((1/(time.time()-start)), " fps")
87 | print(detections)
88 |
89 | # Release resources when finished
90 | video.release()
91 | cv2.destroyAllWindows()
92 |
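93 | # For reference, a mock of the two response fields this script reads; the values
94 | # are made up, real ones come from the Infer API call above.
95 | mock_prediction = {"top": "scratch", "confidence": 0.91}
96 | print(f"label={mock_prediction['top']} confidence={mock_prediction['confidence']:.2f}")
97 | 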
--------------------------------------------------------------------------------
/video_od.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('roboflow_config.json') as f:  # this script lives in the repo root, next to the config
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Open the video file via opencv-python
35 | # Replace with the path to your video file
36 | video = cv2.VideoCapture("path/to/video.mp4")
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 |     # Read the next frame from the video
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # stop once the video is exhausted
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR tuple for the bounding box color; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
79 |
80 | (text_width, text_height), _ = cv2.getTextSize(
81 | f"{class_name} | {confidence}",
82 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
83 |
84 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
85 | thickness=-1)
86 |
87 | text_location = (int(x0), int(y0))
88 |
89 | cv2.putText(img, f"{class_name} | {confidence}",
90 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
91 | color=(255,255,255), thickness=2)
92 |
93 | return img, detections
94 |
95 |
96 | # Main loop; infers sequentially until you press "q"
97 | while 1:
98 | # On "q" keypress, exit
99 | if(cv2.waitKey(1) == ord('q')):
100 | break
101 |
102 | # Capture start time to calculate fps
103 | start = time.time()
104 |
105 | # Synchronously get a prediction from the Roboflow Infer API
106 |     image, detections = infer()
107 |     if image is None: break  # no more frames in the video
108 |     cv2.imshow('image', image)  # display the inference results
109 |
110 | # Print frames per second
111 | print((1/(time.time()-start)), " fps")
112 | print(detections)
113 |
114 | # Release resources when finished
115 | video.release()
116 | cv2.destroyAllWindows()
117 |
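118 | # A sketch of the coordinate math in the loop above: the API returns
119 | # center-format boxes (x, y, width, height), which are converted to corner
120 | # format for drawing. The helper name and sample box are illustrative.
121 | def to_corners(box):
122 |     x0 = box['x'] - box['width'] / 2
123 |     y0 = box['y'] - box['height'] / 2
124 |     x1 = box['x'] + box['width'] / 2
125 |     y1 = box['y'] + box['height'] / 2
126 |     return int(x0), int(y0), int(x1), int(y1)
127 | 
128 | print(to_corners({'x': 50, 'y': 40, 'width': 20, 'height': 10}))  # (40, 35, 60, 45)
129 | 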
--------------------------------------------------------------------------------
/webcam_classification.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import requests
5 | import time
6 |
7 | # Check to be sure your config file contains details for a Classification Model trained with Roboflow Train
8 | # https://docs.roboflow.com/train | https://docs.roboflow.com/inference-classification/hosted-api
9 | # load config file:
10 | with open('roboflow_config.json') as f:  # this script lives in the repo root, next to the config
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # (if running locally replace https://classify.roboflow.com/ with eg http://127.0.0.1:9001/)
23 | upload_url = "".join([
24 | "https://classify.roboflow.com/",
25 | ROBOFLOW_MODEL_ID, "/",
26 | ROBOFLOW_VERSION_NUMBER,
27 | "?api_key=",
28 | ROBOFLOW_API_KEY,
29 | "&format=json",
30 | ])
31 |
32 | # Get webcam interface via opencv-python
33 | # Change '0' to '1' or '2' if it cannot find your webcam
34 | video = cv2.VideoCapture(0)
35 |
36 | # Infer via the Roboflow Infer API and return the result
37 | def infer():
38 | # Get the current image from the webcam
39 |     ret, img = video.read()
40 |     if not ret: return None, None  # bail out if the camera returned no frame
41 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
42 | height, width, channels = img.shape
43 | scale = ROBOFLOW_SIZE / max(height, width)
44 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
45 |
46 | # Encode image to base64 string
47 | retval, buffer = cv2.imencode('.jpg', img)
48 | img_str = base64.b64encode(buffer)
49 |
50 | # Get prediction from Roboflow Infer API
51 | resp = requests.post(upload_url, data=img_str, headers={
52 | "Content-Type": "application/x-www-form-urlencoded"
53 | }, stream=True)
54 | # Convert Response to JSON
55 | predictions = resp.json()
56 |
57 | # Add predictions (class label and confidence score) to image
58 | (text_width, text_height), _ = cv2.getTextSize(
59 | f"{predictions['top']} Confidence: {predictions['confidence']}",
60 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6, thickness=2)
61 |
62 | text_location = (5, text_height)
63 |
64 | cv2.putText(img, f"{predictions['top']} | Confidence: {predictions['confidence']}",
65 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.6,
66 | color=(255,255,255), thickness=2)
67 |
68 | return img, predictions
69 |
70 |
71 | # Main loop; infers sequentially until you press "q"
72 | while 1:
73 | # On "q" keypress, exit
74 | if(cv2.waitKey(1) == ord('q')):
75 | break
76 |
77 | # Capture start time to calculate fps
78 | start = time.time()
79 |
80 | # Synchronously get a prediction from the Roboflow Infer API
81 |     image, detections = infer()
82 |     if image is None: break  # camera frame unavailable
83 |     cv2.imshow('image', image)  # display the inference results
84 |
85 | # Print frames per second
86 | print((1/(time.time()-start)), " fps")
87 | print(detections)
88 |
89 | # Release resources when finished
90 | video.release()
91 | cv2.destroyAllWindows()
92 |
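93 | # FRAMERATE is loaded from the config above but never used; one way to apply it,
94 | # sketched here, is to pace the loop so the hosted API is not polled faster than
95 | # FRAMERATE frames per second (assumes FRAMERATE > 0).
96 | def pace(start_time, fps=FRAMERATE):
97 |     # sleep off whatever remains of this frame's time budget
98 |     time.sleep(max(1.0 / fps - (time.time() - start_time), 0.0))
99 | 
100 | # to use it, define this above the main loop and call pace(start) each iteration
101 | 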
--------------------------------------------------------------------------------
/webcam_od.py:
--------------------------------------------------------------------------------
1 | import json
2 | import cv2
3 | import base64
4 | import numpy as np
5 | import requests
6 | import time
7 |
8 |
9 | # load config
10 | with open('roboflow_config.json') as f:  # this script lives in the repo root, next to the config
11 | config = json.load(f)
12 |
13 | ROBOFLOW_API_KEY = config["ROBOFLOW_API_KEY"]
14 | ROBOFLOW_SIZE = config["ROBOFLOW_SIZE"]
15 | ROBOFLOW_MODEL_ID = config["ROBOFLOW_MODEL_ID"]
16 | ROBOFLOW_VERSION_NUMBER = config["ROBOFLOW_VERSION_NUMBER"]
17 |
18 | FRAMERATE = config["FRAMERATE"]
19 | BUFFER = config["BUFFER"]
20 |
21 | # Construct the Roboflow Infer URL
22 | # obtaining your API key: https://docs.roboflow.com/rest-api#obtaining-your-api-key
23 | # (if running locally replace https://detect.roboflow.com/ with eg http://127.0.0.1:9001/)
24 | upload_url = "".join([
25 | "https://detect.roboflow.com/",
26 | ROBOFLOW_MODEL_ID, "/",
27 | ROBOFLOW_VERSION_NUMBER,
28 | "?api_key=",
29 | ROBOFLOW_API_KEY,
30 | "&format=json",
31 | "&stroke=5"
32 | ])
33 |
34 | # Get webcam interface via opencv-python
35 | # Change '0' to '1' or '2' if it cannot find your webcam
36 | video = cv2.VideoCapture(0)
37 |
38 | # Infer via the Roboflow Infer API and return the result
39 | def infer():
40 | # Get the current image from the webcam
41 |     ret, img = video.read()
42 |     if not ret: return None, None  # bail out if the camera returned no frame
43 | # Resize (while maintaining the aspect ratio) to improve speed and save bandwidth
44 | height, width, channels = img.shape
45 | scale = ROBOFLOW_SIZE / max(height, width)
46 | img = cv2.resize(img, (round(scale * width), round(scale * height)))
47 |
48 | # Encode image to base64 string
49 | retval, buffer = cv2.imencode('.jpg', img)
50 | img_str = base64.b64encode(buffer)
51 |
52 | # Get prediction from Roboflow Infer API
53 | resp = requests.post(upload_url, data=img_str, headers={
54 | "Content-Type": "application/x-www-form-urlencoded"
55 | }, stream=True)
56 |
57 | predictions = resp.json()
58 | detections = predictions['predictions']
59 |
60 | # Parse result image
61 | # image = np.asarray(bytearray(resp.read()), dtype="uint8")
62 | # image = cv2.imdecode(image, cv2.IMREAD_COLOR)
63 |
64 | # Add predictions (bounding box, class label and confidence score) to image
65 | for bounding_box in detections:
66 | x0 = bounding_box['x'] - bounding_box['width'] / 2
67 | x1 = bounding_box['x'] + bounding_box['width'] / 2
68 | y0 = bounding_box['y'] - bounding_box['height'] / 2
69 | y1 = bounding_box['y'] + bounding_box['height'] / 2
70 | class_name = bounding_box['class']
71 | confidence = bounding_box['confidence']
72 | # position coordinates: start = (x0, y0), end = (x1, y1)
73 |         # color = BGR tuple for the bounding box color; (0,0,0) is black
74 | # thickness = stroke width/thickness of bounding box
75 | start_point = (int(x0), int(y0))
76 | end_point = (int(x1), int(y1))
77 | # draw/place bounding boxes on image
78 | cv2.rectangle(img, start_point, end_point, color=(0,0,0), thickness=2)
79 |
80 | (text_width, text_height), _ = cv2.getTextSize(
81 | f"{class_name} | {confidence}",
82 | fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7, thickness=2)
83 |
84 | cv2.rectangle(img, (int(x0), int(y0)), (int(x0) + text_width, int(y0) - text_height), color=(0,0,0),
85 | thickness=-1)
86 |
87 | text_location = (int(x0), int(y0))
88 |
89 | cv2.putText(img, f"{class_name} | {confidence}",
90 | text_location, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.7,
91 | color=(255,255,255), thickness=2)
92 |
93 | return img, detections
94 |
95 |
96 | # Main loop; infers sequentially until you press "q"
97 | while 1:
98 | # On "q" keypress, exit
99 | if(cv2.waitKey(1) == ord('q')):
100 | break
101 |
102 | # Capture start time to calculate fps
103 | start = time.time()
104 |
105 | # Synchronously get a prediction from the Roboflow Infer API
106 |     image, detections = infer()
107 |     if image is None: break  # camera frame unavailable
108 |     cv2.imshow('image', image)  # display the inference results
109 |
110 | # Print frames per second
111 | print((1/(time.time()-start)), " fps")
112 | print(detections)
113 |
114 | # Release resources when finished
115 | video.release()
116 | cv2.destroyAllWindows()
117 |
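118 | # A sketch for summarising detections per class with collections.Counter, which
119 | # can be handier to log than the raw list printed in the loop above; the sample
120 | # detections are made-up values.
121 | from collections import Counter
122 | 
123 | def class_counts(detections):
124 |     return Counter(d['class'] for d in detections)
125 | 
126 | print(class_counts([{'class': 'box'}, {'class': 'box'}, {'class': 'label'}]))
127 | 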
--------------------------------------------------------------------------------