├── .gitattributes ├── .github └── FUNDING.yml ├── .gitignore ├── 3D Models ├── Compact OWL │ ├── Backplate - 2 x Amphenol receptacle.stl │ ├── Backplate - blank.stl │ ├── Backplate - gland.stl │ ├── Backplate - receptacle and ethernet.stl │ ├── Backplate - receptacle only.stl │ ├── Camera Mount.stl │ ├── Frontplate.stl │ ├── Lens Mount.stl │ ├── Main Body.stl │ ├── Tray - base.stl │ └── Tray - lens holder.stl ├── Controllers │ ├── Advanced Controller - Base - 4 OWL.stl │ ├── Advanced Controller - Base - Double OWL.stl │ ├── Advanced Controller - Base - Single OWL.stl │ ├── Advanced Controller - Top - 4 OWL.stl │ ├── Advanced Controller - Top - Double OWL.stl │ ├── Advanced Controller - Top - Single OWL.stl │ ├── Ute Controller - Base.stl │ └── Ute Controller - Top.stl └── Original OWL │ ├── Camera mount.stl │ ├── Enclosure - cable gland.stl │ ├── Enclosure - cover.stl │ ├── Enclosure - single connector.stl │ ├── Enclosure plug.stl │ ├── Raspberry Pi mount.stl │ ├── Relay control board mount.stl │ ├── Solenoid mount - back.stl │ ├── Solenoid mount - front.stl │ ├── Solenoid mount - mid.stl │ └── Voltage regulator mount.stl ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── config ├── DAY_SENSITIVITY_1.ini ├── DAY_SENSITIVITY_2.ini └── DAY_SENSITIVITY_3.ini ├── desktop ├── focus_gui.py └── focus_owl_desktop.sh ├── dev └── clean.sh ├── display └── __init__.py ├── docs ├── 20230331_owl_readme.pdf └── 20240528_owl_readme.pdf ├── environment.yml ├── images ├── owl-background.png └── owl-logo.png ├── logs ├── detections.jsonl └── owl_system_logs.jsonl ├── models ├── README.md ├── install_coral.sh ├── labels.txt └── tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite ├── non_rpi_requirements.txt ├── notes ├── README.md ├── error_manager_py_notes.txt ├── input_manager_py_notes.txt ├── output_manager_py_notes.txt ├── owl_py_notes.txt └── video_manager_py_notes.txt ├── owl.py ├── owl_boot.sh ├── owl_boot_wrapper.sh ├── owl_setup.sh ├── requirements.txt ├── update_owl.sh ├── utils ├── algorithms.py ├── config_manager.py ├── directory_manager.py ├── error_manager.py ├── frame_reader.py ├── greenonbrown.py ├── greenongreen.py ├── image_sampler.py ├── input_manager.py ├── log_manager.py ├── output_manager.py ├── video_manager.py └── vis_manager.py └── version.py /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # Add funding model platform 2 | 3 | buy_me_a_coffee: geezacoleman -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # pytype static type analyzer 132 | .pytype/ 133 | 134 | # Cython debug symbols 135 | cython_debug/ 136 | -------------------------------------------------------------------------------- /3D Models/Compact OWL/Backplate - 2 x Amphenol receptacle.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Backplate - 2 x Amphenol receptacle.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Backplate - blank.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Backplate - blank.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Backplate - gland.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Backplate - gland.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Backplate - receptacle and ethernet.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Backplate - receptacle and ethernet.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Backplate - receptacle 
only.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Backplate - receptacle only.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Camera Mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Camera Mount.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Frontplate.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Frontplate.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Lens Mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Lens Mount.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Main Body.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Main Body.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Tray - base.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Tray - base.stl -------------------------------------------------------------------------------- /3D Models/Compact OWL/Tray - lens holder.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Compact OWL/Tray - lens holder.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Base - 4 OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Base - 4 OWL.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Base - Double OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Base - Double OWL.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Base - Single OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Base - Single OWL.stl 
-------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Top - 4 OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Top - 4 OWL.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Top - Double OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Top - Double OWL.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Advanced Controller - Top - Single OWL.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Advanced Controller - Top - Single OWL.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Ute Controller - Base.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Ute Controller - Base.stl -------------------------------------------------------------------------------- /3D Models/Controllers/Ute Controller - Top.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Controllers/Ute Controller - Top.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Camera mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Camera mount.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Enclosure - cable gland.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Enclosure - cable gland.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Enclosure - cover.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Enclosure - cover.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Enclosure - single connector.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Enclosure - single connector.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Enclosure plug.stl: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Enclosure plug.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Raspberry Pi mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Raspberry Pi mount.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Relay control board mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Relay control board mount.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Solenoid mount - back.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Solenoid mount - back.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Solenoid mount - front.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Solenoid mount - front.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Solenoid mount - mid.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Solenoid mount - mid.stl -------------------------------------------------------------------------------- /3D Models/Original OWL/Voltage regulator mount.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/3D Models/Original OWL/Voltage regulator mount.stl -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 
3 | authors: 4 | - family-names: "Coleman" 5 | given-names: "Guy" 6 | orcid: "https://orcid.org/0000-0002-8808-2965" 7 | - family-names: "Salter" 8 | given-names: "William" 9 | orcid: "https://orcid.org/0000-0003-1653-5192" 10 | - family-names: "Walsh" 11 | given-names: "Michael" 12 | title: "OpenWeedLocator (OWL)" 13 | version: 1.0.0 14 | doi: 10.5281/zenodo.1234 15 | date-released: 2021-08-24 16 | url: "https://github.com/geezacoleman/OpenWeedLocator" 17 | preferred-citation: 18 | type: article 19 | authors: 20 | - family-names: "Coleman" 21 | given-names: "Guy" 22 | orcid: "https://orcid.org/0000-0002-8808-2965" 23 | - family-names: "Salter" 24 | given-names: "William" 25 | orcid: "https://orcid.org/0000-0003-1653-5192" 26 | - family-names: "Walsh" 27 | given-names: "Michael" 28 | doi: "10.1038/s41598-021-03858-9" 29 | journal: "Scientific Reports" 30 | month: 9 31 | start: 1 # First page number 32 | end: 12 # Last page number 33 | title: "OpenWeedLocator (OWL): an open-source, low-cost device for fallow weed detection" 34 | issue: 170 35 | volume: 12 36 | year: 2022 37 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | guy.coleman@sydney.edu.au. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 
120 | 
121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 
122 | enforcement ladder](https://github.com/mozilla/diversity). 
123 | 
124 | [homepage]: https://www.contributor-covenant.org 
125 | 
126 | For answers to common questions about this code of conduct, see the FAQ at 
127 | https://www.contributor-covenant.org/faq. Translations are available at 
128 | https://www.contributor-covenant.org/translations. 
129 | 
-------------------------------------------------------------------------------- /CONTRIBUTING.md: --------------------------------------------------------------------------------
1 | # Contributing to OWL 
2 | Firstly, thank you for taking the time to contribute to the OWL project! 
3 | 
4 | To make sure everyone is on the same page, has clarity in how to contribute, and does so in a respectful and consistent manner, we have put together the following set of guidelines. They are mostly just guidelines; if you feel changes are needed, propose them in a pull request to this document. 
5 | 
6 | ## Table of Contents 
7 | * [Code of conduct](#code-of-conduct) 
8 | * [How to contribute](#how-to-contribute) 
9 | 
10 | ## Code of Conduct 
11 | This project and everyone involved in it is governed by the [OWL Code of Conduct](CODE_OF_CONDUCT.md). By participating in the project you are expected to uphold this code. Please report any unacceptable behaviour to project admins. 
12 | 
13 | ## How to contribute 
14 | The best way to start contributing is to build an OWL for yourself! As you find opportunities for improvement, start logging them as issues here. 
15 | 
16 | There is also a list of active projects that you can take on in full or as individual items within each project. These are summarised below. This is not a definitive list though, so if you have other ideas or suggestions just let us know. 
17 | 
18 | ## Project List 
19 | ### [Project 1](https://github.com/geezacoleman/OpenWeedLocator/projects/1): Integrating position information (GPS, GLONASS etc.) 
20 | This project seeks to incorporate position information either from a low-cost, per-unit [GPS/GLONASS/Beidou sensor](https://uk.rs-online.com/web/p/gnss-gps-modules/9054630?cm_mmc=UK-PLA-DS3A-_-google-_-CSS_UK_EN_Semiconductors_Whoop-_-GNSS+%26+GPS+Modules_Whoop_OMNISerpNov-_-9054630&matchtype=&pla-532120398712&gclid=CjwKCAiAz--OBhBIEiwAG1rIOtxRjYlN5e7eCDyFBnrZ0RgSKc8NUHGi27VLPxOhBYGM_iqA3I6PkhoCOsUQAvD_BwE&gclsrc=aw.ds) or by interpreting NMEA strings from tractor-mounted GPS receivers. Fortunately, both appear to return data in the same standard, so supporting the variations should be straightforward. 
21 | 
22 | A substantial portion of the code is now complete but untested in [`gps-string-reading`](https://github.com/geezacoleman/OpenWeedLocator/tree/gps-string-reading) 
23 | 
24 | #### Benefits 
25 | - on/off delay based on speed to reduce wastage and ensure weeds aren't missed 
26 | - recording weed locations for weed mapping and density estimates 
27 | - turn compensation 
28 | 
29 | ### [Project 2](https://github.com/geezacoleman/OpenWeedLocator/projects/2): Optimising OWL enclosure design 
30 | The current enclosure has done a good job of supporting initial OWL testing, but could be improved for printing efficiency, camera-mount strength and dust/water resistance. This is a great project to start with if you have a background in design. 
31 | 
32 | **Benefits:** 
33 | - reduced print area and cost 
34 | - better dust and water resistance by covering the current camera hole 
35 | - stronger camera mounts 
36 | 
37 | ### [Project 3](https://github.com/geezacoleman/OpenWeedLocator/projects/3): Upgrading OWL to Green-on-Green (in-crop) Weed Detection 
38 | A highly sought-after upgrade to the OWL, this is the most complex project listed. It involves hardware, software and algorithm development but is quite an achievable set of tasks with significant benefits. The main areas of work include incorporating the Jetson Nano hardware, developing software to run and interpret object detection/image classification models and training initial algorithms on existing data. 
39 | 
40 | **Benefits:** 
41 | - in-crop weed recognition 
42 | - more computing power 
43 | 
44 | ### [Project 4](https://github.com/geezacoleman/OpenWeedLocator/projects/4): Reducing image blur to improve forward travel speed 
45 | This is the highest-priority project. Reducing image blur on the Raspberry Pi camera is likely a software issue, but it requires dedicated testing of the current camera and developing settings that will ensure reduced image blur in a range of conditions. Reduced blur means higher forward travel speeds with fewer missed weeds. 
46 | 
47 | **Benefits:** 
48 | - greater sensitivity to small weeds 
49 | - higher forward travel speeds without missing weeds 
50 | - better overall performance 
51 | 
52 | **If you have any other project ideas, please raise an issue and let us know** 
53 | 
54 | 
-------------------------------------------------------------------------------- /LICENSE: --------------------------------------------------------------------------------
1 | MIT License 
2 | 
3 | Copyright (c) 2020 Guy Coleman 
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy 
6 | of this software and associated documentation files (the "Software"), to deal 
7 | in the Software without restriction, including without limitation the rights 
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
9 | copies of the Software, and to permit persons to whom the Software is 
10 | furnished to do so, subject to the following conditions: 
11 | 
12 | The above copyright notice and this permission notice shall be included in all 
13 | copies or substantial portions of the Software. 
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 
21 | SOFTWARE. 
22 | 
-------------------------------------------------------------------------------- /config/DAY_SENSITIVITY_1.ini: --------------------------------------------------------------------------------
1 | [System] 
2 | # select your algorithm 
3 | algorithm = exhsv 
4 | # operate on a video, image or directory of media 
5 | input_file_or_directory = 
6 | # choose how many relays are connected to the OWL 
7 | relay_num = 4 
8 | actuation_duration = 0.15 
9 | delay = 0 
10 | 
11 | [Controller] 
12 | # choose between none, ute or advanced - plain text only, do not use quotes ('' or "") 
13 | controller_type = none 
14 | 
15 | # for advanced controller 
16 | detection_mode_pin_up = 35 
17 | detection_mode_pin_down = 36 
18 | recording_pin = 38 
19 | sensitivity_pin = 40 
20 | low_sensitivity_config = config/DAY_SENSITIVITY_1.ini 
21 | high_sensitivity_config = config/DAY_SENSITIVITY_3.ini 
22 | 
23 | # for UteController 
24 | switch_purpose = recording 
25 | switch_pin = 37 
26 | 
27 | [Visualisation] 
28 | image_loop_time = 5 
29 | 
30 | [Camera] 
31 | resolution_width = 416 
32 | resolution_height = 320 
33 | exp_compensation = -2 
34 | 
35 | [GreenOnGreen] 
36 | # parameters related to green-on-green detection 
37 | model_path = models 
38 | confidence = 0.5 
39 | class_filter_id = None 
40 | 
41 | [GreenOnBrown] 
42 | # parameters related to green-on-brown detection 
43 | exg_min = 25 
44 | exg_max = 200 
45 | hue_min = 41 
46 | hue_max = 80 
47 | saturation_min = 52 
48 | saturation_max = 218 
49 | brightness_min = 62 
50 | brightness_max = 188 
51 | min_detection_area = 20 
52 | invert_hue = False 
53 | 
54 | [DataCollection] 
55 | # all data collection related parameters 
56 | # set sample_images True/False to enable/disable image collection 
57 | sample_images = False 
58 | # image collection, sample methods include: 'bbox' | 'square' | 'whole' 
59 | sample_method = whole 
60 | sample_frequency = 30 
61 | save_directory = /media/owl/SanDisk 
62 | # set to True to disable weed detection for data collection only 
63 | disable_detection = False 
64 | # log fps 
65 | log_fps = False 
66 | camera_name = cam1 
67 | 
68 | [Relays] 
69 | # defines the relay ID (left) that matches to a board pin (right) on the Pi. 
70 | # Only change if you rewire/change the relay connections. 
71 | 0 = 13 
72 | 1 = 15 
73 | 2 = 16 
74 | 3 = 18 
75 | 
76 | 
77 | 
-------------------------------------------------------------------------------- /config/DAY_SENSITIVITY_2.ini: --------------------------------------------------------------------------------
1 | [System] 
2 | # select your algorithm 
3 | algorithm = exhsv 
4 | # operate on a video, image or directory of media 
5 | input_file_or_directory = 
6 | # choose how many relays are connected to the OWL 
7 | relay_num = 4 
8 | actuation_duration = 0.15 
9 | delay = 0 
10 | 
11 | [Controller] 
12 | # choose between none, ute or advanced - plain text only, do not use quotes ('' or "") 
13 | controller_type = none 
14 | 
15 | # for advanced controller 
16 | detection_mode_pin_up = 35 
17 | detection_mode_pin_down = 36 
18 | recording_pin = 38 
19 | sensitivity_pin = 40 
20 | low_sensitivity_config = config/DAY_SENSITIVITY_2.ini 
21 | high_sensitivity_config = config/DAY_SENSITIVITY_3.ini 
22 | 
23 | # for UteController 
24 | switch_purpose = recording 
25 | switch_pin = 37 
26 | 
27 | [Visualisation] 
28 | image_loop_time = 5 
29 | 
30 | [Camera] 
31 | resolution_width = 640 
32 | resolution_height = 480 
33 | exp_compensation = -2 
34 | 
35 | [GreenOnGreen] 
36 | # parameters related to green-on-green detection 
37 | model_path = models 
38 | confidence = 0.5 
39 | class_filter_id = None 
40 | 
41 | [GreenOnBrown] 
42 | # parameters related to green-on-brown detection 
43 | exg_min = 25 
44 | exg_max = 200 
45 | hue_min = 39 
46 | hue_max = 83 
47 | saturation_min = 50 
48 | saturation_max = 220 
49 | brightness_min = 60 
50 | brightness_max = 190 
51 | min_detection_area = 10 
52 | invert_hue = False 
53 | 
54 | [DataCollection] 
55 | # all data collection related parameters 
56 | # set sample_images True/False to enable/disable image collection 
57 | sample_images = False 
58 | # image collection, sample methods include: 'bbox' | 'square' | 'whole' 
59 | sample_method = whole 
60 | sample_frequency = 30 
61 | save_directory = /media/owl/SanDisk 
62 | # set to True to disable weed detection for data collection only 
63 | disable_detection = False 
64 | # log fps 
65 | log_fps = False 
66 | camera_name = cam1 
67 | 
68 | [Relays] 
69 | # defines the relay ID (left) that matches to a board pin (right) on the Pi. 
70 | # Only change if you rewire/change the relay connections. 
71 | 0 = 13 
72 | 1 = 15 
73 | 2 = 16 
74 | 3 = 18 
75 | 
76 | 
77 | 
-------------------------------------------------------------------------------- /config/DAY_SENSITIVITY_3.ini: --------------------------------------------------------------------------------
1 | [System] 
2 | # select your algorithm 
3 | algorithm = exhsv 
4 | # operate on a video, image or directory of media 
5 | input_file_or_directory = 
6 | # choose how many relays are connected to the OWL 
7 | relay_num = 4 
8 | actuation_duration = 0.15 
9 | delay = 0 
10 | 
11 | [Controller] 
12 | # choose between none, ute or advanced - plain text only, do not use quotes ('' or "") 
13 | controller_type = none 
14 | 
15 | # for advanced controller 
16 | detection_mode_pin_up = 35 
17 | detection_mode_pin_down = 36 
18 | recording_pin = 38 
19 | sensitivity_pin = 40 
20 | low_sensitivity_config = config/DAY_SENSITIVITY_2.ini 
21 | high_sensitivity_config = config/DAY_SENSITIVITY_3.ini 
22 | 
23 | # for UteController 
24 | switch_purpose = recording 
25 | switch_pin = 37 
26 | 
27 | [Visualisation] 
28 | image_loop_time = 5 
29 | 
30 | [Camera] 
31 | resolution_width = 416 
32 | resolution_height = 320 
33 | exp_compensation = -2 
34 | 
35 | [GreenOnGreen] 
36 | # parameters related to green-on-green detection 
37 | model_path = models 
38 | confidence = 0.5 
39 | class_filter_id = None 
40 | 
41 | [GreenOnBrown] 
42 | # parameters related to green-on-brown detection 
43 | exg_min = 22 
44 | exg_max = 210 
45 | hue_min = 35 
46 | hue_max = 85 
47 | saturation_min = 40 
48 | saturation_max = 225 
49 | brightness_min = 50 
50 | brightness_max = 200 
51 | min_detection_area = 5 
52 | invert_hue = False 
53 | 
54 | [DataCollection] 
55 | # all data collection related parameters 
56 | # set sample_images True/False to enable/disable image collection 
57 | sample_images = False 
58 | # image collection, sample methods include: 'bbox' | 'square' | 'whole' 
59 | sample_method = whole 
60 | sample_frequency = 30 
61 | save_directory = /media/owl/SanDisk 
62 | # set to True to disable weed detection for data collection only 
63 | disable_detection = False 
64 | # log fps 
65 | log_fps = False 
66 | camera_name = cam1 
67 | 
68 | [Relays] 
69 | # defines the relay ID (left) that matches to a board pin (right) on the Pi. 
70 | # Only change if you rewire/change the relay connections. 
71 | 0 = 13 
72 | 1 = 15 
73 | 2 = 16 
74 | 3 = 18 
75 | 
76 | 
77 | 
-------------------------------------------------------------------------------- /desktop/focus_gui.py: --------------------------------------------------------------------------------
1 | #!/usr/bin/env python3 
2 | import os 
3 | import sys 
4 | 
5 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 
6 | 
7 | import cv2 
8 | import time 
9 | import signal 
10 | import tkinter as tk 
11 | from tkinter import ttk, messagebox 
12 | import numpy as np 
13 | import threading 
14 | from PIL import Image, ImageTk 
15 | import collections 
16 | import matplotlib.pyplot as plt 
17 | from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg 
18 | from utils.video_manager import VideoStream 
19 | from utils.algorithms import fft_blur 
20 | 
21 | class OWLFocusGUI: 
22 |     def __init__(self, root): 
23 |         self.root = root 
24 |         self.root.title("Camera Focus Tool") 
25 |         self.root.geometry("1000x700") 
26 |         self.root.configure(bg='#f0f0f0') 
27 |         self.root.protocol("WM_DELETE_WINDOW", self.on_closing) 
28 | 
29 |         self.is_running = True 
30 |         self.frame = None 
31 |         self.resolution = (640, 480) 
32 |         self.exp_compensation = -2 
33 | 
34 |         # Focus tracking: persist best focus value and its frame indefinitely. 
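        # Note (added): the focus score used below comes from utils.algorithms.fft_blur, 
        # which - broadly speaking - measures how much high-frequency detail survives in 
        # the frame's frequency spectrum. Sharp, well-focused frames retain more fine 
        # detail, so a higher score indicates better focus; the 10-frame moving average 
        # smooths per-frame noise before each comparison against the best score so far. 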
35 |         self.focus_history = collections.deque(maxlen=250) 
36 |         self.focus_moving_avg = collections.deque(maxlen=10) 
37 |         self.best_focus = float('-inf') 
38 |         self.best_frame = None 
39 |         self.last_avg_focus = float('-inf') 
40 | 
41 |         self.create_widgets() 
42 |         self.camera_thread = threading.Thread(target=self.camera_loop, daemon=True) 
43 |         self.camera_thread.start() 
44 |         self.update_gui() 
45 | 
46 |     def create_widgets(self): 
47 |         main_frame = ttk.Frame(self.root) 
48 |         main_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=10) 
49 | 
50 |         # Video Feed Section 
51 |         video_frame = ttk.LabelFrame(main_frame, text="Camera Feed") 
52 |         video_frame.pack(pady=(0,10)) 
53 |         self.video_width = 640 
54 |         self.video_height = 480 
55 |         self.video_container = tk.Frame(video_frame, width=self.video_width, height=self.video_height) 
56 |         self.video_container.pack(padx=5, pady=5) 
57 |         self.video_container.pack_propagate(False) 
58 |         self.video_label = tk.Label(self.video_container) 
59 |         self.video_label.pack(fill=tk.BOTH, expand=True) 
60 | 
61 |         # Middle Section 
62 |         mid_frame = ttk.Frame(main_frame) 
63 |         mid_frame.pack(fill=tk.X, pady=(0,10)) 
64 |         mid_frame.columnconfigure(0, weight=1) 
65 |         mid_frame.columnconfigure(1, weight=1) 
66 |         mid_frame.columnconfigure(2, weight=1) 
67 | 
68 |         # Current focus value. 
69 |         current_frame = ttk.Frame(mid_frame) 
70 |         current_frame.grid(row=0, column=0, padx=10) 
71 |         ttk.Label(current_frame, text="Current:", font=("Arial", 12, "bold")).pack(side=tk.LEFT) 
72 |         self.focus_value_container = tk.Frame(current_frame, bg="#f0f0f0", padx=5, pady=2) 
73 |         self.focus_value_container.pack(side=tk.LEFT, padx=(5,0)) 
74 |         self.focus_value_label = tk.Label(self.focus_value_container, text="0.0", font=("Arial", 16), bg="#f0f0f0") 
75 |         self.focus_value_label.pack() 
76 | 
77 |         # Best focus value. 
78 |         best_frame = ttk.Frame(mid_frame) 
79 |         best_frame.grid(row=0, column=1, padx=10) 
80 |         ttk.Label(best_frame, text="Best:", font=("Arial", 12, "bold")).pack(side=tk.LEFT) 
81 |         self.best_focus_label = ttk.Label(best_frame, text="0.0", font=("Arial", 16)) 
82 |         self.best_focus_label.pack(side=tk.LEFT, padx=(5,0)) 
83 | 
84 |         # Buttons: Reset Best and Display Best. 
85 |         button_frame = ttk.Frame(mid_frame) 
86 |         button_frame.grid(row=0, column=2, padx=10) 
87 |         self.reset_button = ttk.Button(button_frame, text="Reset Best", command=self.reset_best_focus) 
88 |         self.reset_button.pack(side=tk.LEFT, padx=5) 
89 |         self.display_button = ttk.Button(button_frame, text="Display Best", command=self.display_best) 
90 |         self.display_button.pack(side=tk.LEFT, padx=5) 
91 | 
92 |         # Graph Section: Focus History. 
93 |         graph_frame = ttk.LabelFrame(main_frame, text="Focus History") 
94 |         graph_frame.pack(fill=tk.BOTH, expand=True) 
95 |         graph_frame.configure(relief=tk.FLAT) 
96 |         self.fig = plt.Figure(figsize=(5, 2), dpi=100) 
97 |         self.fig.patch.set_facecolor('#f0f0f0') 
98 |         self.ax = self.fig.add_subplot(111) 
99 |         self.ax.set_facecolor('#f0f0f0') 
100 |         self.ax.set_xlabel('Frame') 
101 |         self.ax.set_ylabel('Focus') 
102 |         self.ax.grid(True) 
103 |         self.canvas = FigureCanvasTkAgg(self.fig, master=graph_frame) 
104 |         self.canvas.draw() 
105 |         self.canvas.get_tk_widget().pack(fill=tk.BOTH, expand=True) 
106 |         graph_frame.bind("<Configure>", self.on_graph_frame_configure) 
107 | 
108 |         # Exit Button at the bottom. 
109 | exit_frame = ttk.Frame(main_frame) 110 | exit_frame.pack(fill=tk.X, pady=(5,0)) 111 | self.exit_button = ttk.Button(exit_frame, text="Exit", command=self.on_closing) 112 | self.exit_button.pack(side=tk.RIGHT) 113 | 114 | def on_graph_frame_configure(self, event): 115 | new_width = event.width 116 | new_height = event.height 117 | dpi = self.fig.get_dpi() 118 | self.fig.set_size_inches(new_width/dpi, new_height/dpi) 119 | self.canvas.draw() 120 | 121 | def reset_best_focus(self): 122 | self.best_focus = float('-inf') 123 | self.best_frame = None 124 | self.update_focus_display() 125 | 126 | def display_best(self): 127 | if self.best_frame is None: 128 | messagebox.showinfo("No Best Frame", "No best frame recorded yet.") 129 | return 130 | win = tk.Toplevel(self.root) 131 | win.title("Best Focus Frame") 132 | frame_rgb = cv2.cvtColor(self.best_frame, cv2.COLOR_BGR2RGB) 133 | img = Image.fromarray(frame_rgb) 134 | photo = ImageTk.PhotoImage(image=img) 135 | lbl = ttk.Label(win, image=photo) 136 | lbl.image = photo 137 | lbl.pack() 138 | 139 | def camera_loop(self): 140 | try: 141 | self.cap = VideoStream(resolution=self.resolution, exp_compensation=self.exp_compensation) 142 | self.cap.start() 143 | time.sleep(1) 144 | while self.is_running: 145 | frame = self.cap.read() 146 | if frame is None: 147 | time.sleep(0.1) 148 | continue 149 | grey = cv2.cvtColor(frame.copy(), cv2.COLOR_BGR2GRAY) 150 | focus_val = fft_blur(grey, size=30) 151 | self.focus_history.append(focus_val) 152 | self.focus_moving_avg.append(focus_val) 153 | current_avg_focus = np.mean(self.focus_moving_avg) 154 | if current_avg_focus > self.best_focus: 155 | self.best_focus = current_avg_focus 156 | self.best_frame = frame.copy() 157 | self.last_avg_focus = current_avg_focus 158 | self.frame = frame.copy() 159 | time.sleep(0.01) 160 | except Exception as e: 161 | print(f"Error in camera loop: {e}") 162 | finally: 163 | self.release_camera() 164 | 165 | def update_gui(self): 166 | if not self.is_running: 167 | return 168 | if self.frame is not None: 169 | frame_rgb = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB) 170 | img = Image.fromarray(frame_rgb) 171 | img = img.resize((self.video_width, self.video_height), Image.LANCZOS) 172 | self.photo = ImageTk.PhotoImage(image=img) 173 | self.video_label.configure(image=self.photo) 174 | self.update_focus_display() 175 | self.update_focus_graph() 176 | self.root.after(33, self.update_gui) 177 | 178 | def update_focus_display(self): 179 | if self.focus_moving_avg: 180 | current_avg_focus = np.mean(self.focus_moving_avg) 181 | self.focus_value_label.configure(text=f"{current_avg_focus:.1f}") 182 | self.best_focus_label.configure(text=f"{self.best_focus:.1f}") 183 | if current_avg_focus >= self.best_focus: 184 | new_bg = "#90EE90" 185 | elif current_avg_focus < self.last_avg_focus: 186 | new_bg = "#FFB6C1" # Red if focus deteriorates. 
187 | else: 188 | new_bg = "#f0f0f0" 189 | self.focus_value_container.configure(bg=new_bg) 190 | self.focus_value_label.configure(bg=new_bg) 191 | 192 | def update_focus_graph(self): 193 | if len(self.focus_history) > 1: 194 | self.ax.clear() 195 | x_data = list(range(len(self.focus_history))) 196 | y_data = list(self.focus_history) 197 | self.ax.plot(x_data, y_data, 'b-', linewidth=1, alpha=0.7) 198 | if len(self.focus_history) >= 10: 199 | window_size = 10 200 | y_avg = np.convolve(y_data, np.ones(window_size) / window_size, mode='valid') 201 | x_avg = list(range(window_size - 1, len(y_data))) 202 | self.ax.plot(x_avg, y_avg, 'r-', linewidth=2, label='10-frame Avg') 203 | if self.best_focus > float('-inf'): 204 | self.ax.axhline(y=self.best_focus, color='g', linestyle='--', linewidth=1, label='Best Focus') 205 | self.ax.set_xlabel('Frame') 206 | self.ax.set_ylabel('Focus') 207 | self.ax.grid(True, alpha=0.3) 208 | self.ax.legend(loc='upper right') 209 | if y_data: 210 | max_focus = max(y_data) 211 | min_focus = min(y_data) 212 | range_focus = max_focus - min_focus 213 | margin = max(range_focus * 0.1, 1) 214 | y_min = min_focus - margin 215 | y_max = max_focus + margin 216 | self.ax.set_ylim(y_min, y_max) 217 | self.canvas.draw() 218 | 219 | def release_camera(self): 220 | if hasattr(self, 'cap') and self.cap is not None: 221 | try: 222 | self.cap.stop() 223 | except Exception: 224 | pass 225 | self.cap = None 226 | 227 | def on_closing(self): 228 | self.is_running = False 229 | self.root.after(100, self.release_camera) 230 | self.focus_history.clear() 231 | self.focus_moving_avg.clear() 232 | self.root.destroy() 233 | 234 | def main(): 235 | root = tk.Tk() 236 | app = OWLFocusGUI(root) 237 | def signal_handler(sig, frame): 238 | app.on_closing() 239 | sys.exit(0) 240 | signal.signal(signal.SIGINT, signal_handler) 241 | signal.signal(signal.SIGTERM, signal_handler) 242 | root.mainloop() 243 | 244 | if __name__ == "__main__": 245 | main() 246 | -------------------------------------------------------------------------------- /desktop/focus_owl_desktop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RED='\033[0;31m' 4 | GREEN='\033[0;32m' 5 | ORANGE='\033[0;33m' 6 | NC='\033[0m' 7 | 8 | # Kill any existing owl.py process 9 | if pgrep -f "owl.py" > /dev/null; then 10 | echo -e "${ORANGE}[INFO] Stopping existing owl.py process...${NC}" 11 | pkill -f "owl.py" 12 | sleep 2 13 | 14 | if pgrep -f "owl.py" > /dev/null; then 15 | echo -e "${RED}[ERROR] Failed to stop owl.py process. Please try again or stop it manually.${NC}" 16 | exit 1 17 | else 18 | echo -e "${GREEN}[INFO] Successfully stopped existing owl.py process.${NC}" 19 | fi 20 | fi 21 | 22 | VENV_ACTIVATE="$HOME/.virtualenvs/owl/bin/activate" 23 | 24 | if [ ! -f "$VENV_ACTIVATE" ]; then 25 | echo -e "${RED}[ERROR] Virtual environment not found at $VENV_ACTIVATE${NC}" 26 | echo -e "${RED}Please run the OWL setup script first.${NC}" 27 | exit 1 28 | fi 29 | 30 | source "$VENV_ACTIVATE" 31 | 32 | FOCUS_SCRIPT="$HOME/owl/desktop/focus_gui.py" 33 | 34 | if [ ! 
-f "$FOCUS_SCRIPT" ]; then 35 | echo -e "${RED}[ERROR] OWL script not found at $FOCUS_SCRIPT${NC}" 36 | exit 1 37 | fi 38 | 39 | echo -e "${GREEN}[INFO] Starting OWL focus mode...${NC}" 40 | "$FOCUS_SCRIPT" -------------------------------------------------------------------------------- /dev/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /home/owl 4 | 5 | # Removing user histories and sensitive files 6 | echo "[INFO] Removing history files" 7 | sudo rm -rvf /root/.bash_history /home/owl/.bash_history /root/.viminfo /home/owl/.viminfo /root/.lesshst /home/owl/.lesshst 8 | sudo rm -rvf /root/.ssh /home/owl/.ssh /root/.gnupg /home/owl/.gnupg 9 | 10 | # Clearing network information 11 | echo "[INFO] Clearing network information" 12 | sudo rm -rvf /etc/NetworkManager/system-connections/* 13 | 14 | # Emptying user-specific and system-wide temporary data 15 | echo "[INFO] Emptying temporary storage" 16 | sudo rm -rvf /tmp/* /var/tmp/* 17 | 18 | # Removing logs 19 | echo "[INFO] Removing logs" 20 | sudo rm -rvf /var/log/* 21 | 22 | # Clear command history for the current session 23 | history -c 24 | 25 | read -p "Zero free space? (y/n): " choice 26 | case "$choice" in 27 | y|Y ) 28 | echo "[INFO] Zeroing free space" 29 | sudo dd if=/dev/zero of=/bigfile bs=1M status=progress 30 | sudo rm /bigfile 31 | df -h # Display disk usage after zeroing 32 | echo "[INFO] Free space zeroed successfully";; 33 | n|N ) 34 | echo "[INFO] Zeroing skipped";; 35 | * ) 36 | echo "[ERROR] Invalid input. Please enter y or n.";; 37 | esac 38 | 39 | # Shutting down the system 40 | echo "[INFO] Shutting down in 5 seconds" 41 | sleep 5 42 | sudo shutdown -h now -------------------------------------------------------------------------------- /display/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/display/__init__.py -------------------------------------------------------------------------------- /docs/20230331_owl_readme.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/docs/20230331_owl_readme.pdf -------------------------------------------------------------------------------- /docs/20240528_owl_readme.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/docs/20240528_owl_readme.pdf -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: owl 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - python=3.12 7 | - numpy 8 | - pandas 9 | - imutils 10 | - tqdm 11 | - python-dateutil 12 | - pytz 13 | - six 14 | - wcwidth 15 | - pip 16 | - pip: 17 | - blessed==1.20.0 18 | - colorzero==2.0 19 | - glob2==0.7 20 | - gpiozero==1.6.2 21 | - opencv-contrib-python 22 | -------------------------------------------------------------------------------- /images/owl-background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/images/owl-background.png 
-------------------------------------------------------------------------------- /images/owl-logo.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/images/owl-logo.png
-------------------------------------------------------------------------------- /logs/detections.jsonl: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/logs/detections.jsonl
-------------------------------------------------------------------------------- /logs/owl_system_logs.jsonl: --------------------------------------------------------------------------------
1 | {"timestamp": "2024-11-06 14:10:02,225", "level": "INFO", "logger": "__main__", "message": "Initializing OWL...", "module": "owl", "function": "__init__"} 
2 | {"timestamp": "2024-11-06 14:10:02,227", "level": "INFO", "logger": "__main__", "message": "Starting OWL version 2.1.0", "module": "owl", "function": "_log_system_info"} 
3 | {"timestamp": "2024-11-06 14:10:02,227", "level": "INFO", "logger": "__main__", "message": "System Information: OS: Raspbian 11 (bullseye)", "module": "owl", "function": "_log_system_info"} 
4 | {"timestamp": "2024-11-06 14:10:02,227", "level": "INFO", "logger": "__main__", "message": "Python Version: 3.9.13", "module": "owl", "function": "_log_system_info"} 
5 | {"timestamp": "2024-11-06 14:10:02,228", "level": "INFO", "logger": "SystemInfo", "message": "Raspberry Pi Model: 5, Revision: c03111", "module": "version", "function": "get_rpi_info"} 
6 | {"timestamp": "2024-11-06 14:10:02,229", "level": "INFO", "logger": "SystemInfo", "message": "Git Commit: 4b825dc642cb6eb9a060e54bf8d69288fbee4904", "module": "version", "function": "get_git_info"} 
7 | {"timestamp": "2024-11-06 14:10:02,230", "level": "INFO", "logger": "__main__", "message": "Git information retrieved successfully.", "module": "owl", "function": "_log_system_info"} 
8 | {"timestamp": "2024-11-06 14:10:02,240", "level": "INFO", "logger": "__main__", "message": "Raspberry Pi version: 5", "module": "owl", "function": "__init__"} 
9 | {"timestamp": "2024-11-06 14:10:02,338", "level": "INFO", "logger": "utils.output_manager", "message": "[INFO] Setting up nozzles...", "module": "output_manager", "function": "__init__"} 
10 | {"timestamp": "2024-11-06 14:10:03,360", "level": "INFO", "logger": "utils.output_manager", "message": "[INFO] Nozzle setup complete. Initiating camera...", "module": "output_manager", "function": "__init__"} 
11 | {"timestamp": "2024-11-06 14:10:03,374", "level": "WARNING", "logger": "__main__", "message": "High resolution, expect reduced framerate. Resolution set to 640x480.", "module": "owl", "function": "__init__"} 
12 | {"timestamp": "2024-11-06 14:10:10,196", "level": "INFO", "logger": "__main__", "message": "[INFO] Stopped.", "module": "owl", "function": "hoot"} 
13 | 
-------------------------------------------------------------------------------- /models/README.md: --------------------------------------------------------------------------------
1 | # Adding Green-on-Green to the OWL (beta) 
2 | Welcome to the first iteration of Green-on-Green, or in-crop, weed detection with the OWL. This is still an early beta version, so it may require additional troubleshooting. It has been tested and works on a Raspberry Pi 4, a LibreComputer board and a Windows desktop computer. 
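For orientation, this is roughly the inference flow that the two stages below build towards - a minimal, illustrative sketch only, assuming the standard `pycoral` detection API and the example model shipped in this directory; the actual OWL integration lives in `utils/greenongreen.py`, and `frame.png` is a hypothetical test image:

```
# minimal Green-on-Green inference sketch (assumes the pycoral API)
import cv2
from pycoral.utils.edgetpu import make_interpreter
from pycoral.adapters import common, detect

# load the Edge TPU-compiled model and allocate its tensors
interpreter = make_interpreter('models/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite')
interpreter.allocate_tensors()

# resize the frame to the model's input size and convert BGR (OpenCV) to RGB
frame = cv2.imread('frame.png')
w, h = common.input_size(interpreter)
common.set_input(interpreter, cv2.cvtColor(cv2.resize(frame, (w, h)), cv2.COLOR_BGR2RGB))
interpreter.invoke()

# each detection has a class id, a confidence score and a bounding box
# (in model-input coordinates)
for obj in detect.get_objects(interpreter, score_threshold=0.5):
    print(obj.id, obj.score, obj.bbox)
```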
3 | 
4 | ## Stage 1 | Hardware/Software - Google Coral Installation 
5 | In addition to the other software installed to get the OpenWeedLocator running, you will also need to install the Google Coral supporting software on the Raspberry Pi. Simply run `install_coral.sh` from the command line using the instructions below. 
6 | 
7 | ### Step 1 
8 | Assuming you have cloned the OpenWeedLocator repository and renamed it to `owl`, navigate to the `models` directory on the Raspberry Pi with: 
9 | 
10 | `owl@raspberrypi:~ $ cd ~/owl/models` 
11 | 
12 | ### Step 2 
13 | Now run the installation file. This will install the `pycoral` library and other important packages to run the Coral. For full instructions on the installation process, we recommend reading the Google Coral [documentation](https://coral.ai/docs/accelerator/get-started/). 
14 | 
15 | During the installation, you will be asked to confirm performance options and to connect the Google Coral USB to one of the USB 3.0 ports (blue). 
16 | 
17 | `owl@raspberrypi:~ $ chmod +x install_coral.sh && ./install_coral.sh` 
18 | 
19 | If you run into errors during the `pycoral` library installation, try running 
20 | 
21 | ``` 
22 | owl@raspberrypi:~ $ workon owl 
23 | (owl) owl@raspberrypi:~/owl/models$ pip install pycoral 
24 | ``` 
25 | 
26 | ### Step 3 
27 | The final step is to test the installation. 
28 | 
29 | Open up a Python terminal by running: 
30 | ``` 
31 | (owl) owl@raspberrypi:~/owl/models$ python 
32 | ``` 
33 | 
34 | Now try running: 
35 | ``` 
36 | >>> import pycoral 
37 | ``` 
38 | 
39 | If this runs successfully, then you're ready to move on to the next stage: running object detection models with the OWL. 
40 | 
41 | ## Stage 2 | Model Training/Deployment - Inference with the Coral 
42 | Running weed recognition models on the Google Coral requires the generation of a .tflite model file. The .tflite files are specifically designed to be lightweight and efficient, making them well-suited for deployment on edge devices like the Coral USB TPU. Note, however, that .tflite files for the Google Coral must be compiled specifically for the Edge TPU, so you cannot simply use any .tflite file; a generic one may run much more slowly or fail to run at all. 
43 | 
44 | This is an overview of the process from the official Google Coral documentation: 
45 | ![image](https://user-images.githubusercontent.com/51358498/226113545-9b642d75-f611-4ff5-a613-5e684822e619.png) 
46 | 
47 | ### Step 1 
48 | To test if the installation has worked, the recommended option is to first download a generic model file from the [Coral model repository](https://coral.ai/models/object-detection/). This isolates any issues to the OWL or the Google Coral installation, rather than to the model training. 
49 | 
50 | While still in the `models` directory, run this command to download the appropriate model: 
51 | ``` 
52 | (owl) owl@raspberrypi:~/owl/models$ wget https://raw.githubusercontent.com/google-coral/test_data/master/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite 
53 | ``` 
54 | 
55 | Now change back to the `owl` directory and try running `owl.py`, specifying `gog` for the algorithm. If you don't specify a path to the `.tflite` model file, it will automatically select the first model in the directory when sorted alphabetically. 
56 | 
57 | **NOTE** If you are testing this indoors, the camera settings will likely be too dark (and the image will appear entirely black), so you may also need to specify `--exp-compensation 4` and `--exp-mode auto`. 
58 | 
59 | ```
60 | (owl) owl@raspberrypi:~/owl/models$ cd ..
61 | (owl) owl@raspberrypi:~/owl$ python owl.py --show-display --algorithm gog
62 | ```
63 | 
64 | If this runs correctly, a video feed just like the previous green-on-brown approach should appear, with a red box around any 'object' - in this case filtered to only detect 'potted plants'. If you would like to detect any of the other COCO categories, simply change `filter_id=63` to a different category. The full list is [available here](https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/).
65 | 
66 | Once you have confirmed it is working, you will need to start training and deploying your own weed recognition models.
67 | 
68 | There are two main ways to generate optimized weed recognition .tflite files for the Coral. These are detailed below.
69 | 
70 | ### Option 1 | Train a model using TensorFlow
71 | These instructions by EdjeElectronics provide a step-by-step guide to a working .tflite Edge TPU model file.
72 | * [Google Colab walkthrough](https://colab.research.google.com/github/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi/blob/master/Train_TFLite2_Object_Detction_Model.ipynb)
73 | * [Accompanying YouTube video](https://www.youtube.com/watch?v=XZ7FYAMCc4M&ab_channel=EdjeElectronics)
74 | 
75 | There is also the [official Google Colab tutorial](https://colab.research.google.com/github/google-coral/tutorials/blob/master/retrain_ssdlite_mobiledet_qat_tf1.ipynb) from the Coral documentation, which walks you through the entire training process for custom datasets.
76 | 
77 | ### Option 2 | Train a YOLO v5/v8 model and export as .tflite
78 | **NOTE** this method does not currently work consistently. Once this is resolved, it will become the recommended approach, given the ease of training YOLO models and their relatively high performance. You can track one of the issues on the Ultralytics repository [here](https://github.com/ultralytics/ultralytics/issues/1185).
79 | 
80 | To train a YOLOv5 model from Weed-AI, check out our notebook for [Weed-AI datasets](https://colab.research.google.com/github/Weed-AI/Weed-AI/blob/master/weed_ai_yolov5.ipynb). Once it is trained, you must export it with the appropriate command below:
81 | 
82 | #### YOLOv5
83 | `!python export.py --weights path/to/your/weights/best.pt --include edgetpu`
84 | #### YOLOv8
85 | `!yolo export model=path/to/your/weights/best.pt format=edgetpu`
86 | 
87 | The full explanation for each method is available in the [Ultralytics YOLOv5](https://github.com/ultralytics/yolov5)
88 | or [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) repositories.
89 | 
90 | Currently, the `GreenOnGreen` class will either load the first model (alphabetically) in the directory when
91 | `algorithm='gog'` is specified, or load the model given by `algorithm=path/to/model.tflite`. Importantly, all your classes must
92 | appear in the `labels.txt` file (see the `labels.txt` in this directory for the expected format).
93 | 
94 | This is a very early version of the approach, so it is subject to change.
95 | 
96 | ## References
97 | These are some of the sources used in the development of this aspect of the project.
98 | 
99 | 1. [PyImageSearch](https://pyimagesearch.com/2019/05/13/object-detection-and-image-classification-with-google-coral-usb-accelerator/)
100 | 2. 
[Google Coral Guides](https://coral.ai/docs/accelerator/get-started/)
101 | 
-------------------------------------------------------------------------------- /models/install_coral.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Single .sh file to install Google Coral requirements for the Raspberry Pi
4 | # Adapted from https://coral.ai/docs/accelerator/get-started/#1-install-the-edge-tpu-runtime with assistance from ChatGPT
5 | 
6 | # Update and upgrade existing packages
7 | sudo apt-get update
8 | sudo apt-get upgrade
9 | 
10 | echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
11 | 
12 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
13 | 
14 | sudo apt-get update
15 | 
16 | echo "Do you want to install with MAX OPERATING FREQUENCY? Doing so will increase framerate but also device temperature and power consumption."
17 | echo "Check official Google Coral documentation for full differences: https://coral.ai/docs/accelerator/get-started/"
18 | read -r -p "Install MAX OPERATING FREQUENCY? [y/N] " response
19 | if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
20 | then
21 |     echo "Installing MAX OPERATING FREQUENCY..."
22 |     sudo apt-get install libedgetpu1-max
23 | else
24 |     echo "Installing STANDARD OPERATING FREQUENCY..."
25 |     sudo apt-get install libedgetpu1-std
26 | fi
27 | 
28 | # Prompt the user to connect the Google Coral before installing pycoral
29 | while true; do
30 |     # Ask user to plug in USB device
31 |     echo "Please connect the Google Coral USB device to a USB 3.0 port. Press [y] then enter to continue."
32 |     read -r -p "Continue? [y/N] " response
33 |     if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
34 |     then
35 |         break
36 |     else
37 |         echo "Invalid response. Please try again."
38 |     fi
39 | done
40 | 
41 | echo "The pycoral library will now be installed."
42 | 
43 | sudo apt-get install python3-pycoral
44 | 
45 | # Link the system-wide installation to the OWL virtual environment
46 | # Find the directories containing pycoral and tflite
47 | PYCORAL_DIRS=$(find /usr/lib/python3/dist-packages -name "*pycoral*" -type d)
48 | TFLITE_DIRS=$(find /usr/lib/python3/dist-packages -name "*tflite*" -type d)
49 | 
50 | # Find the site-packages directory of the virtual environment 'owl'
51 | OWL_SITE_PACKAGES=$(python -c "import site; print(site.getsitepackages()[0])" | grep owl | xargs)
52 | 
53 | # Copy the directories containing pycoral and tflite to the site-packages directory
54 | for DIR in $PYCORAL_DIRS $TFLITE_DIRS; do
55 |     cp -r $DIR $OWL_SITE_PACKAGES
56 | done
57 | 
58 | 
-------------------------------------------------------------------------------- /models/labels.txt: --------------------------------------------------------------------------------
1 | 16 object
-------------------------------------------------------------------------------- /models/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geezacoleman/OpenWeedLocator/c40626b499b0f5b07fc08d3574b086b22077919c/models/tf2_ssd_mobilenet_v2_coco17_ptq_edgetpu.tflite -------------------------------------------------------------------------------- /non_rpi_requirements.txt: --------------------------------------------------------------------------------
1 | ## IMPORTANT - for NON-Raspberry Pi use. Only use this file if you are setting up an environment on a non-Raspberry Pi device.
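## Typical usage (illustrative): from the repository root, inside an activated
## virtual environment, run: pip install -r non_rpi_requirements.txt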
## 2 | 3 | glob2==0.7 4 | imutils==0.5.4 5 | numpy==1.23.4 6 | opencv-python==4.6.0.66 7 | pandas==1.5.1 8 | -------------------------------------------------------------------------------- /notes/README.md: -------------------------------------------------------------------------------- 1 | # OWL Documentation Notes 2 | 3 | This directory contains detailed documentation for each component of the OpenWeedLocator (OWL) system. 4 | 5 | | File | Description | Last Updated | 6 | |--------------------------------------------------|-----------------------------------------------|--------------| 7 | | [owl.py](owl_py_notes.txt) | Main control script and system initialization | 10/11/2024 | 8 | | [error_manager.py](error_manager_py_notes.txt) | Error handling and management system | 10/11/2024 | 9 | | [video_manager.py](video_manager_py_notes.txt) | Camera handling and video stream management | 10/11/2024 | 10 | | [input_manager.py](input_manager_py_notes.txt) | Physical controls and GPIO management | 10/11/2024 | 11 | | [output_manager.py](output_manager_py_notes.txt) | Relay control and status indicators | 10/11/2024 | 12 | 13 | Each note file follows a consistent format: 14 | ``` 15 | ################################################################################ 16 | Notes on 17 | 18 | Summary completed on DD/MM/YYYY 19 | Summary based on commit XXXXXXX 20 | ################################################################################ 21 | 22 | Purpose: 23 | - Core functionality overview 24 | 25 | Classes: 26 | - List of classes 27 | 28 | [Detailed class documentation] 29 | - Methods 30 | - Attributes 31 | - Dependencies 32 | ``` 33 | 34 | These notes are maintained alongside code changes and are updated with each 35 | commit that modifies the corresponding file's functionality. 
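For example, a completed header for a hypothetical new note on `config_manager.py` might look like this (illustrative only; the date and commit are placeholders in the style of the existing notes):

```
################################################################################
Notes on config_manager.py

Summary completed on 14/11/2024
Summary based on commit 95dfb6b
################################################################################
```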
36 | -------------------------------------------------------------------------------- /notes/error_manager_py_notes.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | Notes on error_manager.py 3 | 4 | Summary completed on 14/11/2024 5 | Summary based on commit 95dfb6b 6 | ################################################################################ 7 | 8 | Purpose: 9 | - Handles all error management for OWL system 10 | - Provides colored terminal output for errors 11 | - Structures error hierarchy for consistent handling 12 | 13 | Class Hierarchy: 14 | - OWLError (Base Exception) 15 | |- StorageError 16 | |- USBError 17 | |- USBMountError 18 | |- USBWriteError 19 | |- NoWritableUSBError 20 | |- StorageSystemError 21 | |- OWLProcessError 22 | |- OWLAlreadyRunningError 23 | |- OWLControllerError 24 | |- ControllerPinError 25 | |- ControllerConfigError 26 | |- OWLConfigError 27 | |- ConfigFileError 28 | |- ConfigSectionError 29 | |- ConfigKeyError 30 | |- ConfigValueError 31 | |- AlgorithmError 32 | |- OpenCVError 33 | |- DependencyError 34 | 35 | OWLError class: 36 | - Base exception for all OWL errors 37 | - Methods: 38 | - __init__ 39 | - colorize 40 | - format_error_header 41 | - format_section 42 | 43 | OWLError class --> __init__ method: 44 | - Takes message and details dictionary 45 | - Sets timestamp and error ID 46 | - Initializes base Exception 47 | 48 | OWLError class --> colorize method: 49 | - Takes text, color, bold flag, underline flag 50 | - Returns ANSI-colored string for terminal output 51 | 52 | StorageError classes: 53 | - USBMountError: USB device mounting failures 54 | - USBWriteError: USB write permission issues 55 | - NoWritableUSBError: No available USB storage 56 | - StorageSystemError: Platform compatibility issues 57 | 58 | OWLProcessError classes: 59 | - OWLAlreadyRunningError: Handles duplicate instances 60 | - Methods: 61 | - get_owl_processes: Lists running OWL instances 62 | 63 | OWLControllerError classes: 64 | - ControllerPinError: GPIO pin configuration issues 65 | - ControllerConfigError: Controller setup problems 66 | 67 | OWLConfigError classes: 68 | - ConfigFileError: Missing/invalid config file 69 | - ConfigSectionError: Missing required sections 70 | - ConfigKeyError: Missing required keys 71 | - ConfigValueError: Invalid configuration values 72 | 73 | AlgorithmError class: 74 | - Handles detection algorithm failures 75 | - Methods: 76 | - handle: Logs error and stops OWL 77 | - Predefined messages for: 78 | - ModuleNotFoundError 79 | - IndexError/FileNotFoundError 80 | - ValueError 81 | 82 | OpenCVError class: 83 | - OpenCV import/initialization failures 84 | - Methods: 85 | - handle: Logs error and exits 86 | 87 | DependencyError class: 88 | - Python package dependency issues 89 | - Methods: 90 | - _format_pip_package_error 91 | - _format_local_file_error 92 | - handle 93 | 94 | Error Display Features: 95 | - Colored terminal output 96 | - Standardized error headers 97 | - Formatted error sections 98 | - Timestamped error IDs 99 | - Detailed error messages 100 | 101 | Dependencies: 102 | - subprocess: Process management 103 | - logging: Error logging 104 | - pathlib: Path handling 105 | - datetime: Timestamp generation 106 | -------------------------------------------------------------------------------- /notes/input_manager_py_notes.txt: -------------------------------------------------------------------------------- 1 | 
################################################################################ 2 | Notes on input_manager.py 3 | 4 | Summary completed on 14/11/2024 5 | Summary based on commit 95dfb6b 6 | ################################################################################ 7 | 8 | Purpose: 9 | - Manages physical control interfaces for OWL 10 | - Handles GPIO button inputs 11 | - Controls detection and recording states 12 | - Provides platform compatibility checks 13 | 14 | Functions: 15 | - is_raspberry_pi 16 | - get_rpi_version 17 | 18 | Classes: 19 | - UteController 20 | - AdvancedController 21 | 22 | UteController class: 23 | - Single-switch control interface 24 | - Methods: 25 | - __init__ 26 | - update_state 27 | - toggle_state 28 | - weed_detect_indicator 29 | - image_write_indicator 30 | - run 31 | - stop 32 | 33 | UteController class --> __init__ method: 34 | - Takes detection_state, sample_state, stop_flag 35 | - Takes owl_instance, status_indicator 36 | - Takes switch_purpose (recording/detection) 37 | - Takes switch_board_pin, bounce_time 38 | - Initializes GPIO button handlers 39 | 40 | UteController class --> update_state method: 41 | - Checks switch position 42 | - Updates detection or recording state 43 | - Toggles LED indicators 44 | - Controls weed detection/image sampling 45 | 46 | AdvancedController class: 47 | - Multi-switch interface for advanced control 48 | - Methods: 49 | - __init__ 50 | - update_state 51 | - update_recording_state 52 | - update_sensitivity_state 53 | - update_sensitivity_settings 54 | - set_detection_mode 55 | - update_detection_mode_state 56 | - weed_detect_indicator 57 | - image_write_indicator 58 | - run 59 | - stop 60 | - _read_config 61 | 62 | AdvancedController class --> __init__ method: 63 | - Takes recording/sensitivity/detection states 64 | - Takes owl_instance, status_indicator 65 | - Takes config paths for sensitivity settings 66 | - Takes button pin mappings and bounce time 67 | - Initializes multiple GPIO buttons 68 | - Sets up button event handlers 69 | 70 | AdvancedController class --> update_state method: 71 | - Updates all control states: 72 | * Recording state 73 | * Sensitivity state 74 | * Detection mode state 75 | 76 | AdvancedController class --> set_detection_mode method: 77 | - Takes mode parameter (0=detect, 1=off, 2=all on) 78 | - Controls relay states 79 | - Updates status indicators 80 | 81 | Platform Handling: 82 | - Detects Raspberry Pi platform 83 | - Provides GPIO access on Pi 84 | - Testing mode on non-Pi platforms 85 | - Version detection for Pi models 86 | 87 | Dependencies: 88 | - gpiozero: GPIO control (Pi only) 89 | - configparser: Config file reading 90 | - cv2: GUI trackbar updates 91 | - threading: Process control 92 | - logging: Status messages 93 | 94 | GPIO Configuration: 95 | - Button setup with bounce time 96 | - Event handler registration 97 | - State management 98 | - LED indicator control 99 | 100 | Thread Safety: 101 | - State locking for shared variables 102 | - Safe process termination 103 | - Event-based status updates 104 | 105 | Error Handling: 106 | - Platform compatibility checks 107 | - GPIO initialization errors 108 | - Config file reading errors 109 | - Process management safety 110 | -------------------------------------------------------------------------------- /notes/output_manager_py_notes.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | Notes on 
output_manager.py 3 | 4 | Summary completed on 14/11/2024 5 | Summary based on commit 95dfb6b 6 | ################################################################################ 7 | 8 | Purpose: 9 | - Controls hardware outputs for OWL system 10 | - Manages relay board control and status indicators 11 | - Provides test interfaces for non-Pi development 12 | 13 | Classes: 14 | - TestRelay 15 | - TestBuzzer 16 | - TestLED 17 | - BaseStatusIndicator 18 | - HeadlessStatusIndicator 19 | - UteStatusIndicator 20 | - AdvancedStatusIndicator 21 | - RelayControl 22 | - RelayController 23 | 24 | Entrypoint: 25 | - Has __main__ check for testing status indicators 26 | - Sets testing flag via get_platform_config() if not on Raspberry Pi 27 | - Uses terminal messages instead of GPIO when in test mode 28 | 29 | TestRelay class: 30 | - Simulates relay hardware for testing 31 | - Methods: 32 | - __init__ 33 | - on 34 | - off 35 | 36 | TestRelay class --> __init__ method: 37 | - Takes relay_number and verbose flag 38 | - Stores them as instance attributes 39 | 40 | TestRelay class --> on method: 41 | - Prints "[TEST] Relay {number} ON" if verbose enabled 42 | 43 | TestRelay class --> off method: 44 | - Prints "[TEST] Relay {number} OFF" if verbose enabled 45 | 46 | TestBuzzer class: 47 | - Simulates buzzer hardware for testing 48 | - Methods: 49 | - beep 50 | 51 | TestBuzzer class --> beep method: 52 | - Takes on_time, off_time, n repeats, verbose flag 53 | - Prints "BEEP" n times if verbose enabled 54 | 55 | TestLED class: 56 | - Simulates LED hardware for testing 57 | - Methods: 58 | - __init__ 59 | - blink 60 | - on 61 | - off 62 | 63 | TestLED class --> __init__ method: 64 | - Takes pin number 65 | - Stores as instance attribute 66 | 67 | TestLED class --> blink method: 68 | - Takes on_time, off_time, n repeats, verbose flag 69 | - Prints "BLINK {pin}" n times if verbose enabled 70 | 71 | TestLED class --> on/off methods: 72 | - Print "LED {pin} ON/OFF" respectively 73 | 74 | BaseStatusIndicator class: 75 | - Base class for all status indicators 76 | - Handles storage monitoring and LED control 77 | - Methods: 78 | - __init__ 79 | - start_storage_indicator 80 | - run_update 81 | - update 82 | - error 83 | - stop 84 | 85 | BaseStatusIndicator class --> __init__ method: 86 | - Takes save_directory and no_save flag 87 | - Initializes storage monitoring 88 | - Sets up LED control threads 89 | - Configures system LEDs 90 | 91 | BaseStatusIndicator class --> update method: 92 | - Monitors storage usage 93 | - Updates LED states based on storage 94 | - Triggers errors if storage full 95 | 96 | HeadlessStatusIndicator class: 97 | - Minimal implementation without physical indicators 98 | - Inherits from BaseStatusIndicator 99 | - Only monitors storage capacity 100 | - Methods: 101 | - __init__ 102 | - _update_storage_indicator 103 | 104 | UteStatusIndicator class: 105 | - Two-LED indicator system 106 | - Inherits from BaseStatusIndicator 107 | - Methods: 108 | - __init__ 109 | - _update_storage_indicator 110 | - setup_success 111 | - image_write_indicator 112 | - error 113 | - stop 114 | 115 | UteStatusIndicator class --> __init__ method: 116 | - Takes save_directory and LED pin numbers 117 | - Initializes record and storage LEDs 118 | - Sets up status monitoring 119 | 120 | UteStatusIndicator class --> _update_storage_indicator method: 121 | - Takes percent_full value 122 | - Changes LED blink patterns based on storage level: 123 | * >90%: Solid storage LED 124 | * >85%: Fast blink 125 | * >80%: Medium blink 
126 | * >75%: Slow blink 127 | * >50%: Very slow blink 128 | * <50%: Extremely slow blink 129 | 130 | RelayControl class: 131 | - Direct hardware interface for relay board 132 | - Methods: 133 | - __init__ 134 | - relay_on 135 | - relay_off 136 | - beep 137 | - all_on 138 | - all_off 139 | - remove 140 | - clear 141 | - stop 142 | 143 | RelayControl class --> __init__ method: 144 | - Takes relay_dict mapping relays to GPIO pins 145 | - Initializes buzzer on BOARD7 146 | - Creates OutputDevice for each relay 147 | - Sets up test devices if not on Pi 148 | 149 | RelayController class: 150 | - Manages relay timing and job queues 151 | - Methods: 152 | - __init__ 153 | - receive 154 | - consumer 155 | 156 | RelayController class --> __init__ method: 157 | - Takes relay_dict and visualization flags 158 | - Creates job queues for each relay 159 | - Initializes threading conditions 160 | - Starts consumer threads 161 | 162 | RelayController class --> receive method: 163 | - Takes relay, timestamp, location, delay, duration 164 | - Queues spray job for specified relay 165 | - Notifies consumer thread 166 | 167 | RelayController class --> consumer method: 168 | - Runs in separate thread for each relay 169 | - Processes queued spray jobs 170 | - Manages timing and relay states 171 | - Coordinates with visualization system 172 | -------------------------------------------------------------------------------- /notes/owl_py_notes.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | Notes on owl.py 3 | 4 | Summary completed on 10/11/2024 5 | Summary based on commit: 95dfb6b 6 | ################################################################################ 7 | 8 | Purpose: 9 | - Primary control script for OWL system 10 | - Handles real-time weed detection and sprayer control 11 | - Manages configuration, data collection, and hardware interfaces 12 | 13 | Entrypoint: 14 | - Validates Python environment and imports 15 | - Parses command line arguments: 16 | - --show-display: Enable visualization windows 17 | - --focus: Add blur detection to output 18 | - --input: Path to input media (image/video/directory) 19 | - Creates Owl instance with parsed arguments 20 | - Starts detection loop via owl.hoot() 21 | 22 | Owl class: 23 | Main methods: 24 | - __init__: System initialization 25 | - hoot: Main detection loop 26 | - stop: Graceful shutdown 27 | - save_parameters: Save current settings 28 | - _log_system_info: Record system details 29 | 30 | Initialization (__init__): 31 | 1. Configuration 32 | - Validates config file 33 | - Sets up logging 34 | - Initializes detection parameters 35 | - Creates visualization GUI if enabled 36 | 37 | 2. Hardware Setup 38 | - Configures camera (resolution/exposure) 39 | - Maps GPIO pins to relays 40 | - Sets up controller (None/Ute/Advanced) 41 | - Initializes USB storage for data collection 42 | 43 | 3. Detection Setup 44 | - Configures spray zones and trigger lines 45 | - Sets initial algorithm parameters 46 | - Validates hardware capabilities 47 | 48 | Main Loop (hoot): 49 | 1. Frame Processing: 50 | - Acquires frame from camera/file 51 | - Updates detection parameters 52 | - Runs weed detection algorithm 53 | 54 | 2. Detection Response: 55 | - Maps detected weeds to spray zones 56 | - Triggers appropriate relays 57 | - Updates visualization 58 | 59 | 3. 
Data Collection: 60 | - Records frames/regions based on config 61 | - Manages storage limits 62 | - Logs performance metrics 63 | 64 | 4. User Interface: 65 | - Processes keyboard input 66 | - Updates display windows 67 | - Handles recording controls 68 | 69 | Error Handling: 70 | - Validates Python environment 71 | - Checks hardware compatibility 72 | - Manages GPIO conflicts 73 | - Handles storage issues 74 | - Reports algorithm errors 75 | 76 | Dependencies: 77 | Core: 78 | - OpenCV-Python: Image processing 79 | - NumPy: Array operations 80 | - imutils: Image utilities 81 | 82 | Custom modules: 83 | - error_manager: Error handling 84 | - input_manager: Hardware control 85 | - config_manager: Configuration 86 | - video_manager: Camera interface 87 | - image_sampler: Data collection 88 | - algorithms: Detection methods 89 | 90 | Configuration: 91 | - Uses .ini format 92 | - Validated sections: 93 | - System: Core parameters 94 | - Camera: Image acquisition 95 | - Controller: Hardware interface 96 | - DataCollection: Storage settings 97 | - GreenOnBrown: Detection parameters 98 | - Relays: GPIO mappings 99 | -------------------------------------------------------------------------------- /notes/video_manager_py_notes.txt: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | Notes on video_manager.py 3 | 4 | Summary completed on 14/11/2024 5 | Summary based on commit 95dfb6b 6 | ################################################################################ 7 | 8 | Purpose: 9 | - Manages different camera types for video capture 10 | - Supports Picamera (legacy), Picamera2, and standard webcams 11 | - Provides thread-safe video streaming 12 | 13 | Classes: 14 | - WebcamStream 15 | - PiCamera2Stream 16 | - PiCameraStream 17 | - VideoStream 18 | 19 | WebcamStream class: 20 | - Handles USB/standard webcams via OpenCV 21 | - Methods: 22 | - __init__ 23 | - start 24 | - update 25 | - read 26 | - stop 27 | 28 | WebcamStream class --> __init__ method: 29 | - Takes camera source number 30 | - Initializes VideoCapture stream 31 | - Sets frame dimensions 32 | - Creates thread for frame updates 33 | 34 | WebcamStream class --> update method: 35 | - Runs in separate thread 36 | - Continuously reads frames 37 | - Updates frame buffer 38 | - Handles stream closure 39 | 40 | PiCamera2Stream class: 41 | - Handles newer Raspberry Pi cameras 42 | - Methods: 43 | - __init__ 44 | - start 45 | - update 46 | - read 47 | - stop 48 | 49 | PiCamera2Stream class --> __init__ method: 50 | - Takes resolution, exposure compensation 51 | - Configures camera parameters 52 | - Sets up thread synchronization 53 | - Detects camera model (imx296/imx477/imx708) 54 | 55 | PiCamera2Stream class --> update method: 56 | - Thread-safe frame capture 57 | - Uses condition/lock for synchronization 58 | - Handles camera cleanup 59 | 60 | PiCameraStream class: 61 | - Handles legacy Raspberry Pi camera 62 | - Methods: 63 | - __init__ 64 | - start 65 | - update 66 | - read 67 | - stop 68 | 69 | PiCameraStream class --> __init__ method: 70 | - Takes resolution, exposure compensation 71 | - Sets PiCamera parameters 72 | - Initializes frame buffer 73 | - Creates capture stream 74 | 75 | PiCameraStream class --> update method: 76 | - Continuous frame capture 77 | - Updates frame buffer 78 | - Handles resource cleanup 79 | 80 | VideoStream class: 81 | - Factory class to create appropriate stream 82 | - Methods: 83 | - __init__ 
84 | - start
85 | - update
86 | - read
87 | - stop
88 | 
89 | VideoStream class --> __init__ method:
90 | - Detects available camera version
91 | - Creates appropriate stream object
92 | - Sets frame dimensions
93 | - Handles initialization errors
94 | 
95 | Dependencies:
96 | - cv2: OpenCV camera interface
97 | - picamera/picamera2: Raspberry Pi cameras
98 | - threading: Thread management
99 | - logging: Status and error logging
100 | 
101 | Thread Safety:
102 | - Lock for frame buffer access
103 | - Condition for frame synchronization
104 | - Event for thread control
105 | - Daemon threads for cleanup
106 | 
107 | Error Handling:
108 | - Camera initialization failures
109 | - Stream read errors
110 | - Resource cleanup on errors
111 | - Camera version compatibility
112 | 
-------------------------------------------------------------------------------- /owl_boot.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # automatically determine the home directory, to avoid issues with username
4 | source $HOME/.bashrc
5 | 
6 | # activate the 'owl' virtual environment
7 | source $HOME/.virtualenvs/owl/bin/activate
8 | 
9 | # change directory to the owl folder
10 | cd $HOME/owl
11 | 
12 | # run owl.py in the background and save the log output
13 | LOG_DATE=$(date -u +"%Y-%m-%dT%H-%M-%SZ")
14 | ./owl.py > $HOME/owl/logs/owl_$LOG_DATE.log 2>&1 &
-------------------------------------------------------------------------------- /owl_boot_wrapper.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # This script will find the user's home directory, making OWL software more portable.
4 | for dir in /home/*; do
5 |     if [ -d "$dir" ]; then
6 |         username=$(basename "$dir")
7 |         if [ "$username" != "root" ]; then
8 |             HOME_DIR="$dir"
9 |             break
10 |         fi
11 |     fi
12 | done
13 | 
14 | if [ -z "$HOME_DIR" ]; then
15 |     echo "No suitable user directory found."
16 |     exit 1
17 | fi
18 | 
19 | sudo -u "$username" -H /usr/local/bin/owl_boot.sh
-------------------------------------------------------------------------------- /owl_setup.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Define colors for status messages
4 | RED='\033[0;31m'     # Red for ERROR messages
5 | ORANGE='\033[0;33m'  # Orange for warnings
6 | GREEN='\033[0;32m'   # Green for INFO and success messages
7 | NC='\033[0m'         # No color (reset)
8 | TICK="${GREEN}[OK]${NC}"
9 | CROSS="${RED}[FAIL]${NC}"
10 | SCRIPT_DIR=$(dirname "$(realpath "$0")")
11 | CURRENT_USER=${SUDO_USER:-$(whoami)}
12 | 
13 | if [ "$SUDO_USER" ]; then
14 |     echo -e "${RED}[ERROR] This script should not be run with sudo. Please run it as a normal user.${NC}"
15 |     exit 1
16 | fi
17 | 
18 | if pgrep -f "owl.py" > /dev/null; then
19 |     echo -e "${ORANGE}[WARNING] owl.py is already running from a previous installation.${NC}"
20 |     echo -e "${ORANGE}It is unlikely you need to run this script a second time; consider running 'python owl.py --focus' or 'python owl.py --show-display' instead if you just need the display.${NC}"
21 |     read -p "Otherwise, enter 'y' to stop the currently running instance of owl.py and continue (y/n): " stop_choice
22 |     if [[ "$stop_choice" =~ ^[Yy]$ ]]; then
23 |         pkill -f "owl.py"
24 |         echo -e "${GREEN}[INFO] owl.py process has been stopped. Continuing...${NC}"
25 |         sleep 2
26 |     else
27 |         echo -e "${RED}[ERROR] Please stop the running owl.py process before running this script.${NC}"
28 | exit 1 29 | fi 30 | fi 31 | 32 | if [ "$CURRENT_USER" != "owl" ]; then 33 | echo -e "${ORANGE}[WARNING] Current user '$CURRENT_USER' differs from expected 'owl'. Some settings may not work correctly.${NC}" 34 | fi 35 | 36 | # Initialize status tracking variables 37 | STATUS_UPGRADE="" 38 | STATUS_CAMERA="" 39 | STATUS_CAMERA_TEST="" 40 | STATUS_FULL_UPGRADE="" 41 | STATUS_VENV="" 42 | STATUS_OPENCV="" 43 | STATUS_OWL_DEPS="" 44 | STATUS_BOOT_SCRIPTS="" 45 | STATUS_DESKTOP_ICON="" 46 | 47 | ERROR_UPGRADE="" 48 | ERROR_CAMERA="" 49 | ERROR_CAMERA_TEST="" 50 | ERROR_FULL_UPGRADE="" 51 | ERROR_VENV="" 52 | ERROR_OPENCV="" 53 | ERROR_OWL_DEPS="" 54 | ERROR_BOOT_SCRIPTS="" 55 | ERROR_DESKTOP_ICON="" 56 | 57 | # Function to check the exit status of the last executed command 58 | check_status() { 59 | if [ $? -ne 0 ]; then 60 | echo -e "${CROSS} $1 failed." 61 | eval "STATUS_$2='${CROSS}'" 62 | eval "ERROR_$2='$1 failed'" 63 | return 1 64 | else 65 | echo -e "${TICK} $1 completed successfully." 66 | eval "STATUS_$2='${TICK}'" 67 | fi 68 | } 69 | 70 | # Source bashrc to ensure virtualenv commands are available 71 | reload_bashrc() { 72 | if [ -f ~/.bashrc ]; then 73 | source ~/.bashrc 74 | sleep 2 75 | fi 76 | } 77 | 78 | # Function to check if the camera is detected 79 | check_camera_connection() { 80 | echo -e "${GREEN}[INFO] Checking for connected Raspberry Pi camera...${NC}" 81 | while true; do 82 | if rpicam-hello --list-cameras 2>&1 | grep -q "No cameras available"; then 83 | echo -e "${RED}[ERROR] No camera detected!${NC}" 84 | read -p "Please connect a Raspberry Pi camera and press Enter to retry..." temp 85 | else 86 | echo -e "${GREEN}[INFO] Camera detected successfully.${NC}" 87 | STATUS_CAMERA="${TICK}" 88 | return 0 89 | fi 90 | done 91 | } 92 | 93 | # Step 1: Perform a normal system update and upgrade 94 | echo -e "${GREEN}[INFO] Updating and upgrading the system...${NC}" 95 | sudo apt update 96 | sudo apt full-upgrade -y 97 | check_status "System upgrade" "UPGRADE" 98 | 99 | # Step 2: Ensure a camera is connected before proceeding 100 | check_camera_connection 101 | 102 | # Step 3: Test camera functionality 103 | echo -e "${GREEN}[INFO] Testing camera functionality...${NC}" 104 | rpicam-hello > /dev/null 2>&1 105 | if [ $? -ne 0 ]; then 106 | echo -e "${RED}[WARNING] Camera test failed. Running full system upgrade to resolve potential issues...${NC}" 107 | sudo apt full-upgrade -y 108 | check_status "Full system upgrade" "FULL_UPGRADE" 109 | 110 | echo -e "${GREEN}[INFO] Retesting camera after full upgrade...${NC}" 111 | rpicam-hello > /dev/null 2>&1 112 | if [ $? -ne 0 ]; then 113 | echo -e "${RED}[CRITICAL ERROR] Camera still not working after full upgrade. 
Please log an issue: https://github.com/geezacoleman/OpenWeedLocator/issues${NC}" 114 | STATUS_CAMERA_TEST="${CROSS}" 115 | ERROR_CAMERA_TEST="No camera detected" 116 | else 117 | echo -e "${GREEN}[INFO] Camera test passed after full upgrade.${NC}" 118 | STATUS_CAMERA_TEST="${TICK}" 119 | fi 120 | else 121 | echo -e "${GREEN}[INFO] Camera is working correctly.${NC}" 122 | STATUS_CAMERA_TEST="${TICK}" 123 | fi 124 | 125 | # Step 4: Free up space 126 | echo -e "${GREEN}[INFO] Freeing up space by removing unnecessary packages...${NC}" 127 | sudo apt-get purge -y wolfram-engine libreoffice* 128 | sudo apt-get clean 129 | check_status "Cleaning up" "CLEANUP" 130 | 131 | # Step 5: Set up the virtual environment 132 | echo -e "${GREEN}[INFO] Setting up the virtual environment...${NC}" 133 | 134 | # Add config to bashrc if not already present 135 | if ! grep -q "virtualenv and virtualenvwrapper" /home/$CURRENT_USER/.bashrc; then 136 | cat >> /home/$CURRENT_USER/.bashrc << EOF 137 | # virtualenv and virtualenvwrapper 138 | export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3 139 | export WORKON_HOME=\$HOME/.virtualenvs 140 | source /usr/share/virtualenvwrapper/virtualenvwrapper.sh 141 | EOF 142 | fi 143 | 144 | reload_bashrc 145 | sudo apt-get install -y python3-virtualenv python3-virtualenvwrapper 146 | check_status "Installing virtualenv packages" "VENV" 147 | 148 | reload_bashrc 149 | source /usr/share/virtualenvwrapper/virtualenvwrapper.sh 150 | check_status "Virtualenv configuration" "VENV" 151 | 152 | reload_bashrc 153 | # Step 6: Create and configure the virtual environment 154 | echo -e "${GREEN}[INFO] Creating the 'owl' virtual environment...${NC}" 155 | mkvirtualenv --system-site-packages -p python3 owl 156 | check_status "Creating virtual environment 'owl'" "VENV" 157 | 158 | sleep 1s 159 | 160 | # Step 7: Install OpenCV in the virtual environment 161 | echo -e "${GREEN}[INFO] Installing OpenCV in the 'owl' virtual environment...${NC}" 162 | source $HOME/.virtualenvs/owl/bin/activate 163 | sleep 1s 164 | pip3 install opencv-contrib-python 165 | check_status "Installing OpenCV" "OPENCV" 166 | 167 | sleep 1s 168 | 169 | # Step 8: Install OWL dependencies 170 | echo -e "${GREEN}[INFO] Installing the OWL Python dependencies...${NC}" 171 | cd "$SCRIPT_DIR" 172 | pip install -r requirements.txt 173 | check_status "Installing dependencies from requirements.txt" "OWL_DEPS" 174 | 175 | # Step 9: Make scripts executable and set up boot configuration 176 | echo -e "${GREEN}[INFO] Making scripts executable...${NC}" 177 | chmod a+x owl.py 178 | check_status "Making owl.py executable" "BOOT_SCRIPTS" 179 | 180 | chmod a+x owl_boot.sh 181 | chmod a+x owl_boot_wrapper.sh 182 | check_status "Making boot scripts executable" "BOOT_SCRIPTS" 183 | 184 | echo -e "${GREEN}[INFO] Moving boot scripts...${NC}" 185 | sudo mv owl_boot.sh /usr/local/bin/ 186 | sudo mv owl_boot_wrapper.sh /usr/local/bin/ 187 | check_status "Moving boot scripts" "BOOT_SCRIPTS" 188 | 189 | # Add boot script to cron 190 | echo -e "${GREEN}[INFO] Adding boot script to cron...${NC}" 191 | (crontab -l 2>/dev/null; echo "@reboot /usr/local/bin/owl_boot_wrapper.sh > /home/launch.log 2>&1") | sudo crontab - 192 | check_status "Adding boot script to cron" "BOOT_SCRIPTS" 193 | 194 | # set desktop background - check for wayland or X11 195 | echo -e "${GREEN}[INFO] Setting desktop background...${NC}" 196 | pcmanfm --set-wallpaper $SCRIPT_DIR/images/owl-background.png 197 | check_status "Setting desktop background" "BOOT_SCRIPTS" 198 | sleep 2 199 | 
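# NOTE (illustrative sketch): the comment above mentions checking for Wayland or
# X11, but no such check is performed yet. If one were needed, the session type
# could be read from XDG_SESSION_TYPE (assuming the session sets it), e.g.:
#   if [ "$XDG_SESSION_TYPE" = "wayland" ]; then
#       echo "Wayland session detected"
#   fi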
200 | # creating desktop icon for focusing 201 | echo -e "${GREEN}[INFO] Creating OWL Focusing desktop icon...${NC}" 202 | 203 | FOCUS_WRAPPER="${SCRIPT_DIR}/desktop/focus_owl_desktop.sh" 204 | FOCUS_GUI="${SCRIPT_DIR}/desktop/focus_gui.py" 205 | chmod +x "$FOCUS_WRAPPER" 206 | chmod +x "$FOCUS_GUI" 207 | 208 | DESKTOP_DIR="$HOME/Desktop" 209 | if [ ! -d "$DESKTOP_DIR" ]; then 210 | DESKTOP_DIR="$HOME/.local/share/applications" 211 | mkdir -p "$DESKTOP_DIR" 212 | fi 213 | 214 | DESKTOP_FILE="${DESKTOP_DIR}/Focus.desktop" 215 | cat << EOF > "$DESKTOP_FILE" 216 | [Desktop Entry] 217 | Version=1.0 218 | Type=Application 219 | Name=Focus 220 | Comment=Run OWL focusing mode 221 | Exec=${FOCUS_WRAPPER} 222 | Icon=${SCRIPT_DIR}/images/owl-logo.png 223 | Terminal=false 224 | Categories=Utility; 225 | EOF 226 | chmod +x "$DESKTOP_FILE" 227 | echo -e "${GREEN}[INFO] Focus OWL desktop icon created at: ${DESKTOP_FILE}${NC}" 228 | check_status "Creating desktop icon" "DESKTOP_ICON" 229 | 230 | # Final Summary 231 | echo -e "\n${GREEN}[INFO] Installation Summary:${NC}" 232 | echo -e "$STATUS_UPGRADE System Upgrade" 233 | echo -e "$STATUS_CAMERA Camera Detected" 234 | echo -e "$STATUS_CAMERA_TEST Camera Test" 235 | 236 | if [[ -n "$STATUS_FULL_UPGRADE" ]]; then 237 | echo -e "$STATUS_FULL_UPGRADE Full System Upgrade" 238 | fi 239 | 240 | echo -e "$STATUS_VENV Virtual Environment Created" 241 | echo -e "$STATUS_OPENCV OpenCV Installed" 242 | echo -e "$STATUS_OWL_DEPS OWL Dependencies Installed" 243 | echo -e "$STATUS_BOOT_SCRIPTS Boot Scripts Moved" 244 | echo -e "$STATUS_DESKTOP_ICON Desktop Icon Created" 245 | 246 | OWL_VERSION=$(python3 - <=11.2.1,<11.3.0 12 | -------------------------------------------------------------------------------- /update_owl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # color codes 5 | RED='\033[0;31m' 6 | GREEN='\033[0;32m' 7 | ORANGE='\033[0;33m' 8 | NC='\033[0m' 9 | 10 | REPO_DIR=~/owl 11 | REPO_URL=https://github.com/geezacoleman/OpenWeedLocator.git 12 | CONFIG_PATH="config/*.ini" 13 | 14 | echo -e "${ORANGE}[INFO] Starting OWL updater...${NC}" 15 | 16 | # 1) stop any running owl.py instances 17 | if pgrep -f "owl.py" > /dev/null; then 18 | echo -e "${ORANGE}[INFO] Stopping existing owl.py process...${NC}" 19 | pkill -f "owl.py" 20 | sleep 2 21 | if pgrep -f "owl.py" > /dev/null; then 22 | echo -e "${RED}[ERROR] Failed to stop owl.py. Please stop it manually and retry.${NC}" 23 | exit 1 24 | fi 25 | echo -e "${GREEN}[INFO] owl.py stopped.${NC}" 26 | fi 27 | 28 | # 2) update repo 29 | echo -e "${ORANGE}[INFO] Updating existing OWL repository...${NC}" 30 | cd "$REPO_DIR" 31 | 32 | # record old version 33 | OLD_VERSION=$(python3 - </dev/null 49 | STASHED_CONFIG=true 50 | echo -e "${GREEN}[SUCCESS] Your config/*.ini changes have been stashed as stash@{0}.${NC}" 51 | fi 52 | 53 | # other uncommitted changes? 54 | if ! git diff --quiet -- . && ! git diff --cached --quiet -- .; then 55 | echo -e "${ORANGE}[WARNING] You have other uncommitted changes:${NC}" 56 | git status --short 57 | 58 | read -p "Would you like to see a summary of those changes? [y/N]: " detail 59 | if [[ "$detail" =~ ^[Yy] ]]; then 60 | echo -e "${ORANGE}[INFO] Showing change stats...${NC}" 61 | git diff --stat 62 | fi 63 | 64 | # now ask about stashing 65 | read -p "Stash these changes before pulling? 
[y/N]: " yn 66 | case "$yn" in 67 | [Yy]* ) 68 | git stash push -m "auto-stash before OWL update" 69 | echo -e "${GREEN}[SUCCESS] Other changes stashed.${NC}" 70 | ;; 71 | * ) 72 | echo -e "${RED}[ERROR] Please commit or stash manually, then rerun.${NC}" 73 | exit 1 74 | ;; 75 | esac 76 | fi 77 | 78 | 79 | echo -e "${ORANGE}[INFO] Pulling latest from origin/${CURRENT_BRANCH}...${NC}" 80 | if git pull origin "$CURRENT_BRANCH"; then 81 | echo -e "${GREEN}[SUCCESS] Repository updated.${NC}" 82 | else 83 | echo -e "${RED}[ERROR] git pull failed. Resolve and retry.${NC}" 84 | exit 1 85 | fi 86 | 87 | # re-apply the INI stash if made 88 | if [ "$STASHED_CONFIG" = true ]; then 89 | echo -e "${ORANGE}[INFO] Re-applying your config/*.ini changes...${NC}" 90 | if git stash pop stash@{0}; then 91 | echo -e "${GREEN}[SUCCESS] config/*.ini restored and merged successfully.${NC}" 92 | else 93 | echo -e "${RED}[WARNING] Conflicts occurred while merging your INI files.${NC}" 94 | echo -e "${RED}Please open each affected file under ~/owl/config/, look for '<<<<<<<', fix them, then:${NC}" 95 | echo -e "${RED} cd ~/owl && git add config/*.ini && git commit${NC}" 96 | exit 1 97 | fi 98 | fi 99 | 100 | # record new version 101 | NEW_VERSION=$(python3 - < /dev/null; then 115 | source "$(which workon)" 116 | workon owl 117 | else 118 | echo -e "${ORANGE}[WARNING] virtualenvwrapper not found; activate your venv manually if needed.${NC}" 119 | fi 120 | pip install -r requirements.txt 121 | 122 | SCRIPT_DIR=$(dirname "$(realpath "$0")") 123 | FOCUS_WRAPPER="${SCRIPT_DIR}/desktop/focus_owl_desktop.sh" 124 | FOCUS_GUI="${SCRIPT_DIR}/desktop/focus_gui.py" 125 | chmod +x "$FOCUS_WRAPPER" 126 | chmod +x "$FOCUS_GUI" 127 | 128 | echo -e "${GREEN}[COMPLETE] Upgraded version: ${OLD_VERSION} → ${NEW_VERSION}${NC}" 129 | -------------------------------------------------------------------------------- /utils/algorithms.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | 4 | ### Adding a new algorithm ### 5 | """ 6 | To add a new algorithm the only requirement is that it accepts a BGR (opencv) image and returns a grayscale 7 | image as an output. If it returns a binary image (like hsv) then it must return a boolean True in addition to the image 8 | as it has already been thresholded. 9 | """ 10 | ############################## 11 | 12 | def exg(image): 13 | """ 14 | Takes an image and processes it using ExG. Returns a single channel exG output. 15 | Developed by Woebbecke et al. 1995. 16 | :return: grayscale image 17 | """ 18 | # using array slicing to split into channels 19 | blue = image[:, :, 0].astype(np.float32) 20 | green = image[:, :, 1].astype(np.float32) 21 | red = image[:, :, 2].astype(np.float32) 22 | # cv2.imshow('blue', blue.astype('uint8')) 23 | # cv2.imshow('green', green.astype('uint8')) 24 | # cv2.imshow('red', red.astype('uint8')) 25 | 26 | image_out = 2 * green - red - blue 27 | image_out = np.clip(image_out, 0, 255) 28 | image_out = image_out.astype('uint8') 29 | 30 | # cv2.imshow('ExG', imgOut) 31 | return image_out 32 | 33 | def maxg(image): 34 | ''' 35 | Takes an input image in int8 format and calculates the 'maxg' algorithm based on the following publication: 36 | 'Weed Identification Using Deep Learning and Image Processing in Vegetable Plantation', Jin et al. 2021 37 | :param image: image as a BGR array (i.e. 
opened with opencv not PIL) 38 | :return: grayscale image 39 | ''' 40 | # using array slicing to split into channels with float32 for calculation 41 | blue = image[:, :, 0].astype(np.float32) 42 | green = image[:, :, 1].astype(np.float32) 43 | red = image[:, :, 2].astype(np.float32) 44 | 45 | image_out = 24 * green - 19 * red - 2 * blue 46 | image_out = (image_out / np.amax(image_out)) * 255 # scale image between 0 - 255 47 | image_out = image_out.astype('uint8') 48 | 49 | return image_out 50 | 51 | def exg_standardised(image): 52 | ''' 53 | Takes an input image in int8 format and calculates the standardised ExG algorithm 54 | :param image: image as a BGR array (i.e. opened with opencv not PIL) 55 | :return: returns a grayscale image 56 | ''' 57 | blue = image[:, :, 0].astype(np.float32) 58 | green = image[:, :, 1].astype(np.float32) 59 | red = image[:, :, 2].astype(np.float32) 60 | channel_sum = red + green + blue 61 | channel_sum = np.where(channel_sum == 0, 1, channel_sum) 62 | 63 | b = blue / channel_sum 64 | g = green / channel_sum 65 | r = red / channel_sum 66 | 67 | image_out = 255 * (2 * g - r - b) 68 | image_out = np.where(image_out < 0, 0, image_out) 69 | image_out = np.where(image_out > 255, 255, image_out) 70 | 71 | image_out = image_out.astype('uint8') 72 | # cv2.imshow('ExG Standardised', imgOut) 73 | 74 | return image_out 75 | 76 | def exg_standardised_hue(image, 77 | hue_min=30, 78 | hue_max=90, 79 | brightness_min=10, 80 | brightness_max=220, 81 | saturation_min=30, 82 | saturation_max=255, 83 | invert_hue=False): 84 | ''' 85 | Takes an image and performs a combined ExG + HSV algorithm 86 | :param image: image as a BGR array (i.e. opened with opencv not PIL) 87 | :param hue_min: minimum hue value 88 | :param hue_max: maximum hue value 89 | :param brightness_min: minimum 'value' or brightness value 90 | :param brightness_max: maximum 'value' or brightness value 91 | :param saturation_min: minimum saturation 92 | :param saturation_max: maximum saturation 93 | :param invert_hue: inverts the hue threshold to exclude anything within the thresholds 94 | :return: returns a grayscale image 95 | ''' 96 | 97 | blue = image[:, :, 0].astype(np.float32) 98 | green = image[:, :, 1].astype(np.float32) 99 | red = image[:, :, 2].astype(np.float32) 100 | 101 | channel_sum = red + green + blue 102 | channel_sum = np.where(channel_sum == 0, 1, channel_sum) 103 | 104 | b = blue / channel_sum 105 | g = green / channel_sum 106 | r = red / channel_sum 107 | 108 | image_out = 255 * (2 * g - r - b) 109 | image_out = np.where(image_out < 0, 0, image_out) 110 | image_out = np.where(image_out > 255, 255, image_out) 111 | 112 | image_out = image_out.astype('uint8') 113 | 114 | hsv_thresh, _ = hsv(image, 115 | hue_min=hue_min, hue_max=hue_max, 116 | brightness_min=brightness_min, brightness_max=brightness_max, 117 | saturation_min=saturation_min, saturation_max=saturation_max, 118 | invert_hue=invert_hue) 119 | image_out = hsv_thresh & image_out 120 | # cv2.imshow('exhu', imgOut) 121 | 122 | return image_out 123 | 124 | def exgr(image): 125 | ''' 126 | performs the ExGR algorithm on the input image 127 | :param image: image as a BGR array (i.e. 
opened with opencv not PIL) 128 | :return: returns a grayscale image 129 | ''' 130 | green = image[:, :, 1].astype(np.float32) 131 | red = image[:, :, 2].astype(np.float32) 132 | 133 | exg_image = exg(image) 134 | image_out = exg_image - (1.4 * red - green) 135 | 136 | image_out = np.clip(image_out, 0, 255) 137 | image_out = image_out.astype('uint8') 138 | 139 | return image_out 140 | 141 | def hsv(image, 142 | hue_min=30, 143 | hue_max=90, 144 | brightness_min=10, 145 | brightness_max=220, 146 | saturation_min=30, 147 | saturation_max=255, 148 | invert_hue=False): 149 | 150 | """ 151 | Performs an HSV thresholding operation on the input image 152 | :param image: image as a BGR array (i.e. opened with opencv not PIL) 153 | :param hue_min: minimum hue threshold 154 | :param hue_max: maximum hue threshold 155 | :param brightness_min: minimum 'brightness' or 'value' threshold 156 | :param brightness_max: maximum 'brightness' or 'value' threshold 157 | :param saturation_min: minimum saturation threshold 158 | :param saturation_max: maximum saturation threshold 159 | :param invert_hue: inverts the hue threshold to exclude anything within the thresholds 160 | :return: returns a binary image and boolean thresholded or not 161 | """ 162 | 163 | image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) 164 | hue = image[:, :, 0] 165 | sat = image[:, :, 1] 166 | val = image[:, :, 2] 167 | 168 | hue_thresh = cv2.inRange(hue, hue_min, hue_max) 169 | sat_thresh = cv2.inRange(sat, saturation_min, saturation_max) 170 | val_thresh = cv2.inRange(val, brightness_min, brightness_max) 171 | 172 | # allow users to select purple/red colour ranges by excluding green 173 | if invert_hue: 174 | hue_thresh = cv2.bitwise_not(hue_thresh) 175 | 176 | out_thresh = sat_thresh & val_thresh & hue_thresh 177 | # cv2.imshow('HSV Out', outThresh) 178 | return out_thresh, True 179 | 180 | # for NIR images only 181 | def gndvi(image): 182 | """ 183 | Takes an image and processes it using GNDVI. Returns a single channel grayscale scaled output. 184 | :return: 185 | """ 186 | # using array slicing to split into channel 187 | green = image[:, :, 1].astype(np.float32) 188 | NIR = image[:, :, 2].astype(np.float32) 189 | 190 | image_out = (NIR - green) / (NIR + green) 191 | image_out = cv2.normalize(image_out, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F) 192 | image_out = image_out.astype('uint8') 193 | cv2.imshow('gndvi', image_out) 194 | return image_out 195 | 196 | 197 | # Other vegetation indices are listed here, but have NOT been tested. 
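# NOTE (illustrative sketch, not part of the tested OWL algorithms): as described
# in the 'Adding a new algorithm' note at the top of this file, a new index only
# needs to accept a BGR (OpenCV) image and return a grayscale uint8 image, e.g.:
#
#   def my_new_index(image):
#       blue = image[:, :, 0].astype(np.float32)
#       green = image[:, :, 1].astype(np.float32)
#       red = image[:, :, 2].astype(np.float32)
#       image_out = np.clip(2 * green - red - blue, 0, 255)  # ExG-style weighting
#       return image_out.astype('uint8')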
198 | def veg(image):
199 |     blue = image[:, :, 0].astype(np.float32)
200 |     green = image[:, :, 1].astype(np.float32)
201 |     red = image[:, :, 2].astype(np.float32)
202 | 
203 |     image_out = green / ((red ** 0.667) * (blue ** 0.333))
204 |     image_out = cv2.normalize(image_out, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
205 |     image_out = np.clip(image_out, 0, 255)
206 |     image_out = image_out.astype('uint8')
207 | 
208 |     return image_out
209 | 
210 | def cive(image):
211 |     blue = image[:, :, 0].astype(np.float32)
212 |     green = image[:, :, 1].astype(np.float32)
213 |     red = image[:, :, 2].astype(np.float32)
214 | 
215 |     image_out = 0.441 * red - 0.881 * green + 0.385 * blue + 18.78745
216 |     #image_out = cv2.normalize(image_out, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
217 |     image_out = np.clip(image_out, 0, 255)
218 |     image_out = image_out.astype('uint8')
219 | 
220 |     return image_out
221 | 
222 | def clahe_sat_val(image):
223 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
224 |     hue = image[:, :, 0]
225 |     sat = image[:, :, 1]
226 |     val = image[:, :, 2]
227 | 
228 |     clahe = cv2.createCLAHE(clipLimit=20, tileGridSize=(64, 64))
229 |     satCL = clahe.apply(sat)
230 |     valCL = clahe.apply(val)
231 | 
232 |     claheImage = cv2.merge([hue, satCL, valCL])
233 |     claheImage = cv2.cvtColor(claheImage, cv2.COLOR_HSV2BGR)
234 |     #cv2.imshow('CLAHE', claheImage)
235 |     return claheImage
236 | 
237 | def dgci(image):
238 |     image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
239 | 
240 |     hue = image[:, :, 0].astype(np.float32)
241 |     sat = image[:, :, 1].astype(np.float32)
242 |     val = image[:, :, 2].astype(np.float32)
243 | 
244 |     np.seterr(divide='ignore', invalid='ignore')
245 |     image_out = ((hue - 60) / (60 + (1 - sat) + (1 - val))) / 3
246 | 
247 |     image_out = cv2.normalize(image_out, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
248 |     image_out = image_out.astype('uint8')
249 | 
250 |     return image_out
251 | 
252 | ##### BLUR ALGORITHMS
253 | # some algorithms developed with the help of ChatGPT!
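# NOTE (illustrative usage, not from the OWL codebase): the blur metrics below
# (fft_blur, laplacian_blur, etc.) each return a single scalar score for an image.
# For laplacian_blur, a higher variance means a sharper image; a common rule of
# thumb (e.g. from the PyImageSearch tutorials these draw on) is to flag a frame
# as blurry when the score drops below roughly 100:
#
#   score = laplacian_blur(frame)   # frame is a BGR image
#   is_blurry = score < 100.0       # threshold is illustrative - tune per camera/lens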
254 | # used before passing image into blur algorithms 255 | def normalize_brightness(image, intensity=0.8): 256 | img_yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV) 257 | img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0]) 258 | img_yuv[:, :, 0] = np.clip(intensity * img_yuv[:, :, 0], 0, 255) 259 | normalized = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR) 260 | 261 | # Return the normalized image 262 | #stacked = np.hstack((image, normalized)) 263 | #cv2.imshow('normalised', stacked) 264 | #cv2.waitKey(0) 265 | 266 | return normalized 267 | 268 | def fft_blur(image, size=60): 269 | """ 270 | Adapted from: 271 | https://pyimagesearch.com/2020/06/15/opencv-fast-fourier-transform-fft-for-blur-detection-in-images-and-video-streams/ 272 | """ 273 | (h, w) = image.shape 274 | (cX, cY) = (int(w / 2.0), int(h / 2.0)) 275 | fft = np.fft.fft2(image) 276 | fftShift = np.fft.fftshift(fft) 277 | 278 | fftShift[cY - size:cY + size, cX - size:cX + size] = 0 279 | fftShift = np.fft.ifftshift(fftShift) 280 | recon = np.fft.ifft2(fftShift) 281 | 282 | magnitude = 20 * np.log(np.abs(recon)) 283 | mean = np.mean(magnitude) 284 | 285 | return mean 286 | 287 | def laplacian_blur(image): 288 | grey = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY) 289 | blurriness = cv2.Laplacian(grey, cv2.CV_64F).var() 290 | 291 | return blurriness 292 | 293 | 294 | def variance_of_gradient_blur(image): 295 | grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 296 | sobelx = cv2.Sobel(grey, cv2.CV_64F, 1, 0, ksize=3) 297 | sobely = cv2.Sobel(grey, cv2.CV_64F, 0, 1, ksize=3) 298 | gradient_magnitude = np.sqrt(np.square(sobelx) + np.square(sobely)) 299 | blurriness = np.var(gradient_magnitude) 300 | 301 | return blurriness 302 | 303 | 304 | def tenengrad_blur(image): 305 | grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 306 | sobelx = cv2.Sobel(grey, cv2.CV_64F, 1, 0, ksize=5) 307 | sobely = cv2.Sobel(grey, cv2.CV_64F, 0, 1, ksize=5) 308 | gradient_magnitude = np.sqrt(np.square(sobelx) + np.square(sobely)) 309 | blurriness = np.sum(np.square(gradient_magnitude)) / (grey.shape[0] * grey.shape[1]) 310 | 311 | return blurriness 312 | 313 | 314 | def entropy_blur(image): 315 | grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 316 | hist = cv2.calcHist([grey], [0], None, [256], [0, 256]) 317 | hist_norm = hist / (grey.shape[0] * grey.shape[1]) 318 | hist_norm = hist_norm[hist_norm != 0] 319 | blurriness = -np.sum(hist_norm * np.log2(hist_norm)) 320 | 321 | return blurriness 322 | 323 | 324 | def wavelet_blur(image): 325 | import pywt 326 | grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 327 | coeffs = pywt.dwt2(grey, 'haar') 328 | LL, (LH, HL, HH) = coeffs 329 | blurriness = np.sum(np.square(LL)) / (grey.shape[0] * grey.shape[1]) 330 | 331 | return blurriness 332 | 333 | 334 | def gradient_blur(image): 335 | grey = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) 336 | sobelx = cv2.Sobel(grey, cv2.CV_64F, 1, 0, ksize=3) 337 | sobely = cv2.Sobel(grey, cv2.CV_64F, 0, 1, ksize=3) 338 | gradient_magnitude = np.sqrt(np.square(sobelx) + np.square(sobely)) 339 | blurriness = np.sum(gradient_magnitude) / (grey.shape[0] * grey.shape[1]) 340 | 341 | return blurriness -------------------------------------------------------------------------------- /utils/config_manager.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from configparser import ConfigParser, Error as ConfigParserError 3 | from typing import Dict, Set, Tuple 4 | 5 | import logging 6 | import utils.error_manager as errors 7 | 8 | logger = 
logging.getLogger(__name__) 9 | 10 | class ConfigValidator: 11 | """Validates OWL configuration files""" 12 | 13 | REQUIRED_CONFIG = { 14 | 'System': { 15 | 'required_keys': {'algorithm', 'relay_num', 'actuation_duration', 'delay'}, 16 | 'optional_keys': {'input_file_or_directory'} 17 | }, 18 | 'Controller': { 19 | # Base requirements for all controller types 20 | 'required_keys': {'controller_type'}, 21 | 'optional_keys': { 22 | 'detection_mode_pin_up', 23 | 'detection_mode_pin_down', 24 | 'recording_pin', 25 | 'sensitivity_pin', 26 | 'low_sensitivity_config', 27 | 'high_sensitivity_config', 28 | 'switch_purpose', 29 | 'switch_pin' 30 | }, 31 | # Type-specific requirements 32 | 'type_specific': { 33 | 'none': { 34 | 'required_keys': set(), 35 | 'optional_keys': set() 36 | }, 37 | 'ute': { 38 | 'required_keys': {'switch_pin', 'switch_purpose'}, 39 | 'optional_keys': set() 40 | }, 41 | 'advanced': { 42 | 'required_keys': { 43 | 'detection_mode_pin_up', 44 | 'detection_mode_pin_down', 45 | 'recording_pin', 46 | 'sensitivity_pin', 47 | 'low_sensitivity_config', 48 | 'high_sensitivity_config' 49 | }, 50 | 'optional_keys': set() 51 | } 52 | } 53 | }, 54 | 'Camera': { 55 | 'required_keys': {'resolution_width', 'resolution_height'}, 56 | 'optional_keys': {'exp_compensation'} 57 | }, 58 | 'GreenOnBrown': { 59 | 'required_keys': { 60 | 'exg_min', 'exg_max', 'hue_min', 'hue_max', 61 | 'saturation_min', 'saturation_max', 'brightness_min', 'brightness_max', 62 | 'min_detection_area' 63 | }, 64 | 'optional_keys': {'invert_hue'} 65 | }, 66 | 'DataCollection': { 67 | 'required_keys': {'sample_images', 'sample_method', 'save_directory'}, 68 | 'optional_keys': {'sample_frequency', 'disable_detection', 'log_fps', 'camera_name'} 69 | }, 70 | 'Relays': { 71 | 'required_keys': {'0', '1', '2', '3'}, 72 | 'optional_keys': set() 73 | } 74 | } 75 | 76 | VALUE_VALIDATORS = { 77 | # 8-bit values (0-255) 78 | 'exg_min': ('int', 0, 255), 79 | 'exg_max': ('int', 0, 255), 80 | 'saturation_min': ('int', 0, 255), 81 | 'saturation_max': ('int', 0, 255), 82 | 'brightness_min': ('int', 0, 255), 83 | 'brightness_max': ('int', 0, 255), 84 | # Hue values (0-180) 85 | 'hue_min': ('int', 0, 180), 86 | 'hue_max': ('int', 0, 180), 87 | # Resolution 88 | 'resolution_width': ('int', 1, None), 89 | 'resolution_height': ('int', 1, None), 90 | # Camera settings 91 | 'exp_compensation': ('float', -10, 10), 92 | # Detection confidence 93 | 'confidence': ('float', 0, 1), 94 | # GPIO pins 95 | 'switch_pin': ('pin', 1, 40), 96 | 'detection_mode_pin_up': ('pin', 1, 40), 97 | 'detection_mode_pin_down': ('pin', 1, 40), 98 | 'recording_pin': ('pin', 1, 40), 99 | 'sensitivity_pin': ('pin', 1, 40), 100 | } 101 | 102 | VALID_ALGORITHMS = {'exg', 'exgr', 'maxg', 'nexg', 'exhsv', 'hsv', 'gndvi', 'gog'} 103 | VALID_CONTROLLER_TYPES = {'none', 'ute', 'advanced'} 104 | VALID_SWITCH_PURPOSES = {'recording', 'sensitivity'} 105 | 106 | # to check for valid ranges 107 | THRESHOLD_PAIRS = [ 108 | ('exg_min', 'exg_max'), 109 | ('hue_min', 'hue_max'), 110 | ('saturation_min', 'saturation_max'), 111 | ('brightness_min', 'brightness_max') 112 | ] 113 | 114 | @classmethod 115 | def validate_controller(cls, config: ConfigParser) -> Tuple[bool, Dict[str, Dict[str, str]]]: 116 | """Validate controller configuration.""" 117 | controller_errors: Dict[str, Dict[str, str]] = {} # Type hint for errors dictionary 118 | controller_type = config.get('Controller', 'controller_type', fallback='').lower() 119 | 120 | # Validate controller type 121 | if not controller_type: 122 
| return False, {'Controller': {'controller_type': 'Controller type must be specified'}} 123 | 124 | if controller_type not in cls.VALID_CONTROLLER_TYPES: 125 | return False, {'Controller': { 126 | 'controller_type': f'Invalid controller type. Must be one of: {", ".join(sorted(cls.VALID_CONTROLLER_TYPES))}' 127 | }} 128 | 129 | # For UTE controller, validate switch_purpose 130 | if controller_type == 'ute' and config.has_option('Controller', 'switch_purpose'): 131 | switch_purpose = config.get('Controller', 'switch_purpose').lower() 132 | if switch_purpose not in cls.VALID_SWITCH_PURPOSES: 133 | if 'Controller' not in controller_errors: 134 | controller_errors['Controller'] = {} 135 | controller_errors['Controller'][ 136 | 'switch_purpose'] = f'Must be one of: {", ".join(sorted(cls.VALID_SWITCH_PURPOSES))}' 137 | 138 | # For advanced controller, validate config files exist 139 | if controller_type == 'advanced': 140 | for config_key in ['low_sensitivity_config', 'high_sensitivity_config']: 141 | if config.has_option('Controller', config_key): 142 | config_path = Path(config.get('Controller', config_key)) 143 | if not config_path.exists(): 144 | if 'Controller' not in controller_errors: 145 | controller_errors['Controller'] = {} 146 | controller_errors['Controller'][config_key] = f'Config file does not exist: {config_path}' 147 | 148 | return not bool(controller_errors), controller_errors 149 | 150 | @classmethod 151 | def get_controller_requirements(cls, controller_type: str) -> Tuple[set, set]: 152 | """Get combined base and type-specific requirements for a controller.""" 153 | base_required = cls.REQUIRED_CONFIG['Controller']['required_keys'] 154 | base_optional = cls.REQUIRED_CONFIG['Controller']['optional_keys'] 155 | 156 | type_config = cls.REQUIRED_CONFIG['Controller']['type_specific'].get( 157 | controller_type, 158 | {'required_keys': set(), 'optional_keys': set()} 159 | ) 160 | 161 | return ( 162 | base_required | type_config['required_keys'], 163 | base_optional | type_config['optional_keys'] 164 | ) 165 | 166 | @classmethod 167 | def validate_algorithm(cls, config: ConfigParser) -> Tuple[bool, Dict[str, Dict[str, str]]]: 168 | """Validate algorithm selection.""" 169 | algorithm = config.get('System', 'algorithm', fallback='').lower() 170 | if not algorithm: 171 | return False, {'System': {'algorithm': 'Algorithm must be specified'}} 172 | 173 | if algorithm not in cls.VALID_ALGORITHMS: 174 | return False, {'System': { 175 | 'algorithm': f'Invalid algorithm. Must be one of: {", ".join(sorted(cls.VALID_ALGORITHMS))}' 176 | }} 177 | 178 | return True, {} 179 | 180 | @classmethod 181 | def validate_thresholds(cls, config: ConfigParser) -> Tuple[bool, Dict[str, Dict[str, str]]]: 182 | """ 183 | Validate threshold relationships and detection ranges. 
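        A minimal illustrative sketch (hypothetical values, not a recommended config): a pair such as hue_min=30, hue_max=32 passes the min < max check but spans only 2 values, so for the 'hsv'/'exhsv' algorithms it is flagged as narrower than ACCEPTABLE_RANGE:

            >>> cfg = ConfigParser()
            >>> cfg.read_dict({'System': {'algorithm': 'hsv'},
            ...                'GreenOnBrown': {'hue_min': '30', 'hue_max': '32',
            ...                                 'saturation_min': '30', 'saturation_max': '255',
            ...                                 'brightness_min': '5', 'brightness_max': '200'}})
            >>> ok, errs = ConfigValidator.validate_thresholds(cfg)  # ok is False; errs flags 'hue_range'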
184 | Returns (is_valid, errors) 185 | """ 186 | ACCEPTABLE_RANGE = 5 187 | threshold_errors = {} 188 | section_errors = {} 189 | 190 | # Validate min < max for all threshold pairs 191 | for min_key, max_key in cls.THRESHOLD_PAIRS: 192 | try: 193 | min_val = config.getint('GreenOnBrown', min_key) 194 | max_val = config.getint('GreenOnBrown', max_key) 195 | 196 | if min_val >= max_val: 197 | section_errors[f"{min_key}_{max_key}"] = ( 198 | f"{min_key} ({min_val}) must be less than {max_key} ({max_val})" 199 | ) 200 | except (ValueError, ConfigParserError): 201 | # Skip if values aren't valid integers - this will be caught by value validation 202 | continue 203 | 204 | # Validate detection ranges overlap 205 | algorithm = config.get('System', 'algorithm', fallback='').lower() 206 | 207 | # For HSV-based algorithms, check HSV ranges make sense together 208 | if algorithm in {'hsv', 'exhsv'}: 209 | try: 210 | hue_range = range(config.getint('GreenOnBrown', 'hue_min'), 211 | config.getint('GreenOnBrown', 'hue_max')) 212 | sat_range = range(config.getint('GreenOnBrown', 'saturation_min'), 213 | config.getint('GreenOnBrown', 'saturation_max')) 214 | val_range = range(config.getint('GreenOnBrown', 'brightness_min'), 215 | config.getint('GreenOnBrown', 'brightness_max')) 216 | 217 | # Check if ranges are too restrictive 218 | if len(hue_range) < ACCEPTABLE_RANGE: 219 | section_errors['hue_range'] = 'Hue range is too narrow for reliable detection' 220 | if len(sat_range) < ACCEPTABLE_RANGE: 221 | section_errors['saturation_range'] = 'Saturation range is too narrow for reliable detection' 222 | if len(val_range) < ACCEPTABLE_RANGE: 223 | section_errors['brightness_range'] = 'Brightness range is too narrow for reliable detection' 224 | 225 | except (ValueError, ConfigParserError): 226 | # Skip if values aren't valid integers - this will be caught by value validation 227 | pass 228 | 229 | # For EXG-based algorithms, check EXG range 230 | if algorithm in {'exg', 'exgr', 'maxg', 'nexg', 'exhsv'}: 231 | try: 232 | exg_range = range(config.getint('GreenOnBrown', 'exg_min'), 233 | config.getint('GreenOnBrown', 'exg_max')) 234 | 235 | if len(exg_range) < ACCEPTABLE_RANGE: 236 | section_errors['exg_range'] = 'ExG range is too narrow for reliable detection' 237 | 238 | except (ValueError, ConfigParserError): 239 | pass 240 | 241 | if section_errors: 242 | threshold_errors['GreenOnBrown'] = section_errors 243 | 244 | return not bool(threshold_errors), threshold_errors 245 | 246 | @classmethod 247 | def validate_value(cls, key: str, value: str, used_pins: Set[int]) -> Tuple[bool, str]: 248 | """Validate a single config value.""" 249 | if key not in cls.VALUE_VALIDATORS: 250 | return True, "" 251 | 252 | val_type, min_val, max_val = cls.VALUE_VALIDATORS[key] 253 | 254 | try: 255 | if val_type == 'int': 256 | val = int(value) 257 | if min_val is not None and val < min_val: 258 | return False, f"Value must be >= {min_val}" 259 | if max_val is not None and val > max_val: 260 | return False, f"Value must be <= {max_val}" 261 | 262 | elif val_type == 'float': 263 | val = float(value) 264 | if min_val is not None and val < min_val: 265 | return False, f"Value must be >= {min_val}" 266 | if max_val is not None and val > max_val: 267 | return False, f"Value must be <= {max_val}" 268 | 269 | elif val_type == 'pin': 270 | val = int(value) 271 | if min_val is not None and val < min_val: 272 | return False, f"Pin must be >= {min_val}" 273 | if max_val is not None and val > max_val: 274 | return False, f"Pin must be <= 
{max_val}" 275 | if val in used_pins: 276 | return False, f"Pin {val} is already in use" 277 | used_pins.add(val) 278 | 279 | except ValueError: 280 | return False, f"Must be a valid {val_type}" 281 | 282 | return True, "" 283 | 284 | @classmethod 285 | def validate_relays(cls, config: ConfigParser) -> Tuple[bool, Dict[str, Dict[str, str]], list[str]]: 286 | """ 287 | Validate relay configuration between System.relay_num and Relays section. 288 | Returns: 289 | Tuple containing: 290 | - bool: whether validation passed 291 | - Dict[str, Dict[str, str]]: nested dictionary of section -> {key: error_message} 292 | - list[str]: list of warning messages 293 | """ 294 | try: 295 | relay_num = config.getint('System', 'relay_num') 296 | if relay_num < 0: 297 | return False, {'System': {'relay_num': 'Must be a non-negative integer'}}, [] 298 | except ValueError: 299 | return False, {'System': {'relay_num': 'Must be a valid integer'}}, [] 300 | 301 | # Get available relays (keys should be '0', '1', etc.) 302 | available_relays = set(config['Relays'].keys()) 303 | 304 | # Validate relay keys are proper integers 305 | try: 306 | for relay in available_relays: 307 | _ = int(relay) 308 | except ValueError: 309 | return False, {'Relays': {'format': 'Relay keys must be integers (0, 1, 2, etc.)'}}, [] 310 | 311 | configured_relays = {str(i) for i in range(relay_num)} 312 | 313 | # Check if requesting more relays than configured 314 | if relay_num > len(available_relays): 315 | return False, { 316 | 'System': { 317 | 'relay_num': f'Requests {relay_num} relays but only {len(available_relays)} are configured in [Relays] section' 318 | } 319 | }, [] 320 | 321 | # If requesting fewer relays than configured, generate warning about unused relays 322 | warnings = [] 323 | if relay_num < len(available_relays): 324 | unused_relays = available_relays - configured_relays 325 | warnings.append( 326 | f"Only using {relay_num} relays but {len(available_relays)} are configured. 
" 327 | f"Unused relays: {', '.join(sorted(unused_relays))}" 328 | ) 329 | 330 | # Validate that required relay numbers exist 331 | missing_relays = configured_relays - available_relays 332 | if missing_relays: 333 | return False, { 334 | 'Relays': { 335 | 'missing': f'Missing configurations for relays: {", ".join(sorted(missing_relays))}' 336 | } 337 | }, [] 338 | 339 | return True, {}, warnings 340 | 341 | @classmethod 342 | def load_and_validate_config(cls, config_path: Path) -> ConfigParser: 343 | """Load and validate configuration file.""" 344 | config = ConfigParser() 345 | used_pins = set() 346 | validation_errors = {} 347 | 348 | # File existence and parsing must still raise immediately 349 | # as we can't continue without a valid file 350 | if not config_path.exists(): 351 | raise errors.ConfigFileError(config_path, "File does not exist") 352 | 353 | try: 354 | files_read = config.read(config_path) 355 | if not files_read: 356 | raise errors.ConfigFileError(config_path, "File could not be read") 357 | except ConfigParserError as e: 358 | raise errors.ConfigFileError(config_path, f"Parse error: {str(e)}") 359 | 360 | # Create working copy of config requirements 361 | working_config = dict(cls.REQUIRED_CONFIG) 362 | 363 | # Validate controller specific rules 364 | is_valid, controller_errors = cls.validate_controller(config) 365 | if not is_valid: 366 | validation_errors.update(controller_errors) 367 | 368 | # Update controller requirements based on type 369 | controller_type = config.get('Controller', 'controller_type', fallback='').lower() 370 | required_keys, optional_keys = cls.get_controller_requirements(controller_type) 371 | working_config['Controller'] = { 372 | 'required_keys': required_keys, 373 | 'optional_keys': optional_keys 374 | } 375 | 376 | # Validate algorithm 377 | is_valid, algorithm_errors = cls.validate_algorithm(config) 378 | if not is_valid: 379 | validation_errors.update(algorithm_errors) 380 | 381 | # Threshold validation 382 | is_valid, threshold_errors = cls.validate_thresholds(config) 383 | if not is_valid: 384 | validation_errors.update(threshold_errors) 385 | 386 | # Check required sections 387 | missing_sections = set(working_config.keys()) - set(config.sections()) 388 | if missing_sections: 389 | validation_errors['missing_sections'] = { 390 | 'sections': f"Missing required sections: {', '.join(missing_sections)}" 391 | } 392 | 393 | # Validate sections and values 394 | for section in config.sections(): 395 | section_errors = {} 396 | for key, value in config[section].items(): 397 | is_valid, error_msg = cls.validate_value(key, value, used_pins) 398 | if not is_valid: 399 | section_errors[key] = value + f" - {error_msg}" 400 | if section_errors: 401 | validation_errors[section] = section_errors 402 | 403 | # Validate relay configuration 404 | is_valid, relay_errors, relay_warnings = cls.validate_relays(config) 405 | if not is_valid: 406 | validation_errors.update(relay_errors) 407 | 408 | # Log any relay warnings 409 | for warning in relay_warnings: 410 | logger.warning(warning) 411 | 412 | # Check required keys in each section 413 | for section, requirements in working_config.items(): 414 | if section not in config.sections(): 415 | continue # Skip if section is missing - we've already recorded this error 416 | 417 | config_keys = set(config[section].keys()) 418 | required_keys = {k.lower() for k in requirements['required_keys']} 419 | optional_keys = {k.lower() for k in requirements['optional_keys']} 420 | 421 | missing_keys = required_keys - 
config_keys 422 | if missing_keys: 423 | if section not in validation_errors: 424 | validation_errors[section] = {} 425 | validation_errors[section].update({ 426 | k: "Required key missing" for k in missing_keys 427 | }) 428 | 429 | unknown_keys = config_keys - (required_keys | optional_keys) 430 | if unknown_keys: 431 | logger.warning( 432 | f"Unknown keys in section [{section}]: {', '.join(unknown_keys)}" 433 | ) 434 | 435 | # Raise all validation errors at once 436 | if validation_errors: 437 | raise errors.ConfigValueError(validation_errors, config_path) 438 | 439 | logger.info(f"Successfully loaded and validated config: {config_path}") 440 | return config -------------------------------------------------------------------------------- /utils/directory_manager.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | import utils.error_manager as errors 3 | import platform 4 | import time 5 | import os 6 | 7 | from utils.log_manager import LogManager 8 | 9 | 10 | class DirectorySetup: 11 | def __init__(self, save_directory): 12 | self.logger = LogManager.get_logger(__name__) 13 | self.save_directory = save_directory 14 | self.save_subdirectory = None 15 | 16 | def setup_directories(self, max_retries=5, retry_delay=2): 17 | for attempt in range(max_retries): 18 | try: 19 | return self._try_setup_directories() 20 | except (errors.USBMountError, errors.USBWriteError, errors.NoWritableUSBError) as e: 21 | self.logger.info(f"[INFO] Attempt {attempt + 1} failed: {str(e)}. Retrying in {retry_delay} seconds...") 22 | time.sleep(retry_delay) 23 | 24 | raise errors.NoWritableUSBError() 25 | 26 | def _try_setup_directories(self): 27 | self.save_subdirectory = os.path.join(self.save_directory, datetime.now().strftime('%Y%m%d')) 28 | if not os.path.ismount(self.save_directory): 29 | return self._handle_mount_error() 30 | 31 | os.makedirs(self.save_subdirectory, exist_ok=True) 32 | if not self.test_file_write(): 33 | raise errors.USBWriteError("Failed to write test file") 34 | 35 | self.logger.info(f"[SUCCESS] Directory setup complete: {self.save_subdirectory}") 36 | return self.save_directory, self.save_subdirectory 37 | 38 | def _handle_mount_error(self): 39 | """ 40 | Handle USB mount errors on Raspberry Pi systems. 41 | Searches /media directory for mounted, writable USB drives. 
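        For example (hypothetical mount point): with a drive mounted at
        /media/owl/USB01, the search repoints save_directory to that path,
        creates a dated subdirectory such as /media/owl/USB01/20240528 and
        confirms it is writable via test_file_write() before returning.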
42 | """ 43 | if platform.system() != 'Linux': 44 | raise errors.StorageSystemError(platform=platform.system()) 45 | 46 | media_dir = '/media' 47 | try: 48 | mounted_drives = self._find_mounted_drives(media_dir) 49 | except OSError as e: 50 | raise errors.USBMountError(device=media_dir) from e 51 | 52 | for drive_path in mounted_drives: 53 | if self._try_setup_drive(drive_path): 54 | return self.save_directory, self.save_subdirectory 55 | 56 | raise errors.NoWritableUSBError(searched_paths=[media_dir]) 57 | 58 | def _find_mounted_drives(self, media_dir: str) -> list[str]: 59 | """Find all mounted drives in the media directory.""" 60 | mounted_drives = [] 61 | 62 | try: 63 | for username in os.listdir(media_dir): 64 | user_media_dir = os.path.join(media_dir, username) 65 | if not os.path.isdir(user_media_dir): 66 | continue 67 | 68 | for drive in os.listdir(user_media_dir): 69 | drive_path = os.path.join(user_media_dir, drive) 70 | if os.path.ismount(drive_path): 71 | mounted_drives.append(drive_path) 72 | except OSError as e: 73 | self.logger.error(f"Error accessing media directory: {e}", exc_info=True) 74 | 75 | return mounted_drives 76 | 77 | def _try_setup_drive(self, drive_path: str) -> bool: 78 | """ 79 | Try to setup a specific drive for writing. 80 | 81 | Returns: 82 | bool: True if drive is writable and setup successful 83 | """ 84 | self.save_directory = drive_path 85 | self.save_subdirectory = os.path.join( 86 | self.save_directory, 87 | datetime.now().strftime('%Y%m%d') 88 | ) 89 | 90 | try: 91 | os.makedirs(self.save_subdirectory, exist_ok=True) 92 | if self.test_file_write(): 93 | self.logger.info(f'Connected to {drive_path} and it is writable.') 94 | return True 95 | self.logger.error(f'{drive_path} is connected but not writable.') 96 | except PermissionError: 97 | self.logger.error(f'Failed to access {drive_path}', exc_info=True) 98 | 99 | return False 100 | 101 | def test_file_write(self): 102 | test_file_path = os.path.join(self.save_subdirectory, 'test_write.txt') 103 | try: 104 | with open(test_file_path, 'w') as f: 105 | f.write('Test write successful') 106 | os.remove(test_file_path) 107 | return True 108 | except Exception as e: 109 | self.logger.error(f"[ERROR] Failed to write test file: {e}", exc_info=True) 110 | return False -------------------------------------------------------------------------------- /utils/frame_reader.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | import cv2 4 | 5 | from imutils.video import FileVideoStream 6 | from utils.log_manager import LogManager 7 | 8 | class FrameReader: 9 | def __init__(self, path, resolution=(640, 480), loop_time=5): 10 | ''' 11 | FrameReader allows users to provide a directory of images, video or a single image to OWL for testing 12 | and visualisation purposes. 
13 | :param path: path to the media (single image, directory of images or video) 14 | :param loop_time: the delay between image displays if using a directory 15 | ''' 16 | 17 | self.loop_time = loop_time 18 | self.loop_start_time = time.time() 19 | self.resolution = resolution 20 | self.curr_image = None 21 | self.files = None 22 | 23 | self.logger = LogManager.get_logger(__name__) 24 | 25 | if os.path.isdir(path): 26 | self.files = iter(os.listdir(path)) 27 | self.path = path 28 | self.cam = None 29 | self.input_type = "directory" 30 | self.single_image = False 31 | 32 | elif os.path.isfile(path): 33 | if path.endswith(('.png', '.jpg', '.jpeg')): 34 | self.cam = cv2.resize(cv2.imread(path), self.resolution, interpolation=cv2.INTER_AREA) 35 | self.input_type = "image" 36 | self.single_image = True 37 | 38 | else: 39 | self.cam = FileVideoStream(path).start() 40 | self.input_type = "video" 41 | self.single_image = False 42 | else: 43 | self.logger.error("Path must be a directory or a file", exc_info=True) 44 | raise ValueError(f'[ERROR] Invalid path to image/s: {path}') 45 | 46 | def read(self): 47 | if self.single_image: 48 | return self.cam 49 | 50 | elif self.files: 51 | if self.curr_image is None or (time.time() - self.loop_start_time) > self.loop_time: 52 | try: 53 | image = next(self.files) 54 | self.curr_image = cv2.imread(os.path.join(self.path, image)) 55 | self.curr_image = cv2.resize(self.curr_image, self.resolution, interpolation=cv2.INTER_AREA) 56 | 57 | self.loop_start_time = time.time() 58 | 59 | except StopIteration: 60 | self.files = iter(os.listdir(self.path)) # restart from first image 61 | return self.read() 62 | 63 | return self.curr_image 64 | 65 | else: 66 | frame = self.cam.read() 67 | if frame is not None: # guard against an exhausted video stream returning None 68 | frame = cv2.resize(frame, self.resolution, interpolation=cv2.INTER_AREA) 69 | return frame 70 | 71 | def reset(self): 72 | if self.input_type == "directory": 73 | # reset the iterator to the beginning of the directory 74 | self.files = iter(os.listdir(self.path)) 75 | self.curr_image = None 76 | 77 | elif self.input_type == "video": 78 | # stop the current video stream and start a new one 79 | self.cam.stop() 80 | self.cam = FileVideoStream(self.path).start() 81 | 82 | self.loop_start_time = time.time() # reset the loop timer 83 | 84 | def stop(self): 85 | if not self.single_image and self.cam: 86 | self.cam.stop() 87 | -------------------------------------------------------------------------------- /utils/greenonbrown.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from utils.algorithms import exg, exg_standardised, exg_standardised_hue, hsv, exgr, gndvi, maxg 3 | import numpy as np 4 | import cv2 5 | 6 | 7 | class GreenOnBrown: 8 | def __init__(self, algorithm='exg', label_file='models/labels.txt'): 9 | self.algorithm = algorithm 10 | self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) 11 | 12 | # Dictionary mapping algorithm names to functions 13 | self.algorithms = { 14 | 'exg': exg, 15 | 'exgr': exgr, 16 | 'maxg': maxg, 17 | 'nexg': exg_standardised, 18 | 'exhsv': exg_standardised_hue, 19 | 'hsv': hsv, 20 | 'gndvi': gndvi 21 | } 22 | 23 | def inference(self, image, 24 | exg_min=30, 25 | exg_max=250, 26 | hue_min=30, 27 | hue_max=90, 28 | brightness_min=5, 29 | brightness_max=200, 30 | saturation_min=30, 31 | saturation_max=255, 32 | min_detection_area=1, 33 | show_display=False, 34 | algorithm='exg', 35 | invert_hue=False, 36 | label='WEED'): 37 | threshed_already = False 38 | 39 | # Retrieve 
the function based on the algorithm name 40 | func = self.algorithms.get(algorithm, exg_standardised_hue) 41 | 42 | # Handle special cases for functions with additional parameters 43 | if algorithm == 'exhsv': 44 | output = func(image, hue_min=hue_min, hue_max=hue_max, brightness_min=brightness_min, 45 | brightness_max=brightness_max, saturation_min=saturation_min, 46 | saturation_max=saturation_max, invert_hue=invert_hue) 47 | elif algorithm == 'hsv': 48 | output, threshed_already = func(image, hue_min=hue_min, hue_max=hue_max, brightness_min=brightness_min, 49 | brightness_max=brightness_max, saturation_min=saturation_min, 50 | saturation_max=saturation_max, invert_hue=invert_hue) 51 | else: 52 | output = func(image) 53 | 54 | weed_centres = [] 55 | boxes = [] 56 | 57 | if not threshed_already: 58 | output = np.clip(output, exg_min, exg_max) 59 | output = np.uint8(np.abs(output)) 60 | if show_display: 61 | cv2.imshow("HSV Threshold on ExG", output) 62 | threshold_out = cv2.adaptiveThreshold(output, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 63 | 31, 2) 64 | # threshold_out = cv2.threshold(output, exg_min, exg_max, cv2.THRESH_BINARY) 65 | threshold_out = cv2.morphologyEx(threshold_out, cv2.MORPH_CLOSE, self.kernel, iterations=1) 66 | else: 67 | threshold_out = cv2.morphologyEx(output, cv2.MORPH_CLOSE, self.kernel, iterations=5) 68 | 69 | contours, _ = cv2.findContours(threshold_out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) 70 | 71 | for c in contours: 72 | if cv2.contourArea(c) > min_detection_area: 73 | x, y, w, h = cv2.boundingRect(c) 74 | boxes.append([x, y, w, h]) 75 | weed_centres.append([x + w // 2, y + h // 2]) 76 | 77 | if show_display: 78 | image_out = image.copy() 79 | for box in boxes: 80 | startX, startY, boxW, boxH = box 81 | endX = startX + boxW 82 | endY = startY + boxH 83 | cv2.putText(image_out, label, (startX, startY + 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2) 84 | cv2.rectangle(image_out, (int(startX), int(startY)), (endX, endY), (0, 0, 255), 2) 85 | 86 | return contours, boxes, weed_centres, image_out 87 | 88 | return contours, boxes, weed_centres, image 89 | -------------------------------------------------------------------------------- /utils/greenongreen.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from pycoral.adapters.common import input_size 3 | from pycoral.adapters.detect import get_objects 4 | from pycoral.utils.dataset import read_label_file 5 | from pycoral.utils.edgetpu import make_interpreter 6 | from pycoral.utils.edgetpu import run_inference 7 | from pathlib import Path 8 | 9 | import cv2 10 | 11 | 12 | class GreenOnGreen: 13 | def __init__(self, model_path='models', label_file='models/labels.txt'): 14 | if model_path is None: 15 | print('[WARNING] No model directory or path provided with --model-path flag. ' 16 | 'Attempting to load from default...') 17 | model_path = 'models' 18 | self.model_path = Path(model_path) 19 | 20 | if self.model_path.is_dir(): 21 | model_files = list(self.model_path.glob('*.tflite')) 22 | if not model_files: 23 | raise FileNotFoundError('No .tflite model files found. 
Please provide a directory or .tflite file.') 24 | 25 | else: 26 | self.model_path = model_files[0] 27 | print(f'[INFO] Using {self.model_path.stem} model...') 28 | 29 | elif self.model_path.suffix == '.tflite': 30 | print(f'[INFO] Using {self.model_path.stem} model...') 31 | 32 | else: 33 | print(f'[WARNING] Specified model path {model_path} is unsupported, attempting to use default...') 34 | 35 | model_files = Path('models').glob('*.tflite') 36 | try: 37 | self.model_path = next(model_files) 38 | print(f'[INFO] Using {self.model_path.stem} model...') 39 | 40 | except StopIteration: 41 | print('[ERROR] No model files found.') 42 | 43 | self.labels = read_label_file(label_file) 44 | self.interpreter = make_interpreter(self.model_path.as_posix()) 45 | self.interpreter.allocate_tensors() 46 | self.inference_size = input_size(self.interpreter) 47 | self.objects = None 48 | 49 | def inference(self, image, confidence=0.5, filter_id=0): 50 | cv2_im_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) 51 | cv2_im_rgb = cv2.resize(cv2_im_rgb, self.inference_size) 52 | run_inference(self.interpreter, cv2_im_rgb.tobytes()) 53 | self.objects = get_objects(self.interpreter, confidence) 54 | self.filter_id = filter_id 55 | 56 | height, width, channels = image.shape 57 | scale_x, scale_y = width / self.inference_size[0], height / self.inference_size[1] 58 | self.weed_centers = [] 59 | self.boxes = [] 60 | 61 | for det_object in self.objects: 62 | if det_object.id == self.filter_id: 63 | bbox = det_object.bbox.scale(scale_x, scale_y) 64 | 65 | startX, startY = int(bbox.xmin), int(bbox.ymin) 66 | endX, endY = int(bbox.xmax), int(bbox.ymax) 67 | boxW = endX - startX 68 | boxH = endY - startY 69 | 70 | # save the bounding box 71 | self.boxes.append([startX, startY, boxW, boxH]) 72 | # compute box center 73 | centerX = int(startX + (boxW / 2)) 74 | centerY = int(startY + (boxH / 2)) 75 | self.weed_centers.append([centerX, centerY]) 76 | 77 | percent = int(100 * det_object.score) 78 | label = f'{percent}% {self.labels.get(det_object.id, det_object.id)}' 79 | cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2) 80 | cv2.putText(image, label, (startX, startY + 30), 81 | cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2) 82 | else: 83 | pass 84 | # print(self.weedCenters) 85 | return None, self.boxes, self.weed_centers, image 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /utils/image_sampler.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | import numpy as np 4 | 5 | from datetime import datetime 6 | from multiprocessing import Process, Queue 7 | from multiprocessing.queues import Empty 8 | from utils.log_manager import LogManager 9 | 10 | 11 | class ImageRecorder: 12 | def __init__(self, save_directory, mode='whole', max_queue=200, new_process_threshold=90, max_processes=4): 13 | self.save_directory = save_directory 14 | self.mode = mode 15 | self.queue = Queue(maxsize=max_queue) 16 | self.new_process_threshold = new_process_threshold 17 | self.max_processes = max_processes 18 | self.processes = [] 19 | self.running = True 20 | self.logger = LogManager.get_logger(__name__) 21 | 22 | self.start_new_process() 23 | 24 | def start_new_process(self): 25 | if len(self.processes) < self.max_processes: 26 | p = Process(target=self.save_images) 27 | p.start() 28 | self.processes.append(p) 29 | self.logger.info(f"[INFO] Started new process, total processes: 
{len(self.processes)}") 30 | else: 31 | self.logger.warning("[INFO] Maximum number of processes reached.") 32 | 33 | def save_images(self): 34 | while self.running or not self.queue.empty(): 35 | try: 36 | frame, frame_id, boxes, centres = self.queue.get(timeout=3) 37 | 38 | except Empty: 39 | if not self.running: 40 | break 41 | continue 42 | 43 | except KeyboardInterrupt: 44 | self.logger.info("[INFO] KeyboardInterrupt received in save_images. Exiting.") 45 | break 46 | 47 | # Process and save images based on mode 48 | self.process_frame(frame, frame_id, boxes, centres) 49 | 50 | def process_frame(self, frame, frame_id, boxes, centres): 51 | timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H%M%S.%f')[:-3] + 'Z' 52 | if self.mode == 'whole': 53 | self.save_frame(frame, frame_id, timestamp) 54 | elif self.mode == 'bbox': 55 | self.save_bboxes(frame, frame_id, boxes, timestamp) 56 | elif self.mode == 'square': 57 | self.save_squares(frame, frame_id, centres, timestamp) 58 | 59 | def save_frame(self, frame, frame_id, timestamp): 60 | filename = f"{timestamp}_frame_{frame_id}.png" 61 | filepath = os.path.join(self.save_directory, filename) 62 | cv2.imwrite(filepath, frame) 63 | 64 | def save_bboxes(self, frame, frame_id, boxes, timestamp): 65 | for contour_id, box in enumerate(boxes): 66 | startX, startY, width, height = box 67 | cropped_image = frame[startY:startY+height, startX:startX+width] 68 | filename = f"{timestamp}_frame_{frame_id}_n_{str(contour_id)}.png" 69 | filepath = os.path.join(self.save_directory, filename) 70 | cv2.imwrite(filepath, cropped_image) 71 | 72 | def save_squares(self, frame, frame_id, centres, timestamp): 73 | side_length = min(200, frame.shape[0]) 74 | halfLength = side_length // 2 75 | for contour_id, centre in enumerate(centres): 76 | startX = max(centre[0] - np.random.randint(10, halfLength), 0) 77 | startY = max(centre[1] - np.random.randint(10, halfLength), 0) 78 | endX = startX + side_length 79 | endY = startY + side_length 80 | if endX > frame.shape[1]: 81 | startX = frame.shape[1] - side_length 82 | if endY > frame.shape[0]: 83 | startY = frame.shape[0] - side_length 84 | square_image = frame[startY:endY, startX:endX] 85 | filename = f"{timestamp}_frame_{frame_id}_n_{str(contour_id)}.png" 86 | filepath = os.path.join(self.save_directory, filename) 87 | cv2.imwrite(filepath, square_image) 88 | 89 | def add_frame(self, frame, frame_id, boxes, centres): 90 | if not self.queue.full(): 91 | self.queue.put((frame, frame_id, boxes, centres)) 92 | else: 93 | self.logger.info("[INFO] Queue is full, spinning up new process. 
Frame skipped.") 94 | 95 | if self.queue.qsize() > self.new_process_threshold and len(self.processes) < self.max_processes: 96 | self.start_new_process() 97 | 98 | def stop(self): 99 | """Stop image recording processes and clean up resources.""" 100 | self.running = False 101 | 102 | try: 103 | while not self.queue.empty(): 104 | self.queue.get_nowait() 105 | except Exception as e: 106 | self.logger.warning(f"Failed to clear queue: {e}") 107 | 108 | self.queue.close() 109 | self.queue.join_thread() 110 | 111 | for p in self.processes: 112 | try: 113 | p.join(timeout=1) 114 | if p.is_alive(): 115 | p.terminate() 116 | p.join(timeout=0.5) 117 | except Exception as e: 118 | self.logger.error(f"Failed to stop process: {e}") 119 | 120 | self.processes.clear() 121 | self.logger.info("[INFO] ImageRecorder stopped.") 122 | 123 | def terminate(self): 124 | """Force terminate all image recording processes.""" 125 | self.running = False 126 | for p in self.processes: 127 | if p.is_alive(): 128 | try: 129 | p.terminate() 130 | p.join(timeout=0.5) 131 | except Exception as e: 132 | self.logger.error(f"Failed to terminate process: {e}") 133 | 134 | self.processes.clear() 135 | self.queue.close() 136 | self.queue.join_thread() 137 | self.logger.info("[INFO] All recording processes terminated forcefully.") -------------------------------------------------------------------------------- /utils/input_manager.py: -------------------------------------------------------------------------------- 1 | import time 2 | import platform 3 | import configparser 4 | import subprocess 5 | import cv2 6 | import logging 7 | 8 | logger = logging.getLogger(__name__) 9 | 10 | def is_raspberry_pi() -> bool: 11 | """Check if system is running on Raspberry Pi""" 12 | platform_str = platform.platform().lower() 13 | return 'rpi' in platform_str or 'aarch' in platform_str 14 | 15 | # Determine if we're in testing mode and import GPIO if needed 16 | testing = not is_raspberry_pi() 17 | if not testing: 18 | from gpiozero import Button, LED 19 | else: 20 | platform_name = platform.system() if platform.system() == "Windows" else "unrecognized" 21 | logger.warning( 22 | f"The system is running on a {platform_name} platform. GPIO disabled. 
Test mode active.") 23 | 24 | class UteController: 25 | def __init__(self, detection_state, 26 | sample_state, 27 | stop_flag, 28 | owl_instance, 29 | status_indicator, 30 | switch_purpose='recording', 31 | switch_board_pin='BOARD37', 32 | bounce_time=1.0): 33 | 34 | self.switch = Button(switch_board_pin, bounce_time=bounce_time) 35 | self.switch_purpose = switch_purpose 36 | 37 | self.detection_state = detection_state 38 | self.sample_state = sample_state 39 | 40 | self.owl = owl_instance 41 | self.status_indicator = status_indicator 42 | self.status_indicator.start_storage_indicator() 43 | 44 | self.stop_flag = stop_flag 45 | 46 | # Set up a single handler for both press and release 47 | self.switch.when_pressed = self.toggle_state 48 | self.switch.when_released = self.toggle_state 49 | 50 | # Initialize state based on initial switch position 51 | self.update_state() 52 | 53 | def update_state(self): 54 | is_active = self.switch.is_pressed 55 | 56 | if self.switch_purpose == 'detection': 57 | with self.detection_state.get_lock(): 58 | self.detection_state.value = is_active 59 | self.owl.disable_detection = not is_active 60 | if is_active: 61 | self.status_indicator.enable_weed_detection() 62 | else: 63 | self.status_indicator.disable_weed_detection() 64 | 65 | elif self.switch_purpose == 'recording': 66 | with self.sample_state.get_lock(): 67 | self.sample_state.value = is_active 68 | self.owl.sample_images = is_active 69 | if is_active: 70 | self.status_indicator.enable_image_recording() 71 | else: 72 | self.status_indicator.disable_image_recording() 73 | 74 | def toggle_state(self): 75 | self.update_state() 76 | 77 | def weed_detect_indicator(self): 78 | self.status_indicator.weed_detect_indicator() 79 | 80 | def image_write_indicator(self): 81 | self.status_indicator.image_write_indicator() 82 | 83 | def run(self): 84 | try: 85 | while not self.stop_flag.value: 86 | time.sleep(0.1) # sleep to reduce CPU usage 87 | except KeyboardInterrupt: 88 | logger.info("[INFO] KeyboardInterrupt received in controller run loop. 
Exiting.") 89 | self.stop() # Ensure the stop flag is set 90 | except Exception as e: 91 | logger.error(f"Error in controller run loop: {e}", exc_info=True) 92 | 93 | def stop(self): 94 | with self.stop_flag.get_lock(): 95 | self.stop_flag.value = True 96 | 97 | 98 | class AdvancedController: 99 | def __init__(self, recording_state, 100 | sensitivity_state, 101 | detection_mode_state, 102 | stop_flag, 103 | owl_instance, 104 | status_indicator, 105 | low_sensitivity_config, 106 | high_sensitivity_config, 107 | detection_mode_bpin_down='BOARD35', 108 | detection_mode_bpin_up='BOARD36', 109 | recording_bpin='BOARD38', 110 | sensitivity_bpin='BOARD40', 111 | bounce_time=1.0): 112 | 113 | self.recording_switch = Button(recording_bpin, bounce_time=bounce_time) 114 | self.sensitivity_switch = Button(sensitivity_bpin, bounce_time=bounce_time) 115 | self.detection_mode_switch_up = Button(detection_mode_bpin_up, bounce_time=bounce_time) 116 | self.detection_mode_switch_down = Button(detection_mode_bpin_down, bounce_time=bounce_time) 117 | 118 | self.recording_state = recording_state 119 | self.sensitivity_state = sensitivity_state 120 | self.detection_mode_state = detection_mode_state 121 | 122 | self.stop_flag = stop_flag 123 | 124 | # set up instances for owl and status 125 | self.owl = owl_instance 126 | self.status_indicator = status_indicator 127 | self.status_indicator.start_storage_indicator() 128 | 129 | self.low_sensitivity_settings = self._read_config(low_sensitivity_config) 130 | self.high_sensitivity_settings = self._read_config(high_sensitivity_config) 131 | 132 | # Set up switch handlers 133 | self.recording_switch.when_pressed = self.update_recording_state 134 | self.recording_switch.when_released = self.update_recording_state 135 | self.sensitivity_switch.when_pressed = self.update_sensitivity_state 136 | self.sensitivity_switch.when_released = self.update_sensitivity_state 137 | self.detection_mode_switch_up.when_pressed = lambda: self.set_detection_mode(2) # All solenoids on 138 | self.detection_mode_switch_up.when_released = lambda: self.set_detection_mode(1) # Off 139 | self.detection_mode_switch_down.when_pressed = lambda: self.set_detection_mode(0) # Detection on 140 | self.detection_mode_switch_down.when_released = lambda: self.set_detection_mode(1) # Off 141 | 142 | # Initialize states based on initial switch positions 143 | self.update_state() 144 | 145 | def update_state(self): 146 | try: 147 | self.update_recording_state() 148 | self.update_sensitivity_state() 149 | self.update_detection_mode_state() 150 | except KeyboardInterrupt: 151 | logger.info("[INFO] KeyboardInterrupt received in update_state. 
Exiting.") 152 | raise # Propagate to hoot() 153 | except Exception as e: 154 | logger.error(f"Error in update_state: {e}", exc_info=True) 155 | 156 | def update_recording_state(self): 157 | self.status_indicator.generic_notification() 158 | with self.recording_state.get_lock(): 159 | self.recording_state.value = self.recording_switch.is_pressed 160 | if self.recording_state.value: 161 | self.status_indicator.enable_image_recording() 162 | self.owl.sample_images = True 163 | else: 164 | self.status_indicator.disable_image_recording() 165 | self.owl.sample_images = False 166 | 167 | def update_sensitivity_state(self): 168 | with self.sensitivity_state.get_lock(): 169 | self.sensitivity_state.value = self.sensitivity_switch.is_pressed 170 | self.update_sensitivity_settings() 171 | 172 | def update_sensitivity_settings(self): 173 | self.status_indicator.generic_notification() 174 | settings = self.low_sensitivity_settings if self.sensitivity_state.value else self.high_sensitivity_settings 175 | 176 | # Update Owl instance settings 177 | self.owl.exg_min = settings['exg_min'] 178 | self.owl.exg_max = settings['exg_max'] 179 | self.owl.hue_min = settings['hue_min'] 180 | self.owl.hue_max = settings['hue_max'] 181 | self.owl.saturation_min = settings['saturation_min'] 182 | self.owl.saturation_max = settings['saturation_max'] 183 | self.owl.brightness_min = settings['brightness_min'] 184 | self.owl.brightness_max = settings['brightness_max'] 185 | 186 | # Update trackbars if show_display is True 187 | if self.owl.show_display: 188 | cv2.setTrackbarPos("ExG-Min", self.owl.window_name, self.owl.exg_min) 189 | cv2.setTrackbarPos("ExG-Max", self.owl.window_name, self.owl.exg_max) 190 | cv2.setTrackbarPos("Hue-Min", self.owl.window_name, self.owl.hue_min) 191 | cv2.setTrackbarPos("Hue-Max", self.owl.window_name, self.owl.hue_max) 192 | cv2.setTrackbarPos("Sat-Min", self.owl.window_name, self.owl.saturation_min) 193 | cv2.setTrackbarPos("Sat-Max", self.owl.window_name, self.owl.saturation_max) 194 | cv2.setTrackbarPos("Bright-Min", self.owl.window_name, self.owl.brightness_min) 195 | cv2.setTrackbarPos("Bright-Max", self.owl.window_name, self.owl.brightness_max) 196 | 197 | def set_detection_mode(self, mode): 198 | try: 199 | with self.detection_mode_state.get_lock(): 200 | self.detection_mode_state.value = mode 201 | 202 | self.status_indicator.generic_notification() 203 | 204 | if mode == 0: # Detection on 205 | self.status_indicator.enable_weed_detection() 206 | self.owl.disable_detection = False 207 | elif mode == 2: # All solenoids on 208 | self.status_indicator.disable_weed_detection() 209 | self.owl.relay_controller.relay.all_on() 210 | self.owl.disable_detection = True 211 | else: # Off or any unexpected value 212 | self.status_indicator.disable_weed_detection() 213 | self.owl.relay_controller.relay.all_off() 214 | self.owl.disable_detection = True 215 | except KeyboardInterrupt: 216 | logger.info("[INFO] KeyboardInterrupt received in set_detection_mode. 
Exiting.") 217 | raise 218 | except Exception as e: 219 | logger.error(f"Error in set_detection_mode: {e}", exc_info=True) 220 | 221 | def update_detection_mode_state(self): 222 | if self.detection_mode_switch_up.is_pressed: 223 | self.set_detection_mode(2) # All solenoids on 224 | elif self.detection_mode_switch_down.is_pressed: 225 | self.set_detection_mode(0) # Detection on 226 | else: 227 | self.set_detection_mode(1) # Off 228 | 229 | def weed_detect_indicator(self): 230 | self.status_indicator.weed_detect_indicator() 231 | 232 | def image_write_indicator(self): 233 | self.status_indicator.image_write_indicator() 234 | 235 | def run(self): 236 | try: 237 | while not self.stop_flag.value: 238 | time.sleep(0.1) # sleep to reduce CPU usage 239 | except KeyboardInterrupt: 240 | logger.info("[INFO] KeyboardInterrupt received in controller run loop. Exiting.") 241 | self.stop() # Ensure the stop flag is set 242 | except Exception as e: 243 | logger.error(f"Error in controller run loop: {e}", exc_info=True) 244 | 245 | def stop(self): 246 | with self.stop_flag.get_lock(): 247 | self.stop_flag.value = True 248 | 249 | def _read_config(self, config_file): 250 | config = configparser.ConfigParser() 251 | config.read(config_file) 252 | return { 253 | 'exg_min': config.getint('GreenOnBrown', 'exg_min'), 254 | 'exg_max': config.getint('GreenOnBrown', 'exg_max'), 255 | 'hue_min': config.getint('GreenOnBrown', 'hue_min'), 256 | 'hue_max': config.getint('GreenOnBrown', 'hue_max'), 257 | 'saturation_min': config.getint('GreenOnBrown', 'saturation_min'), 258 | 'saturation_max': config.getint('GreenOnBrown', 'saturation_max'), 259 | 'brightness_min': config.getint('GreenOnBrown', 'brightness_min'), 260 | 'brightness_max': config.getint('GreenOnBrown', 'brightness_max') 261 | } 262 | 263 | def get_rpi_version(): 264 | try: 265 | cmd = ["cat", "/proc/device-tree/model"] 266 | model = subprocess.check_output(cmd).decode('utf-8').rstrip('\x00').strip() 267 | 268 | if 'Pi 5' in model: 269 | return 'rpi-5' 270 | elif 'Pi 4' in model: 271 | return 'rpi-4' 272 | elif 'Pi 3' in model: 273 | return 'rpi-3' 274 | else: 275 | return 'non-rpi' 276 | 277 | except FileNotFoundError: 278 | return 'non-rpi' 279 | except subprocess.CalledProcessError: 280 | 281 | raise ValueError("Error reading Raspberry Pi version.") 282 | 283 | 284 | -------------------------------------------------------------------------------- /utils/log_manager.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | import queue 4 | import sys 5 | 6 | from pathlib import Path 7 | from queue import Queue 8 | from threading import Thread, Event 9 | from typing import Dict, Any 10 | from time import time 11 | from logging.handlers import RotatingFileHandler 12 | 13 | 14 | class JSONFormatter(logging.Formatter): 15 | """Formats log records as JSON strings""" 16 | 17 | def format(self, record: logging.LogRecord) -> str: 18 | message = { 19 | 'timestamp': self.formatTime(record, self.datefmt), 20 | 'level': record.levelname, 21 | 'logger': record.name, 22 | 'message': record.getMessage(), 23 | 'module': record.module, 24 | 'function': record.funcName 25 | } 26 | 27 | # Add any extra context passed with the log 28 | if hasattr(record, 'detection_data'): 29 | message['detection_data'] = record.detection_data 30 | 31 | return json.dumps(message) 32 | 33 | 34 | class ConsoleFormatter(logging.Formatter): 35 | """Human-readable formatter for console output""" 36 | 37 | def format(self, record: 
logging.LogRecord) -> str: 38 | return f"{self.formatTime(record, self.datefmt)} - {record.levelname} - [{record.name}] - {record.getMessage()}" 39 | 40 | 41 | class LogManager: 42 | """Centralized logging management for OWL""" 43 | _instance = None 44 | _initialized = False 45 | 46 | BACKUP_COUNT = 100 47 | MAX_BYTES = 10 * 1024 * 1024 # 10MB per file 48 | 49 | def __new__(cls): 50 | if cls._instance is None: 51 | cls._instance = super().__new__(cls) 52 | return cls._instance 53 | 54 | def __init__(self): 55 | if self._initialized: 56 | return 57 | 58 | self._initialized = True 59 | self.detection_queue = Queue(maxsize=1000) 60 | self.stop_event = Event() 61 | self.batch_size = 100 62 | self.flush_interval = 1.0 # seconds 63 | self.last_flush = time() 64 | 65 | # Define instance-wide loggers 66 | self.logger = logging.getLogger("LogManager") 67 | self.detection_logger = logging.getLogger("detection") 68 | 69 | # Start the detection processing thread 70 | self.worker = Thread(target=self._process_detection_queue, daemon=True) 71 | self.worker.start() 72 | 73 | @classmethod 74 | def setup(cls, log_dir: Path, log_level: str = 'INFO') -> None: 75 | """Initialize the logging system""" 76 | instance = cls() 77 | 78 | log_dir.mkdir(exist_ok=True) 79 | 80 | root_logger = logging.getLogger() 81 | root_logger.setLevel(log_level) 82 | root_logger.handlers = [] 83 | 84 | console_handler = logging.StreamHandler(sys.stdout) 85 | console_handler.setFormatter(ConsoleFormatter( 86 | fmt='%(asctime)s - %(levelname)s - [%(name)s] - %(message)s', 87 | datefmt='%Y-%m-%d %H:%M:%S')) 88 | root_logger.addHandler(console_handler) 89 | 90 | main_handler = RotatingFileHandler( 91 | filename=log_dir / 'owl.jsonl', 92 | maxBytes=cls.MAX_BYTES, # 10MB 93 | backupCount=cls.BACKUP_COUNT) 94 | 95 | main_handler.setFormatter(JSONFormatter()) 96 | root_logger.addHandler(main_handler) 97 | 98 | detection_handler = RotatingFileHandler( 99 | filename=log_dir / 'detections.jsonl', 100 | maxBytes=cls.MAX_BYTES, # 10MB 101 | backupCount=cls.BACKUP_COUNT) 102 | 103 | detection_handler.setFormatter(JSONFormatter()) 104 | 105 | # Configure detection logger 106 | detection_logger = logging.getLogger('detection') 107 | detection_logger.handlers = [detection_handler] 108 | detection_logger.propagate = False # Don't propagate to root logger 109 | 110 | # Update the instance-level loggers 111 | instance.logger = root_logger 112 | instance.detection_logger = detection_logger 113 | 114 | @classmethod 115 | def get_logger(cls, name: str) -> logging.Logger: 116 | """Get a logger instance for a module""" 117 | return logging.getLogger(name) 118 | 119 | def log_detection(self, frame_id: int, detections: Dict[str, Any]) -> None: 120 | """Queue a detection event for logging""" 121 | self.detection_queue.put({ 122 | 'timestamp': time(), 123 | 'frame_id': frame_id, 124 | 'detections': detections 125 | }) 126 | 127 | def _process_detection_queue(self) -> None: 128 | """Background worker to process detection events""" 129 | batch = [] 130 | 131 | while not self.stop_event.is_set(): 132 | try: 133 | event = self.detection_queue.get(timeout=0.1) 134 | batch.append(event) 135 | 136 | # Flush if batch is full or interval exceeded 137 | if len(batch) >= self.batch_size or time() - self.last_flush >= self.flush_interval: 138 | self._flush_detection_batch(batch) 139 | batch.clear() 140 | self.last_flush = time() 141 | 142 | except queue.Empty: 143 | pass # No event, continue loop 144 | 145 | except Exception as e: 146 | self.logger.error(f"Error in 
processing detection queue: {e}", exc_info=True) 147 | 148 | if batch: 149 | self._flush_detection_batch(batch) 150 | 151 | def _flush_detection_batch(self, batch: list) -> None: 152 | """Write batch of detection events to log""" 153 | if batch: 154 | self.detection_logger.info( 155 | f"Processed batch of {len(batch)} detections", 156 | extra={'detection_data': batch} 157 | ) 158 | 159 | def stop(self) -> None: 160 | """Stop the background worker""" 161 | self.stop_event.set() 162 | self.worker.join() 163 | -------------------------------------------------------------------------------- /utils/output_manager.py: -------------------------------------------------------------------------------- 1 | from threading import Thread, Event, Condition, Lock 2 | from utils.vis_manager import RelayVis 3 | from utils.error_manager import OWLAlreadyRunningError 4 | from utils.log_manager import LogManager 5 | from enum import Enum 6 | from collections import deque 7 | from typing import Optional 8 | 9 | import subprocess 10 | import shutil 11 | import time 12 | import logging 13 | import platform 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | def get_platform_config() -> tuple[bool, Optional[Exception]]: 18 | """Determine platform and return testing status and lgpio error type""" 19 | system_platform = platform.platform().lower() 20 | is_raspberry_pi = 'rpi' in system_platform or 'aarch' in system_platform 21 | 22 | if is_raspberry_pi: 23 | from gpiozero import Buzzer, OutputDevice, LED 24 | import lgpio 25 | return False, lgpio.error 26 | 27 | is_windows = platform.system() == "Windows" 28 | system_name = "Windows" if is_windows else "unrecognized" 29 | logger.warning( 30 | f"The system is running on a {system_name} platform. GPIO disabled. Test mode active." 
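        # A hedged sketch of how this flag is consumed: with testing=True the
        # TestRelay/TestBuzzer/TestLED classes below stand in for the gpiozero
        # devices, e.g. RelayControl({0: 13}) would build TestRelay(13) rather
        # than OutputDevice(pin='BOARD13') - pin 13 here is a hypothetical example.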
31 | ) 32 | return True, None 33 | 34 | testing, lgpioERROR = get_platform_config() 35 | 36 | # Import GPIO components only if needed 37 | if not testing: 38 | from gpiozero import Buzzer, OutputDevice, LED 39 | 40 | # two test classes to run the analysis on a desktop computer if a "win32" platform is detected 41 | class TestRelay: 42 | def __init__(self, relay_number, verbose=False): 43 | self.relay_number = relay_number 44 | self.verbose = verbose 45 | 46 | def on(self): 47 | if self.verbose: 48 | print(f"[TEST] Relay {self.relay_number} ON") 49 | 50 | def off(self): 51 | if self.verbose: 52 | print(f"[TEST] Relay {self.relay_number} OFF") 53 | 54 | class TestBuzzer: 55 | def beep(self, on_time, off_time, n=1, verbose=False): 56 | for i in range(n): 57 | if verbose: 58 | print('BEEP') 59 | 60 | class TestLED: 61 | def __init__(self, pin): 62 | self.pin = pin 63 | 64 | def blink(self, on_time=0.1, off_time=0.1, n=1, verbose=False, background=True): 65 | if n is None: 66 | n = 1 67 | 68 | for i in range(n): 69 | if verbose: 70 | print(f'BLINK {self.pin}') 71 | 72 | def on(self): 73 | print(f'LED {self.pin} ON') 74 | 75 | def off(self): 76 | print(f'LED {self.pin} OFF') 77 | 78 | 79 | class BaseStatusIndicator: 80 | def __init__(self, save_directory, no_save=False): 81 | self.logger = LogManager.get_logger(__name__) 82 | 83 | self.save_directory = save_directory 84 | self.no_save = no_save 85 | self.testing = True if testing else False 86 | self.storage_used = None 87 | self.storage_total = None 88 | self.update_event = Event() 89 | self.running = True 90 | self.thread = None 91 | self.DRIVE_FULL = False 92 | 93 | self.error_code = None 94 | self.flashing_thread = None 95 | self._set_led_trigger("ACT", "none") 96 | self._set_led_trigger("PWR", "none") 97 | 98 | def start_storage_indicator(self): 99 | self.thread = Thread(target=self.run_update) 100 | self.thread.start() 101 | 102 | def run_update(self): 103 | while self.running: 104 | self.update() 105 | self.update_event.wait(10.5) 106 | self.update_event.clear() 107 | 108 | def update(self): 109 | if self.save_directory is not None: 110 | self.storage_total, self.storage_used, _ = shutil.disk_usage(self.save_directory) 111 | percent_full = (self.storage_used / self.storage_total) 112 | self._update_storage_indicator(percent_full) 113 | 114 | elif self.no_save: 115 | pass 116 | 117 | else: 118 | self.error(6) 119 | 120 | def error(self, error_code): 121 | self.error_code = error_code 122 | if self.flashing_thread is None or not self.flashing_thread.is_alive(): 123 | self.flashing_thread = Thread(target=self._flash_error_code) 124 | self.flashing_thread.start() 125 | 126 | def _flash_error_code(self): 127 | while self.running: 128 | for _ in range(self.error_code): 129 | self._blink_leds() 130 | time.sleep(0.2) # Interval between flashes 131 | time.sleep(2) # Pause after each sequence 132 | 133 | def _blink_leds(self): 134 | self._set_led_state("ACT", 1) 135 | self._set_led_state("PWR", 1) 136 | time.sleep(0.2) 137 | self._set_led_state("ACT", 0) 138 | self._set_led_state("PWR", 0) 139 | 140 | def _set_led_state(self, led, state): 141 | if not self.testing: 142 | LED_PATHS = { 143 | "ACT": "/sys/class/leds/ACT/brightness", 144 | "PWR": "/sys/class/leds/PWR/brightness" 145 | } 146 | try: 147 | subprocess.run( 148 | ['sudo', 'sh', '-c', f'echo {1 if state else 0} > {LED_PATHS[led]}'], 149 | check=True 150 | ) 151 | except subprocess.CalledProcessError as e: 152 | self.logger.error(msg=f"Error: Could not set {led} LED. 
{e}", exc_info=True) 153 | 154 | # Method to set LED trigger to 'none' to ensure manual control. 155 | # Based on: https://howtoraspberrypi.com/controler-led-verte-raspberry-pi-2/ 156 | def _set_led_trigger(self, led, trigger): 157 | if not self.testing: 158 | LED_TRIGGER_PATHS = { 159 | "ACT": "/sys/class/leds/ACT/trigger", 160 | "PWR": "/sys/class/leds/PWR/trigger" 161 | } 162 | try: 163 | subprocess.run( 164 | ['sudo', 'sh', '-c', f'echo {trigger} > {LED_TRIGGER_PATHS[led]}'], 165 | check=True 166 | ) 167 | except subprocess.CalledProcessError as e: 168 | self.logger.error(f"Error: Could not set {led} trigger to {trigger}.", exc_info=True) 169 | 170 | def _update_storage_indicator(self, percent_full): 171 | self.logger.warning("Called _update_storage_indicator() but it's not implemented.") 172 | raise NotImplementedError("This method should be implemented by subclasses") 173 | 174 | def stop(self): 175 | """Stop all threads and ensure resources are cleaned up.""" 176 | self.running = False 177 | self.update_event.set() # Wake up storage indicator thread 178 | 179 | if self.thread and self.thread.is_alive(): 180 | self.thread.join(timeout=1) # Ensure thread stops 181 | 182 | if self.flashing_thread and self.flashing_thread.is_alive(): 183 | self.flashing_thread.join(timeout=1) # Ensure flashing thread stops 184 | 185 | self._cleanup_leds() 186 | logger.info("[INFO] StatusIndicator stopped.") 187 | 188 | def _cleanup_leds(self): 189 | """Turn off LEDs and reset their states.""" 190 | try: 191 | self._set_led_state("ACT", 0) 192 | self._set_led_state("PWR", 0) 193 | except Exception as e: 194 | logger.error(f"Failed to clean up LEDs: {e}") 195 | 196 | 197 | class HeadlessStatusIndicator(BaseStatusIndicator): 198 | def __init__(self, save_directory=None, no_save=False): 199 | super().__init__(save_directory, no_save) 200 | 201 | def _update_storage_indicator(self, percent_full): 202 | if percent_full >= 0.90: 203 | self.DRIVE_FULL = True 204 | 205 | 206 | class UteStatusIndicator(BaseStatusIndicator): 207 | def __init__(self, save_directory, record_led_pin='BOARD38', storage_led_pin='BOARD40'): 208 | super().__init__(save_directory) 209 | LED_class = LED if not testing else TestLED 210 | self.record_LED = LED_class(pin=record_led_pin) 211 | self.storage_LED = LED_class(pin=storage_led_pin) 212 | 213 | def _update_storage_indicator(self, percent_full): 214 | if percent_full >= 0.90: 215 | self.DRIVE_FULL = True 216 | self.storage_LED.on() 217 | self.record_LED.off() 218 | elif percent_full >= 0.85: 219 | self.storage_LED.blink(on_time=0.2, off_time=0.2, n=None, background=True) 220 | elif percent_full >= 0.80: 221 | self.storage_LED.blink(on_time=0.5, off_time=0.5, n=None, background=True) 222 | elif percent_full >= 0.75: 223 | self.storage_LED.blink(on_time=0.5, off_time=1.5, n=None, background=True) 224 | elif percent_full >= 0.5: 225 | self.storage_LED.blink(on_time=0.5, off_time=3.0, n=None, background=True) 226 | else: 227 | self.storage_LED.blink(on_time=0.5, off_time=4.5, n=None, background=True) 228 | 229 | def setup_success(self): 230 | self.storage_LED.blink(on_time=0.1, off_time=0.2, n=3) 231 | self.record_LED.blink(on_time=0.1, off_time=0.2, n=3) 232 | 233 | def image_write_indicator(self): 234 | self.record_LED.blink(on_time=0.1, n=1, background=True) 235 | 236 | def alert_flash(self): 237 | self.storage_LED.blink(on_time=0.5, off_time=0.5, n=None, background=True) 238 | self.record_LED.blink(on_time=0.5, off_time=0.5, n=None, background=True) 239 | 240 | def error(self, 
error_code): 241 | self.error_code = error_code 242 | if self.flashing_thread is None or not self.flashing_thread.is_alive(): 243 | self.flashing_thread = Thread(target=self._flash_error_code) 244 | self.flashing_thread.start() 245 | 246 | def _flash_error_code(self): 247 | while self.running: 248 | for _ in range(self.error_code): 249 | self._blink_leds() 250 | self.storage_LED.blink(on_time=0.2, n=1, background=False) # Flash storage LED 251 | self.record_LED.blink(on_time=0.2, n=1, background=False) # Flash record LED 252 | time.sleep(0.2) # Interval between flashes 253 | time.sleep(2) # Pause after each sequence 254 | 255 | def stop(self): 256 | super().stop() 257 | if self.flashing_thread and self.flashing_thread.is_alive(): 258 | self.flashing_thread.join() 259 | self.storage_LED.off() 260 | self.record_LED.off() 261 | 262 | 263 | class AdvancedIndicatorState(Enum): 264 | IDLE = 0 265 | RECORDING = 1 266 | DETECTING = 2 267 | NOTIFICATION = 3 268 | RECORDING_AND_DETECTING = 4 269 | ERROR = 5 270 | 271 | 272 | class AdvancedStatusIndicator(BaseStatusIndicator): 273 | def __init__(self, save_directory, status_led_pin='BOARD37'): 274 | super().__init__(save_directory) 275 | LED_class = LED if not testing else TestLED 276 | self.led = LED_class(pin=status_led_pin) 277 | self.state = AdvancedIndicatorState.IDLE 278 | self.error_queue = deque() 279 | self.state_lock = Lock() 280 | self.weed_detection_enabled = False 281 | self.image_recording_enabled = False 282 | self.flashing_thread = None 283 | 284 | def _update_storage_indicator(self, percent_full): 285 | if percent_full >= 0.90: 286 | self.DRIVE_FULL = True 287 | self.error(1) # Use error code 1 for drive full 288 | 289 | def setup_success(self): 290 | self.led.blink(on_time=0.1, off_time=0.1, n=2) 291 | 292 | def _update_state(self): 293 | if self.state != AdvancedIndicatorState.ERROR: 294 | if self.weed_detection_enabled and self.image_recording_enabled: 295 | self.state = AdvancedIndicatorState.RECORDING_AND_DETECTING 296 | elif self.weed_detection_enabled: 297 | self.state = AdvancedIndicatorState.DETECTING 298 | elif self.image_recording_enabled: 299 | self.state = AdvancedIndicatorState.RECORDING 300 | else: 301 | self.state = AdvancedIndicatorState.IDLE 302 | 303 | def enable_weed_detection(self): 304 | with self.state_lock: 305 | self.weed_detection_enabled = True 306 | self._update_state() 307 | 308 | def disable_weed_detection(self): 309 | with self.state_lock: 310 | self.weed_detection_enabled = False 311 | self._update_state() 312 | 313 | def enable_image_recording(self): 314 | with self.state_lock: 315 | self.image_recording_enabled = True 316 | self._update_state() 317 | 318 | def disable_image_recording(self): 319 | with self.state_lock: 320 | self.image_recording_enabled = False 321 | self._update_state() 322 | 323 | def image_write_indicator(self): 324 | with self.state_lock: 325 | if self.state not in [AdvancedIndicatorState.ERROR, AdvancedIndicatorState.DETECTING, 326 | AdvancedIndicatorState.RECORDING_AND_DETECTING]: 327 | try: 328 | self.led.blink(on_time=0.1, off_time=0.1, n=1, background=True) 329 | except KeyboardInterrupt: 330 | logger.info("[INFO] KeyboardInterrupt received during image_write_indicator. 
Turning off LED.") 331 | self.led.off() 332 | raise 333 | except Exception as e: 334 | logger.error(f"Error in image_write_indicator: {e}", exc_info=True) 335 | 336 | def weed_detect_indicator(self): 337 | with self.state_lock: 338 | if self.state in [AdvancedIndicatorState.DETECTING, AdvancedIndicatorState.RECORDING_AND_DETECTING]: 339 | try: 340 | self.led.blink(on_time=0.05, off_time=0.05, n=1, background=True) 341 | except KeyboardInterrupt: 342 | logger.info("[INFO] KeyboardInterrupt received during weed_detect_indicator. Turning off LED.") 343 | self.led.off() 344 | raise 345 | except Exception as e: 346 | logger.error(f"Error in weed_detect_indicator: {e}", exc_info=True) 347 | 348 | def generic_notification(self): 349 | try: 350 | with self.state_lock: 351 | init_state = self.state 352 | self.state = AdvancedIndicatorState.NOTIFICATION 353 | self.led.off() # Reset LED state before notification 354 | 355 | self.led.blink(on_time=0.1, off_time=0.1, n=2, background=False) 356 | self.state = init_state 357 | except KeyboardInterrupt: 358 | logger.info("[INFO] KeyboardInterrupt received during generic_notification. Turning off LED.") 359 | self.led.off() 360 | raise 361 | except Exception as e: 362 | logger.error(f"Error in generic_notification: {e}", exc_info=True) 363 | 364 | def error(self, error_code): 365 | self.error_code = error_code 366 | with self.state_lock: 367 | self.state = AdvancedIndicatorState.ERROR 368 | if self.flashing_thread is None or not self.flashing_thread.is_alive(): 369 | self.flashing_thread = Thread(target=self._flash_error_code) 370 | self.flashing_thread.start() 371 | 372 | def _flash_error_code(self): 373 | try: 374 | while self.running: 375 | for _ in range(self.error_code): 376 | self._blink_leds() 377 | time.sleep(0.2) 378 | time.sleep(2) 379 | except KeyboardInterrupt: 380 | logger.info("[INFO] KeyboardInterrupt received in _flash_error_code. 
Exiting.") 381 | except Exception as e: 382 | logger.error(f"Error in _flash_error_code: {e}", exc_info=True) 383 | finally: 384 | self._cleanup_leds() 385 | 386 | def stop(self): 387 | super().stop() 388 | if self.flashing_thread and self.flashing_thread.is_alive(): 389 | self.flashing_thread.join() 390 | self.led.off() 391 | 392 | 393 | # control class for the relay board 394 | class RelayControl: 395 | def __init__(self, relay_dict): 396 | self.logger = LogManager.get_logger(__name__) 397 | 398 | self.testing = True if testing else False 399 | self.relay_dict = relay_dict 400 | self.on = False 401 | 402 | # used to toggle activation of GPIO pins for LEDs 403 | self.field_data_recording = False 404 | 405 | if not self.testing: 406 | try: 407 | self.buzzer = Buzzer(pin='BOARD7') 408 | 409 | except Exception as e: 410 | if isinstance(e, lgpioERROR) and 'GPIO busy' in str(e): 411 | raise OWLAlreadyRunningError("OWL instance may already be running.") from e 412 | else: 413 | raise 414 | 415 | for relay, board_pin in self.relay_dict.items(): 416 | self.relay_dict[relay] = OutputDevice(pin=f'BOARD{board_pin}') 417 | 418 | else: 419 | self.buzzer = TestBuzzer() 420 | for relay, board_pin in self.relay_dict.items(): 421 | self.relay_dict[relay] = TestRelay(board_pin) 422 | 423 | def relay_on(self, relay_number, verbose=True): 424 | relay = self.relay_dict[relay_number] 425 | relay.on() 426 | 427 | if verbose: 428 | print(f"Relay {relay_number} ON") 429 | 430 | def relay_off(self, relay_number, verbose=True): 431 | relay = self.relay_dict[relay_number] 432 | relay.off() 433 | 434 | if verbose: 435 | print(f"Relay {relay_number} OFF") 436 | 437 | def beep(self, duration=0.2, repeats=2): 438 | self.buzzer.beep(on_time=duration, off_time=(duration / 2), n=repeats) 439 | 440 | def all_on(self, verbose=False): 441 | for relay in self.relay_dict.keys(): 442 | self.relay_on(relay, verbose=verbose) 443 | 444 | def all_off(self, verbose=False): 445 | for relay in self.relay_dict.keys(): 446 | self.relay_off(relay, verbose=verbose) 447 | 448 | def remove(self, relay_number): 449 | self.relay_dict.pop(relay_number, None) 450 | 451 | def clear(self): 452 | self.relay_dict = {} 453 | 454 | def stop(self): 455 | self.clear() 456 | self.all_off() 457 | 458 | # this class does the hard work of receiving detection 'jobs' and queuing them to be actuated. It only turns a nozzle on 459 | # if the sprayDur has not elapsed or if the nozzle isn't already on. 460 | class RelayController: 461 | def __init__(self, relay_dict, vis=False, status_led=None): 462 | self.logger = LogManager.get_logger(__name__) 463 | 464 | self.relay_dict = relay_dict 465 | self.vis = vis 466 | self.status_led = status_led 467 | # instantiate relay control with supplied relay dictionary to map to correct board pins 468 | try: 469 | self.relay = RelayControl(self.relay_dict) 470 | except OWLAlreadyRunningError: 471 | self.logger.error("Failed to initialize RelayControl: OWL is already running and using GPIO pin 7.") 472 | raise 473 | self.relay_queue_dict = {} 474 | self.relay_condition_dict = {} 475 | 476 | # create a job queue and Condition() for each nozzle 477 | self.logger.info("[INFO] Setting up nozzles...") 478 | self.relay_vis = RelayVis(relays=len(self.relay_dict.keys())) 479 | for relay_number in range(0, len(self.relay_dict)): 480 | self.relay_queue_dict[relay_number] = deque(maxlen=5) 481 | self.relay_condition_dict[relay_number] = Condition() 482 | 483 | # create the consumer threads, setDaemon and start the threads. 
458 | # This class does the hard work of receiving detection 'jobs' and queuing them for actuation. It only switches a
459 | # nozzle on if it isn't already on, and keeps it on until the spray duration of the latest job has elapsed.
460 | class RelayController:
461 |     def __init__(self, relay_dict, vis=False, status_led=None):
462 |         self.logger = LogManager.get_logger(__name__)
463 | 
464 |         self.relay_dict = relay_dict
465 |         self.vis = vis
466 |         self.status_led = status_led
467 |         # instantiate relay control with supplied relay dictionary to map to correct board pins
468 |         try:
469 |             self.relay = RelayControl(self.relay_dict)
470 |         except OWLAlreadyRunningError:
471 |             self.logger.error("Failed to initialize RelayControl: OWL is already running and using GPIO pin 7.")
472 |             raise
473 |         self.relay_queue_dict = {}
474 |         self.relay_condition_dict = {}
475 | 
476 |         # create a job queue and Condition() for each nozzle
477 |         self.logger.info("[INFO] Setting up nozzles...")
478 |         self.relay_vis = RelayVis(relays=len(self.relay_dict.keys()))
479 |         for relay_number in range(0, len(self.relay_dict)):
480 |             self.relay_queue_dict[relay_number] = deque(maxlen=5)
481 |             self.relay_condition_dict[relay_number] = Condition()
482 | 
483 |             # create the consumer threads, mark them as daemons and start them
484 |             relay_thread = Thread(target=self.consumer, args=[relay_number])
485 |             relay_thread.daemon = True
486 |             relay_thread.start()
487 | 
488 |         time.sleep(1)
489 |         self.logger.info("[INFO] Nozzle setup complete. Initiating camera...")
490 |         self.relay.beep(duration=0.5)
491 | 
492 |     def receive(self, relay, time_stamp, location=0, delay=0, duration=1):
493 |         """
494 |         Adds a new job to the specified relay queue (GPS location data still to be added). The time stamp records
495 |         the true time of weed detection from the main thread, which is compared with the time of relay activation
496 |         for accurate on-durations. Processing speed sets a minimum on-duration of roughly 0.3 s; shorter requests default to 0.
497 |         :param relay: relay id (zero based)
498 |         :param time_stamp: this is the time of detection
499 |         :param location: GPS functionality to be added here
500 |         :param delay: on delay to be added in the future
501 |         :param duration: duration of spray
502 |         """
503 |         input_queue_message = [relay, time_stamp, delay, duration]
504 |         input_queue = self.relay_queue_dict[relay]
505 |         input_condition = self.relay_condition_dict[relay]
506 |         # notifies the consumer thread when something has been added to the queue
507 |         with input_condition:
508 |             input_queue.append(input_queue_message)
509 |             input_condition.notify()
510 | 
511 |     def consumer(self, relay):
512 |         """
513 |         Takes a single parameter - relay - which selects the matching deque and Condition from the dictionaries.
514 |         The consumer method is threaded per relay and waits until it is notified that a new job has been added
515 |         by the receive method. It then compares the time of detection with the time of spraying to activate that
516 |         relay for the required length of time.
517 |         :param relay: relay id number
518 |         """
519 |         self.running = True
520 |         input_condition = self.relay_condition_dict[relay]
521 |         input_condition.acquire()
522 |         relay_on = False
523 |         relay_queue = self.relay_queue_dict[relay]
524 | 
525 |         while self.running:
526 |             while relay_queue:
527 |                 job = relay_queue.popleft()
528 |                 input_condition.release()
529 |                 # clamp the on-duration so it can never be negative
530 |                 onDur = max(0, job[3] - (time.time() - job[1]))
531 | 
532 |                 if not relay_on:
533 |                     time.sleep(job[2])  # add in the delay variable
534 |                     self.relay.relay_on(relay, verbose=False)
535 |                     if self.status_led:
536 |                         self.status_led.blink(on_time=0.1, n=1, background=True)
537 | 
538 |                     if self.vis:
539 |                         self.relay_vis.update(relay=relay, status=True)
540 | 
541 |                     relay_on = True
542 | 
543 |                 try:
544 |                     time.sleep(onDur)
545 | 
546 |                 except ValueError:
547 |                     time.sleep(0)
548 | 
549 |                 input_condition.acquire()
550 | 
551 |             if len(relay_queue) == 0:
552 |                 self.relay.relay_off(relay, verbose=False)
553 | 
554 |                 if self.vis:
555 |                     self.relay_vis.update(relay=relay, status=False)
556 |                 relay_on = False
557 | 
558 |             input_condition.wait()
559 | 
560 |     def stop(self):
561 |         self.running = False
562 |         self.relay.stop()  # make sure every relay is switched off on shutdown
563 | 
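# A minimal sketch of how a detection loop hands jobs to the controller; the pin
# mapping and spray duration are illustrative only:
#
#   controller = RelayController(relay_dict={0: 11, 1: 13})
#   controller.receive(relay=0, time_stamp=time.time(), duration=0.5)
#   ...
#   controller.stop()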
564 | if __name__ == "__main__":
565 |     print("Starting test of status indicators...")
566 | 
567 |     # Test HeadlessStatusIndicator
568 |     print("\nTesting HeadlessStatusIndicator...")
569 |     headless_indicator = HeadlessStatusIndicator(save_directory="output")
570 |     headless_indicator.error(3)  # show an error with 3 flashes
571 |     headless_indicator.stop()
572 | 
573 |     # Test UteStatusIndicator
574 |     print("\nTesting UteStatusIndicator...")
575 |     ute_indicator = UteStatusIndicator(save_directory="output", record_led_pin='BOARD38', storage_led_pin='BOARD40')
576 |     ute_indicator.error(4)  # show an error with 4 flashes
577 |     ute_indicator.stop()
578 | 
579 |     # Test AdvancedStatusIndicator
580 |     print("\nTesting AdvancedStatusIndicator...")
581 |     advanced_indicator = AdvancedStatusIndicator(save_directory="output", status_led_pin='BOARD37')
582 |     advanced_indicator.error(2)  # show an error with 2 flashes
583 |     advanced_indicator.stop()
584 | 
585 |     print("\nTest complete.")
--------------------------------------------------------------------------------
/utils/video_manager.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import time
3 | 
4 | from threading import Thread, Event, Condition, Lock
5 | from utils.log_manager import LogManager
6 | 
7 | # determine availability of picamera versions; prefer picamera2 where available
8 | try:
9 |     from picamera.array import PiRGBArray
10 |     from picamera import PiCamera
11 |     PICAMERA_VERSION = 'legacy'
12 | 
13 | except Exception:
14 |     PICAMERA_VERSION = None
15 | 
16 | try:
17 |     from picamera2 import Picamera2
18 |     from libcamera import Transform
19 |     import libcamera
20 |     PICAMERA_VERSION = 'picamera2'
21 | 
22 | except Exception:
23 |     pass  # keep the result of the legacy check above rather than clobbering it with None
24 | 
25 | # class to support webcams
26 | class WebcamStream:
27 |     def __init__(self, src=0):
28 |         self.logger = LogManager.get_logger(__name__)
29 |         self.name = "WebcamStream"
30 |         self.logger.info(f'Camera type: {self.name}')
31 |         self.stream = cv2.VideoCapture(src)
32 | 
33 |         self.frame_width = self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)
34 |         self.frame_height = self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)
35 | 
36 |         # Check if the stream opened successfully
37 |         if not self.stream.isOpened():
38 |             self.stream.release()
39 |             self.logger.error(f'Unable to open video source: {src}')
40 |             raise ValueError(f"Unable to open video source: {src}")
41 | 
42 |         # read the first frame from the stream
43 |         self.grabbed, self.frame = self.stream.read()
44 |         if not self.grabbed:
45 |             self.stream.release()
46 |             self.logger.error(f'Unable to read from video source: {src}')
47 |             raise ValueError(f"Unable to read from video source: {src}")
48 | 
49 |         # initialize the thread name, stop event, and the thread itself
50 |         self.stop_event = Event()
51 |         self.thread = Thread(target=self.update, name=self.name, args=())
52 |         self.thread.daemon = True
53 | 
54 |     def start(self):
55 |         self.thread.start()
56 |         return self
57 | 
58 |     def update(self):
59 |         # keep looping infinitely until the thread is stopped
60 |         try:
61 |             while not self.stop_event.is_set():
62 |                 # Read the next frame from the stream
63 |                 self.grabbed, self.frame = self.stream.read()
64 | 
65 |                 # If not grabbed, the end of the stream has been reached
66 |                 if not self.grabbed:
67 |                     self.stop_event.set()  # Ensure the loop stops if no frame is grabbed
68 |         except Exception as e:
69 |             self.logger.error(f"Exception in WebcamStream update loop: {e}", exc_info=True)
70 |         finally:
71 |             # Clean up resources after loop is done
72 |             self.stream.release()
73 | 
74 |     def read(self):
75 |         # return the frame most recently read
76 |         return self.frame
77 | 
78 |     def stop(self):
79 |         self.stop_event.set()
80 |         self.thread.join()
81 | 
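# A minimal usage sketch (device index 0 is illustrative):
#
#   stream = WebcamStream(src=0).start()
#   frame = stream.read()    # most recently grabbed frame
#   stream.stop()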
82 | 
83 | class PiCamera2Stream:
84 |     def __init__(self, src=0, resolution=(416, 320), exp_compensation=-2, **kwargs):
85 |         self.logger = LogManager.get_logger(__name__)
86 |         self.name = 'Picamera2Stream'
87 |         self.logger.info(f'Camera type: {self.name}')
88 |         self.size = resolution  # picamera2 uses size instead of resolution, keeping this consistent
89 |         self.frame_width = None
90 |         self.frame_height = None
91 |         self.frame = None
92 |         self.frame_available = False
93 | 
94 |         self.stopped = Event()
95 |         self.condition = Condition()
96 |         self.lock = Lock()
97 | 
98 |         # set the picamera2 config and controls; refer to the picamera2 documentation for full explanations
99 | 
100 |         self.configurations = {
101 |             # for those checking closely, using RGB888 may seem incorrect, however libcamera means a BGR format. Check
102 |             # https://github.com/raspberrypi/picamera2/issues/848 for full explanation.
103 |             "format": 'RGB888',
104 |             "size": self.size
105 |         }
106 | 
107 |         self.controls = {
108 |             "AeExposureMode": 1,
109 |             "AwbMode": libcamera.controls.AwbModeEnum.Daylight,
110 |             "ExposureValue": exp_compensation
111 |         }
112 |         # log the format and control settings separately for clearer diagnostics
113 |         self.logger.info("Setting camera format", extra=dict(
114 |             format='RGB888',
115 |             image_size=list(self.size),
116 |             note='RGB888 represents BGR format in libcamera'))
117 | 
118 |         self.logger.info("Setting camera controls", extra=dict(
119 |             exposure_mode=1,
120 |             awb_mode='Daylight',
121 |             exposure_value=exp_compensation))
122 | 
123 |         # Update controls with any additional/overridden parameters
124 |         self.controls.update(kwargs)
125 | 
126 |         # Initialize the camera
127 |         self.camera = Picamera2(src)
128 |         self.camera_model = self.camera.camera_properties['Model']
129 | 
130 |         if self.camera_model == 'imx296':
131 |             self.logger.info('[INFO] Using IMX296 Global Shutter Camera')
132 | 
133 |         elif self.camera_model == 'imx477':
134 |             self.logger.info('[INFO] Using IMX477 HQ Camera')
135 | 
136 |         elif self.camera_model == 'imx708':
137 |             self.logger.info('[INFO] Using Raspberry Pi Camera Module 3. Setting focal point at 1.2 m...')
138 |             self.controls['AfMode'] = libcamera.controls.AfModeEnum.Manual
139 |             self.controls['LensPosition'] = 1.2
140 | 
141 |         else:
142 |             self.logger.info('[INFO] Unrecognised camera module, continuing with default settings.')
143 | 
144 |         try:
145 |             self.config = self.camera.create_preview_configuration(main=self.configurations,
146 |                                                                    transform=Transform(hflip=True, vflip=True),
147 |                                                                    queue=False,
148 |                                                                    controls=self.controls)
149 |             self.camera.configure(self.config)
150 |             self.camera.start()
151 | 
152 |             # set dimensions directly from the video feed
153 |             self.frame_width = self.camera.camera_configuration()['main']['size'][0]
154 |             self.frame_height = self.camera.camera_configuration()['main']['size'][1]
155 | 
156 |             # allow the camera time to warm up
157 |             time.sleep(2)
158 | 
159 |         except Exception as e:
160 |             self.logger.error(f"Failed to initialize PiCamera2: {e}", exc_info=True)
161 |             raise
162 | 
163 |         if self.frame_width != resolution[0] or self.frame_height != resolution[1]:
164 |             message = (f"The actual frame size ({self.frame_width}x{self.frame_height}) "
165 |                        f"differs from the expected resolution ({resolution[0]}x{resolution[1]}).")
166 |             self.logger.warning(message)
167 | 
168 |     def start(self):
169 |         # Start the thread to update frames
170 |         self.thread = Thread(target=self.update, name=self.name, args=())
171 |         self.thread.daemon = True
172 |         self.thread.start()
173 |         return self
174 | 
175 |     def update(self):
176 |         try:
177 |             while not self.stopped.is_set():
178 |                 frame = self.camera.capture_array("main")
179 |                 if frame is not None:
180 |                     with self.lock:
181 |                         self.frame = frame
182 |                         self.frame_available = True
183 | 
184 |                     with self.condition:
185 |                         self.condition.notify_all()
186 | 
187 |         except Exception as e:
188 |             self.logger.error(f"Exception in PiCamera2Stream update loop: {e}", exc_info=True)
189 |         finally:
190 |             self.camera.stop()  # Ensure camera resources are released properly
191 | 
192 |     def read(self):
193 |         # return the frame most recently read
194 |         with self.condition:
195 |             while not self.frame_available:
196 |                 self.condition.wait()
197 | 
198 |         with self.lock:  # acquire the lock; 'while self.lock' only tested the Lock object's truthiness
199 |             self.frame_available = False
200 |             return self.frame
201 | 
202 |     def stop(self):
203 |         self.stopped.set()
204 |         self.thread.join()
205 |         self.camera.stop()
206 |         time.sleep(2)  # Allow time for the camera to be released properly
207 | 
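# A minimal usage sketch (the resolution shown is the OWL default):
#
#   stream = PiCamera2Stream(resolution=(416, 320)).start()
#   frame = stream.read()    # blocks until a fresh frame arrives; BGR despite 'RGB888'
#   stream.stop()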
208 | 
209 | class PiCameraStream:
210 |     def __init__(self, resolution=(416, 320), exp_compensation=-2, **kwargs):
211 |         self.logger = LogManager.get_logger(__name__)
212 |         self.name = 'PicameraStream'
213 |         self.logger.info(f'Camera type: {self.name}')
214 |         self.frame_width = None
215 |         self.frame_height = None
216 | 
217 |         try:
218 |             self.camera = PiCamera()
219 | 
220 |             self.camera.resolution = resolution
221 |             self.camera.exposure_mode = 'beach'
222 |             self.camera.awb_mode = 'auto'
223 |             self.camera.sensor_mode = 0
224 |             self.camera.exposure_compensation = exp_compensation
225 | 
226 |             self.frame_width = self.camera.resolution[0]
227 |             self.frame_height = self.camera.resolution[1]
228 | 
229 |             if self.frame_width != resolution[0] or self.frame_height != resolution[1]:
230 |                 message = (f"The actual frame size ({self.frame_width}x{self.frame_height}) "
231 |                            f"differs from the expected resolution ({resolution[0]}x{resolution[1]}).")
232 |                 self.logger.warning(message)
233 | 
234 |             # Set optional camera parameters (refer to PiCamera docs)
235 |             for (arg, value) in kwargs.items():
236 |                 setattr(self.camera, arg, value)
237 | 
238 |             # Initialize the stream
239 |             self.rawCapture = PiRGBArray(self.camera, size=resolution)
240 |             self.stream = self.camera.capture_continuous(self.rawCapture,
241 |                                                           format="bgr",
242 |                                                           use_video_port=True)
243 | 
244 |         except Exception as e:
245 |             self.logger.error(f"Failed to initialize PiCamera: {e}", exc_info=True)
246 |             raise
247 | 
248 |         self.frame = None
249 |         self.stopped = Event()
250 |         self.thread = Thread(target=self.update, name=self.name, args=())
251 |         self.thread.daemon = True  # Thread will close when main program exits
252 | 
253 |     def start(self):
254 |         # Start the thread to read frames from the video stream
255 |         self.thread.start()
256 |         return self
257 | 
258 |     def update(self):
259 |         try:
260 |             for f in self.stream:
261 |                 self.frame = f.array
262 |                 self.rawCapture.truncate(0)
263 | 
264 |                 if self.stopped.is_set():
265 |                     break
266 |         except Exception as e:
267 |             self.logger.error(f"Exception in PiCameraStream update loop: {e}", exc_info=True)
268 | 
269 |         finally:
270 |             self.stream.close()
271 |             self.rawCapture.close()
272 |             self.camera.close()
273 | 
274 |     def read(self):
275 |         # return the frame most recently read
276 |         return self.frame
277 | 
278 |     def stop(self):
279 |         # Signal the thread to stop
280 |         self.stopped.set()
281 | 
282 |         # Wait for the thread to finish
283 |         self.thread.join()
284 | 
285 | 
286 | # overarching class to determine which stream to use
287 | class VideoStream:
288 |     def __init__(self, src=0, resolution=(416, 320), exp_compensation=-2, **kwargs):
289 |         self.CAMERA_VERSION = PICAMERA_VERSION if PICAMERA_VERSION is not None else 'webcam'
290 |         self.logger = LogManager.get_logger(__name__)
291 |         self.frame_height = None
292 |         self.frame_width = None
293 | 
294 |         if self.CAMERA_VERSION == 'legacy':
295 |             self.stream = PiCameraStream(resolution=resolution, exp_compensation=exp_compensation, **kwargs)
296 | 
297 |         elif self.CAMERA_VERSION == 'picamera2':
298 |             self.stream = PiCamera2Stream(src=src, resolution=resolution, exp_compensation=exp_compensation, **kwargs)
299 | 
300 |         elif self.CAMERA_VERSION == 'webcam':
301 |             self.stream = WebcamStream(src=src)
302 | 
303 |         else:
304 |             self.logger.error(f"Unsupported camera version: {self.CAMERA_VERSION}")
305 |             raise ValueError(f"Unsupported camera version: {self.CAMERA_VERSION}")
306 | 
307 |         # set the image dimensions directly from the frame streamed
308 |         self.frame_width = self.stream.frame_width
309 |         self.frame_height = self.stream.frame_height
310 | 
311 |     def start(self):
312 |         # start the threaded video stream
313 |         return self.stream.start()
314 | 
315 |     def update(self):
316 |         # grab the next frame from the stream
317 |         self.stream.update()
318 | 
319 |     def read(self):
320 |         # return the current frame
321 |         return self.stream.read()
322 | 
323 |     def stop(self):
324 |         # stop the thread and release any resources
325 |         self.stream.stop()
326 | 
327 | 
328 | 
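# A minimal usage sketch of the VideoStream wrapper, which selects the best
# available backend (picamera2, legacy picamera or a webcam) automatically:
#
#   stream = VideoStream(resolution=(416, 320)).start()
#   frame = stream.read()
#   stream.stop()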
--------------------------------------------------------------------------------
/utils/vis_manager.py:
--------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | import warnings
4 | 
5 | class BasicTerminal:
6 |     def __init__(self):
7 |         self.width = 80
8 |         self.height = 24
9 |         self.class_name = "BasicTerminal"
10 | 
11 |     @staticmethod
12 |     def move_x(x):
13 |         return f"\033[{x}G"
14 | 
15 |     @property
16 |     def normal(self):
17 |         return "\033[0m"
18 | 
19 |     def on_color_rgb(self, r, g, b):
20 |         return f"\033[48;2;{r};{g};{b}m"
21 | 
22 |     def __str__(self):
23 |         return f"<{self.class_name} object with width={self.width} and height={self.height}>"
24 | 
25 | try:
26 |     from blessed import Terminal
27 | 
28 | except ModuleNotFoundError:
29 |     warnings.warn("[WARNING] blessed library not found. Using basic terminal functionality. "
30 |                   "\nRun 'pip install blessed' and check the OWL installation to fix this.")
31 |     Terminal = BasicTerminal
32 | 
33 | 
34 | class RelayVis:
35 |     def __init__(self, relays=4):
36 |         self.term = Terminal()
37 |         self.relays = relays
38 |         self.width = self.term.width
39 |         self.height = self.term.height
40 |         self.box_width = 10
41 |         self.active_color = [50, 255, 50]
42 |         self.inactive_color = [100, 100, 100]
43 | 
44 |         self.status_list = [False for i in range(relays)]
45 |         self.x_positions = [(relay * self.box_width + relay * 2) for relay in range(relays)]
46 | 
47 |     def setup(self):
48 |         for id, pos in enumerate(self.x_positions):
49 |             print(self.term.move_x(pos), f'Nozzle {id + 1}', end=' ')
50 |         print('\r')
51 |         for i, x_pos in enumerate(self.x_positions):
52 |             r, g, b = self.inactive_color
53 |             box_str = self.term.on_color_rgb(r, g, b) + " " * self.box_width + self.term.normal
54 |             print(self.term.move_x(x_pos) + f"{box_str}", end='', flush=True)
55 | 
56 |     def update(self, relay=1, status=True):
57 |         self.status_list[relay] = status
58 | 
59 |         # choose the colour once, then redraw the box for this relay
60 |         r, g, b = self.active_color if self.status_list[relay] else self.inactive_color
61 |         box_str = self.term.on_color_rgb(r, g, b) + " " * self.box_width + self.term.normal
62 |         print(self.term.move_x(self.x_positions[relay]) + f"{box_str}", end="", flush=True)
63 | 
64 |     def close(self):
65 |         print("\n", end='\n')
66 | 
67 | if __name__ == "__main__":
68 |     box_drawer = RelayVis(relays=4)
69 |     box_drawer.setup()  # draw the labels and initial boxes before updating
70 | 
71 |     for i in range(0, 100):
72 |         relay = np.random.randint(0, 4)
73 |         status = bool(np.random.randint(0, 2))
74 |         box_drawer.update(relay=relay, status=status)
75 |         time.sleep(0.01)
76 |     box_drawer.close()
--------------------------------------------------------------------------------
/version.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from dataclasses import dataclass
3 | import platform
4 | import sys
5 | import subprocess
6 | from typing import Optional
7 | 
8 | 
9 | @dataclass
10 | class Version:
11 |     major: int = 2
12 |     minor: int = 3
13 |     patch: int = 0
14 |     tag: Optional[str] = None
15 | 
16 |     def __str__(self) -> str:
17 |         return f"{self.major}.{self.minor}.{self.patch}" + (f"-{self.tag}" if self.tag else "")
18 | 
19 | VERSION = Version()
20 | 
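# Worked example of the version string:
#   str(Version(2, 3, 0))            -> '2.3.0'
#   str(Version(2, 3, 0, tag='rc1')) -> '2.3.0-rc1'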
21 | class SystemInfo:
22 |     logger = logging.getLogger("SystemInfo")
23 | 
24 |     @staticmethod
25 |     def get_os_info() -> dict:
26 |         return {
27 |             'system': platform.system(),
28 |             'release': platform.release(),
29 |             'version': platform.version(),
30 |             'machine': platform.machine(),
31 |             'processor': platform.processor()
32 |         }
33 | 
34 |     @staticmethod
35 |     def get_python_info() -> dict:
36 |         return {
37 |             'version': sys.version,
38 |             'implementation': platform.python_implementation(),
39 |             'compiler': platform.python_compiler()
40 |         }
41 | 
42 |     @staticmethod
43 |     def get_rpi_info() -> Optional[str]:
44 |         try:
45 |             with open('/proc/device-tree/model', 'r') as f:
46 |                 return f.read().strip('\x00')
47 |         except FileNotFoundError:
48 |             SystemInfo.logger.warning("Raspberry Pi information not found.")
49 |             return None
50 | 
51 |     @staticmethod
52 |     def get_git_info() -> Optional[dict]:
53 |         try:
54 |             # Check if git is available first
55 |             if subprocess.call(['which', 'git'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) != 0:
56 |                 raise FileNotFoundError("Git not available")
57 | 
58 |             commit = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode('ascii').strip()
59 |             branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('ascii').strip()
60 |             return {'commit': commit, 'branch': branch}
61 |         except (subprocess.CalledProcessError, FileNotFoundError) as e:
62 |             SystemInfo.logger.warning("Git information could not be retrieved: %s", e)
63 |             return None
64 | 
--------------------------------------------------------------------------------