├── Images └── BirdTable01.jpg ├── Pipfile ├── README.md ├── Todo.md ├── archive ├── Camera.py ├── TwitcherDB.db ├── TwitcherPiServer.py ├── capture.py ├── forms.py ├── graph.pbtxt ├── index.html ├── models.py ├── oldcapture.py ├── olddal.py ├── routes.py └── test.py ├── capture2.py ├── checkpoint ├── dal.py ├── dal2.py ├── docker ├── redis │ ├── docker-compose.yml │ ├── dockerfile │ └── redis.conf └── webapp │ ├── Dockerfile │ └── docker-compose.yml ├── main.js ├── model.ckpt-48697.index ├── model.ckpt-48697.meta ├── models └── research │ └── object_detection │ └── bird.py ├── options.js ├── pipeline.config ├── redis_test.py ├── requirements.txt ├── seed_db.py ├── ssd_mobilenet_v1.config ├── static └── css │ └── main.css ├── style.css ├── templates ├── about.html ├── home.html ├── index.html ├── layout.html ├── login.html └── signup.html ├── test.html ├── webserver.py └── webserver2.py /Images/BirdTable01.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevinmcaleer/TwitcherPi/50cf5500f578b28485c8b087f2381e70738b936a/Images/BirdTable01.jpg -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | 8 | [packages] 9 | 10 | [requires] 11 | python_version = "3.7" 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TwitcherPi 2 | 3 | `TwitcherPi` is an opensource AI bird classification system based on Tensorflow, intended to run on a Raspberry Pi Zero. The Rapsberry Pi will take pictures every x minutes (or will enable one to be taken based on a trigger from a motion sensor). 
4 | 5 | ## Machine Learning model 6 | 7 | The images will be stored in a Redis database, for later classification. 8 | You can visit the RaspberryPi Zero hosted TwicherPi website and classify the pictures waiting to be processed using the web user interface. The interface will enable you to draw a rectangle over the image if there are any birds present, and then classify them. 9 | 10 | Once there are enough tagged images in the database, the model will be able to be updated so it can automatically detect each bird and record when it was seen and what type of bird it is. 11 | 12 | --- 13 | 14 | ## Types of bird 15 | 16 | The following types (classes) of bird can be detected: 17 | 18 | 1. House Sparrow 19 | 1. Starling 20 | 1. Blue tit 21 | 1. Blackbird 22 | 1. Woodpigeon 23 | 1. Goldfinch 24 | 1. Great tit 25 | 1. Robin 26 | 1. Long-tailed tit 27 | 1. Chaffinch 28 | 29 | Note - you can add to this using the web UI. 30 | 31 | --- 32 | 33 | ## Setting up the python environment with Tensorflow 34 | 35 | sudo apt-get update 36 | sudo apt install python3-dev python3-pip 37 | sudo apt install libatlas-base-dev 38 | sudo pip3 install -U virtualenv 39 | 40 | virtualenv --system-site-packages -p python3 ./venv 41 | 42 | source ./venv/bin/activate 43 | 44 | pip install --upgrade pip 45 | pip list 46 | pip install --upgrade tensorflow 47 | 48 | --- 49 | 50 | ## Verify the install 51 | 52 | `python -c "import tensorflow as tf; tf.enable_eager_execution(); print(tf.reduce_sum(tf.random_normal([1000, 1000])))"` 53 | 54 | ```bash 55 | pip install pillow 56 | pip install lxml 57 | pip install jupyter 58 | pip install matplotlib 59 | 60 | git clone https://github.com/tensorflow/models.git 61 | ``` 62 | 63 | --- 64 | 65 | ## Download protobuf from the website: 66 | 67 | 68 | 69 | protoc object_detection/protos/*.proto --python_out=. 
70 | export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim 71 | -------------------------------------------------------------------------------- /Todo.md: -------------------------------------------------------------------------------- 1 | # TODO 2 | 3 | ## Clean up project repository 4 | 5 | - [ ] move old files to `archive` folder 6 | - [ ] improve documentation 7 | - [ ] make existing code simpler to follow and more descriptive 8 | 9 | --- 10 | 11 | ## Setup simple Redis database 12 | 13 | - [ ] Setup to run in a docker container with a data folder outside the container, mapped using volumes 14 | - [ ] To save captured images 15 | - [ ] To add metadata for processed images 16 | - [ ] To remove images that have been processed with no birds in them 17 | - [ ] To add label classifications 18 | 19 | ## Labeler 20 | 21 | - [X] create HTML with placeholders for: 22 | - [X] image 23 | - [X] image details (date, time, state) 24 | - [X] list of labels 25 | - [X] buttons to add / remove labels 26 | - [ ] save button 27 | - [ ] discard and delete button 28 | - [ ] list of defined areas 29 | - [ ] buttons to add / remove defined areas 30 | - [X] create flask backend to get list of labels from mongodb 31 | - [ ] create flask backend to set / update list of labels to mongodb 32 | - [ ] create backend to get image & image details from mongodb 33 | - [ ] finesse with CSS and transition animations 34 | - [X] create python script to seed database with some labels 35 | - [ ] add some test data from real camera captures 36 | - [ ] Update UI to make it simpler and responsive 37 | -------------------------------------------------------------------------------- /archive/Camera.py: -------------------------------------------------------------------------------- 1 | # Grab a video feed from the camera and pass to the classifer 2 | # TwitcherPi - AI Bird Counter 3 | # By Kevin McAleer 4 | # January 2019 5 | 6 | from picamera import PiCamera 7 | from time import sleep 8 | import datetime 9 | 10 
| camera = PiCamera() 11 | 12 | # camera.start_preview() 13 | # sleep(10) 14 | # camera.stop_preview() 15 | 16 | 17 | def take_photo(): 18 | # Get the current date and time and make a unique filename 19 | 20 | # camera.image_effect = 'saturation' 21 | camera.start_preview() 22 | now = datetime.datetime 23 | print(now) 24 | filename = "{0:%Y}-{0:%m}-{0:%d}".format(now) 25 | # filename = '/home/pi/Desktop/image.jpg' 26 | camera.capture(filename) 27 | camera.stop_preview() 28 | 29 | # Main loop 30 | 31 | 32 | while True: 33 | camera.start_preview() 34 | take_photo() 35 | sleep(10) 36 | camera.stop_preview() 37 | -------------------------------------------------------------------------------- /archive/TwitcherDB.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevinmcaleer/TwitcherPi/50cf5500f578b28485c8b087f2381e70738b936a/archive/TwitcherDB.db -------------------------------------------------------------------------------- /archive/TwitcherPiServer.py: -------------------------------------------------------------------------------- 1 | # TwitcherPi Image server 2 | 3 | from flask import Flask 4 | app = Flask(__name__) 5 | 6 | 7 | @app.route('/') 8 | def hello_world(): 9 | return('Hello, World!') 10 | 11 | if __name__ == '__main__': 12 | app.run() 13 | -------------------------------------------------------------------------------- /archive/capture.py: -------------------------------------------------------------------------------- 1 | # TwitcherPi Image Capture 2 | # Kevin McAleer 3 | # December 2021 4 | 5 | # Import Libraries 6 | from io import BytesIO 7 | from picamera import PiCamera 8 | from dal import ImageDocument 9 | from datetime import datetime 10 | from PIL import Image 11 | 12 | image_document = ImageDocument() 13 | 14 | camera = PiCamera() 15 | camera.resolution = (1024, 768) 16 | 17 | def take_picture(): 18 | # Take a picture 19 | 20 | image = BytesIO() 21 | camera.capture('test.jpg') 
22 | camera.capture(image, 'jpeg') 23 | 24 | im = image 25 | 26 | image_bytes = BytesIO() 27 | im.save(image_bytes, format='JPEG') 28 | 29 | image = { 30 | 'data': image_bytes.getvalue() 31 | } 32 | 33 | def save_image(image): 34 | # Save an image to the Database 35 | 36 | image_document.capture_date = str(datetime.now().isoformat()) 37 | image_document.author = "Kevin McAleer" 38 | image_document.save_image(image) 39 | 40 | img = take_picture() 41 | 42 | save_image(img) 43 | -------------------------------------------------------------------------------- /archive/forms.py: -------------------------------------------------------------------------------- 1 | from flask_wtf import Form 2 | from wtforms import StringField, PasswordField, SubmitField 3 | from wtforms.validators import DataRequired, Email, Length 4 | class SignupForm(Form): 5 | first_name = StringField('First name', validators=[DataRequired("Please enter your first name.")]) 6 | last_name = StringField('Last name' , validators=[DataRequired("Please enter your last name.")]) 7 | email = StringField('Email', validators=[DataRequired("Please enter your email address"),Email("Please type a valid email address")]) 8 | password = PasswordField('password', validators=[DataRequired("Please enter your password"), Length(min=6, message="Passwords must be 6 characters or more.")]) 9 | submit = SubmitField('Sign up') 10 | class LoginForm(Form): 11 | email = StringField('Email', validators=[DataRequired("Please enter your email address"), Email("Please enter a valid email address")]) 12 | password = PasswordField('Password',validators=[DataRequired("Please enter a password.")]) 13 | submit = SubmitField("Sign in") 14 | -------------------------------------------------------------------------------- /archive/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |

Hello World

4 | 5 | -------------------------------------------------------------------------------- /archive/models.py: -------------------------------------------------------------------------------- 1 | from flask_sqlalchemy import SQLAlchemy 2 | from werkzeug import generate_password_hash, check_password_hash 3 | 4 | db = SQLAlchemy() 5 | 6 | class User(db.Model): 7 | __tablename__ = 'users' 8 | uid = db.Column(db.Integer, primary_key = True) 9 | firstname = db.Column(db.String(100)) 10 | lastname = db.Column(db.String(100)) 11 | email = db.Column(db.String(120), unique=True) 12 | pwdhash = db.Column(db.String(54)) 13 | 14 | def __init__(self, firstname, lastname, email, password): 15 | self.firstname = firstname.title() 16 | self.lastname = lastname.title() 17 | self.email = email.lower() 18 | self.set_password(password) 19 | 20 | def set_password(self, password): 21 | self.pwdhash = generate_password_hash(password) 22 | 23 | def check_password(self, password): 24 | return check_password_hash(self.pwdhash, password) 25 | -------------------------------------------------------------------------------- /archive/oldcapture.py: -------------------------------------------------------------------------------- 1 | # TwitcherPi Image Capture 2 | # Kevin McAleer 3 | # December 2021 4 | 5 | # Import libraries 6 | from picamera import PiCamera 7 | from io import BytesIO 8 | from olddal import ImageDocument 9 | 10 | from datetime import datetime 11 | 12 | 13 | image_document = ImageDocument() 14 | 15 | camera = PiCamera() 16 | camera.resolution = (1024, 768) 17 | 18 | def take_picture(): 19 | # take a picture 20 | 21 | image = BytesIO() 22 | camera.capture(image,'jpeg') 23 | return image 24 | 25 | def save_image(image): 26 | # Save an image to the mongo database 27 | 28 | image_document.save_image(image) 29 | 30 | 31 | image_document.load_image() 32 | -------------------------------------------------------------------------------- /archive/olddal.py: 
-------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | import base64 3 | from io import BytesIO 4 | from datetime import datetime 5 | 6 | # Connect to Mongo database server, where the IP address is that of your MongoDB server (TwitcherPi is the name of the database) 7 | # There are two collections - images and birds; images saves the raw captured image, birds stores details of detected and classified birds 8 | client = MongoClient('mongodb://192.168.1.150/twitcherpi') 9 | db = client.twitcherpi 10 | 11 | class ImageDocument(): 12 | """ Hides complexities of connecting to databases etc """ 13 | 14 | __captured_date = "" 15 | __processed = False 16 | __image = BytesIO() 17 | 18 | def __init__(self): 19 | now = datetime.now() 20 | self.__captured_date = now.isoformat 21 | 22 | @property 23 | def captured_date(self)->str: 24 | """ Returns the date captured of the image """ 25 | return self.__captured_date 26 | 27 | @captured_date.setter 28 | def captured_date(self, value:str): 29 | """ Sets the image capture date """ 30 | self.__captured_date = value 31 | 32 | def save_image(self, image): 33 | """ Save the image to the database """ 34 | self.__image = image 35 | db.images.insert_one({"date":self.date_string, "image":base64.b64encode(image.getbuffer())}) 36 | 37 | @property 38 | def image(self): 39 | """ Returns the image """ 40 | return self.__image 41 | 42 | @image.setter 43 | def image(self, value:BytesIO): 44 | """ Sets the image file """ 45 | self.__image = value 46 | 47 | @property 48 | def processed(self)->bool: 49 | """ Returns the current status of the image; if its processed or not """ 50 | return self.__processed 51 | 52 | @processed.setter 53 | def processed(self, value:bool): 54 | """ Sets the current processed status """ 55 | self.__processed = value 56 | 57 | def load_image(self): 58 | image_record = db.images.find() 59 | image = image_record['image'] 60 | print(image) 
-------------------------------------------------------------------------------- /archive/routes.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, render_template, request, session, redirect, url_for 2 | from archive.models import db, User 3 | from archive.forms import SignupForm, LoginForm 4 | 5 | app = Flask(__name__) 6 | 7 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///TwitcherDB.db' 8 | db.init_app(app) 9 | 10 | app.secret_key = 'development-key' 11 | 12 | @app.route("/") 13 | def index(): 14 | return render_template("index.html") 15 | 16 | @app.route("/about") 17 | def about(): 18 | return render_template("about.html") 19 | 20 | @app.route("/signup", methods=['GET','POST']) 21 | def signup(): 22 | form = SignupForm() 23 | 24 | if request.method == 'POST': 25 | if form.validate() == False: 26 | return render_template('signup.html', form=form) 27 | else: 28 | newuser = User(form.first_name.data, form.last_name.data, form.email.data, form.password.data) 29 | db.session.add(newuser) 30 | db.session.commit() 31 | 32 | session['email'] = newuser.email 33 | return redirect(url_for('home')) 34 | 35 | elif request.method == 'GET': 36 | return render_template('signup.html', form=form) 37 | 38 | @app.route("/home") 39 | def home(): 40 | if 'email' not in session: 41 | return render_template(url_for('login')) 42 | return render_template("home.html") 43 | 44 | @app.route("/login", methods=['GET','POST']) 45 | def login(): 46 | form = LoginForm() 47 | 48 | if request.method == "POST": 49 | if form.validate() == False: 50 | return render_template("login.html", form=form) 51 | else: 52 | email = form.email.data 53 | password = form.password.data 54 | 55 | user = User.query.filter_by(email=email).first() 56 | if user is not None and user.check_password(password): 57 | session['email'] = form.email.data 58 | return redirect(url_for('home')) 59 | else: 60 | return redirect(url_for('login')) 61 | elif 
request.method == 'GET': 62 | return render_template('login.html', form=form) 63 | @app.route("/logout") 64 | def logout(): 65 | session.pop('email', None) 66 | return redirect(url_for('index')) 67 | 68 | if __name__ == "__main__": 69 | app.run(debug=True) 70 | -------------------------------------------------------------------------------- /archive/test.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | 3 | app = Flask(__name__) 4 | 5 | @app.route('/') 6 | def home(): 7 | return "

hello world

" 8 | 9 | def main(): 10 | app.run(host='0.0.0.0', port=2222) 11 | 12 | if __name__ == "__main__": 13 | main() -------------------------------------------------------------------------------- /capture2.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | from picamera import PiCamera 3 | from dal2 import ImageDocument 4 | from io import BytesIO 5 | 6 | image_docuement = ImageDocument() 7 | 8 | camera = PiCamera() 9 | camera.resolution = (1024, 768) 10 | 11 | def take_picture(): 12 | image = BytesIO 13 | camera.capture('test2.jpg') 14 | # camera.capture(image, 'jpeg') 15 | 16 | return image 17 | 18 | def save_image(image): 19 | 20 | image_docuement.__captured_date = str(datetime.now().isoformat) 21 | image_docuement.__author = "Kevin McAleer" 22 | image_docuement.save_image(image) 23 | 24 | img = take_picture() 25 | save_image(img) -------------------------------------------------------------------------------- /checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "model.ckpt-48697" 2 | all_model_checkpoint_paths: "model.ckpt-48032" 3 | all_model_checkpoint_paths: "model.ckpt-48194" 4 | all_model_checkpoint_paths: "model.ckpt-48363" 5 | all_model_checkpoint_paths: "model.ckpt-48531" 6 | all_model_checkpoint_paths: "model.ckpt-48697" 7 | -------------------------------------------------------------------------------- /dal.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient 2 | from io import BytesIO 3 | from datetime import datetime 4 | from bson import ObjectId 5 | from PIL.Image import Image 6 | 7 | client = MongoClient('mongodb://192.168.1.226') 8 | db = client.twitcherpi 9 | 10 | class ImageDocument(): 11 | __image = BytesIO 12 | __captured_date = "" 13 | __author = "" 14 | 15 | def __init__(self): 16 | now = datetime.now() 17 | self.__captured_date = 
now.isoformat() 18 | 19 | @property 20 | def author(self): 21 | return self.__author 22 | 23 | @author.setter 24 | def author(self, value): 25 | self.__author = value 26 | 27 | @property 28 | def capture_date(self): 29 | return self.__captured_date 30 | 31 | @capture_date.setter 32 | def capture_date(self, value): 33 | self.__captured_date = value 34 | 35 | def save_image(self, image): 36 | # self.__image = image.getvalue() 37 | # myimage = image.getvalue() 38 | image_id = db.images.insert_one({"date":self.__captured_date, "image":image, "author":self.__author}) 39 | # db.images.insert_one({"date":self.__captured_date, "image":base64.b64encode(self.__image.getbuffer()), "author":self.__author}) 40 | return image_id 41 | 42 | def seed_database(self): 43 | """ Seeds the database with a default set of values """ 44 | labels = ["House Sparrow", "Starling","Blue tit","Blackbird","Woodpigeon","Goldfinch","Great tit","Robin","Long-tailed tit","Chaffinch"] 45 | 46 | for label in labels: 47 | db.labels.insert_one({"label":label}) 48 | 49 | def get_labels(self): 50 | """ Return a list of labels """ 51 | 52 | data = list(db.labels.find({},{"_id":False})) 53 | # data = db.labels.find() 54 | print("there be labels here!:", data) 55 | return data 56 | 57 | def get_one(self): 58 | """ Get one image file record """ 59 | 60 | image_file = list(db.images.find()) 61 | return image_file 62 | 63 | def get_by_id(self, id): 64 | """ Get image by ID """ 65 | print("id is:", id) 66 | print("id type is:", type(id)) 67 | objInstance = ObjectId(id) 68 | image_file = db.images.find_one({"_id": objInstance}) 69 | 70 | img = Image.open(io.BytesIO(image_file['data'])) 71 | return img 72 | 73 | def get_ids(self): 74 | id_list = [str(id) for id in db.images.find().distinct('_id')] 75 | return id_list -------------------------------------------------------------------------------- /dal2.py: -------------------------------------------------------------------------------- 1 | from pymongo import 
MongoClient 2 | from io import BytesIO 3 | from datetime import datetime 4 | 5 | client = MongoClient('mongodb://192.168.1.226') 6 | db = client.twicherpi 7 | 8 | class ImageDocument(): 9 | __image = BytesIO 10 | __captured_date = "" 11 | __author = "" 12 | 13 | def __init__(self): 14 | now = datetime.now() 15 | self.__captured_date = now 16 | 17 | def seed_database(self): 18 | labels = ['sparrow', 'blackbird','robin'] 19 | 20 | for label in labels: 21 | db.labels2.insert_one({'label':label}) 22 | 23 | def get_labels(self): 24 | """ Returns a list of labels """ 25 | data = list(db.labels2.find({},{"_id":False})) 26 | print(data) 27 | return data 28 | 29 | def get_ids(self): 30 | id_list = [str(id) for id in db.labels2.find().distinct('_id')] 31 | return id_list 32 | 33 | def save_image(self, image:BytesIO): 34 | self.__image = image.getvalue 35 | myimage = image.getvalue() 36 | image_id = db.images2.insert_one({"date":self.__captured_date, "image":myimage, "author":self.__author}) 37 | 38 | # temporary routine 39 | im = ImageDocument() 40 | im.get_labels() -------------------------------------------------------------------------------- /docker/redis/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | redis: 5 | build: . 
6 | image: redis-pi 7 | container_name: redis-pi 8 | ports: 9 | - "6379:6379" 10 | volumes: 11 | - redis_data:/data/redis 12 | 13 | volumes: 14 | redis_data: 15 | driver: local 16 | driver_opts: 17 | type: none 18 | device: data 19 | o: bind 20 | -------------------------------------------------------------------------------- /docker/redis/dockerfile: -------------------------------------------------------------------------------- 1 | # Use the Redis 6.2 base image for ARM32v6 architecture 2 | FROM arm32v7/redis:6.2 3 | 4 | # Set the working directory 5 | WORKDIR /data 6 | 7 | # Expose the default Redis port 8 | EXPOSE 6379 9 | 10 | # Create a directory for Redis data 11 | RUN mkdir -p /data/redis 12 | 13 | # Set the Redis configuration file 14 | COPY redis.conf /usr/local/etc/redis/redis.conf 15 | 16 | # Run the Redis server with the provided configuration file 17 | CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ] 18 | -------------------------------------------------------------------------------- /docker/redis/redis.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | protected-mode yes 3 | port 6379 4 | tcp-backlog 511 5 | timeout 0 6 | tcp-keepalive 300 7 | daemonize no 8 | supervised no 9 | pidfile /var/run/redis_6379.pid 10 | loglevel notice 11 | logfile "" 12 | databases 16 13 | always-show-logo yes 14 | save 900 1 15 | save 300 10 16 | save 60 10000 17 | stop-writes-on-bgsave-error yes 18 | rdbcompression yes 19 | rdbchecksum yes 20 | dbfilename dump.rdb 21 | dir /data/redis 22 | slave-serve-stale-data yes 23 | slave-read-only yes 24 | repl-diskless-sync no 25 | repl-diskless-sync-delay 5 26 | repl-disable-tcp-nodelay no 27 | slave-priority 100 28 | lazyfree-lazy-eviction no 29 | lazyfree-lazy-expire no 30 | lazyfree-lazy-server-del no 31 | slave-lazy-flush no 32 | appendonly no 33 | appendfilename "appendonly.aof" 34 | appendfsync everysec 35 | no-appendfsync-on-rewrite no 36 | 
auto-aof-rewrite-percentage 100 37 | auto-aof-rewrite-min-size 64mb 38 | aof-load-truncated yes 39 | aof-use-rdb-preamble no 40 | lua-time-limit 5000 41 | slowlog-log-slower-than 10000 42 | slowlog-max-len 128 43 | latency-monitor-threshold 0 44 | notify-keyspace-events "" 45 | hash-max-ziplist-entries 512 46 | hash-max-ziplist-value 64 47 | list-max-ziplist-size -2 48 | list-compress-depth 0 49 | set-max-intset-entries 512 50 | zset-max-ziplist-entries 128 51 | zset-max-ziplist-value 64 52 | hll-sparse-max-bytes 3000 53 | activerehashing yes 54 | client-output-buffer-limit normal 0 0 0 55 | client-output-buffer-limit slave 256mb 64mb 60 56 | client-output-buffer-limit pubsub 32mb 8mb 60 57 | hz 10 58 | aof-rewrite-incremental-fsync yes 59 | -------------------------------------------------------------------------------- /docker/webapp/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM python:3.8-slim-buster 3 | WORKDIR /app 4 | 5 | COPY requirements.txt requirements.txt 6 | RUN pip install pip --upgrade 7 | RUN pip3 install -r requirements.txt 8 | COPY . /app 9 | # RUN export FLASK_APP=webserver.py 10 | ENTRYPOINT [ "python3" ] 11 | CMD ["webserver.py"] 12 | -------------------------------------------------------------------------------- /docker/webapp/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | webserver: 4 | # environment: 5 | # FLASK_APP: webserver.py 6 | build: . 
7 | ports: 8 | - "2222:2222" 9 | restart: always 10 | # networks: 11 | # - webnet 12 | # networks: 13 | # webnet: 14 | -------------------------------------------------------------------------------- /main.js: -------------------------------------------------------------------------------- 1 | class Label { 2 | constructor(x,y, width, height, label, scale) { 3 | this.x = x; 4 | this.y = y; 5 | this.width = width; 6 | this.height = height; 7 | this.label = label; 8 | this.scale = scale; 9 | } 10 | } 11 | 12 | class Selection { 13 | constructor(labels, image, date){ 14 | this.labels = labels; 15 | this.image = image; 16 | this.date = date; 17 | } 18 | } 19 | 20 | const canvas = document.getElementById("canvas"); 21 | const silklayer = document.getElementById("silk"); 22 | canvas.width = window.innerWidth - 60; 23 | canvas.height = 400; 24 | silklayer.width = window.innerWidth - 60; 25 | silklayer.height = 400; 26 | canvas.style.position = "absolute"; 27 | silklayer.style.position = "absolute"; 28 | canvas.style.zIndex = 0; 29 | silklayer.style.zIndex = 1; 30 | 31 | var context = canvas.getContext("2d"); 32 | var silk = silklayer.getContext("2d"); 33 | 34 | // The image scaling on the canvas 35 | var scale = 0; 36 | 37 | // context.fillStyle = "white"; 38 | // silk.fillStyle = "white"; 39 | // context.clearRect(0,0, canvas.width, canvas.height); 40 | // silk.clearRect(0,0, canvas.width, canvas.height); 41 | 42 | let draw_color = "rgba(0, 255, 0, 0.5)"; 43 | let font_color = "rgba(0, 0, 0)"; 44 | let draw_width = "2"; 45 | var is_drawing = false; 46 | 47 | var current_label = ""; 48 | var selections = []; 49 | 50 | // let label_array = ["sparrow","blackbird","robin","starling","wood pigeon","great tit","blue tit","crow"]; 51 | var label_array = ['none'] 52 | 53 | // create skeleton image_file 54 | var image_file = new Selection(); 55 | 56 | // cache links to tags in the page 57 | const image = document.getElementById('source'); 58 | const rects = 
document.getElementById('rects'); 59 | const remove_button = document.getElementById("remove"); 60 | const labels = document.getElementById('labels'); 61 | 62 | image.addEventListener('load', e => { 63 | scaleToFit(image) 64 | // context.drawImage(image, 0, 0, canvas.height, canvas.width); 65 | }); 66 | 67 | function update_current_label(){ 68 | // rects = document.getElementById('rects'); 69 | my_index = labels.selectedIndex; 70 | if (my_index == -1) { 71 | my_index = 1; 72 | } 73 | 74 | var cur_label = labels[my_index].value; 75 | return cur_label; 76 | 77 | // Update the current label with whatever is selected 78 | // var cur_label = rects[rects.selectedIndex].value; 79 | } 80 | 81 | // current_label = update_current_label(); 82 | 83 | function change_color(element){ 84 | draw_color = element.style.background; 85 | } 86 | 87 | function change_width(element) { 88 | draw_width = element.style.stroke; 89 | } 90 | 91 | silklayer.addEventListener("touchstart", start, false); 92 | silklayer.addEventListener("touchmove", refresh, false); 93 | silklayer.addEventListener("mousedown", start, false); 94 | silklayer.addEventListener("mousemove", refresh, false); 95 | 96 | silklayer.addEventListener("touchend", stop, false); 97 | silklayer.addEventListener("mouseup", stop, false); 98 | silklayer.addEventListener("mouseout", stop, false); 99 | document.getElementById("remove").addEventListener("click", remove_item); 100 | document.getElementById("savebtn").addEventListener("click", save_data); 101 | 102 | 103 | function start(event) { 104 | is_drawing = true; 105 | // silk.moveTo(event.pageX - canvas.offsetLeft, 106 | // event.pageY - canvas.offsetTop); 107 | silk.startX = event.pageX - silklayer.offsetLeft; 108 | silk.startY = event.pageY - silklayer.offsetTop; 109 | // event.preventDefault(); 110 | console.log('started'); 111 | } 112 | 113 | function update_options(){ 114 | var str = ""; 115 | my_index = selections.selectedIndex; 116 | if (my_index == -1) { 117 | // 
my_index = 1; 118 | } 119 | for (var item of selections) { 120 | str += ""; 121 | } 122 | document.getElementById("rects").innerHTML = str; 123 | } 124 | 125 | function draw_selection(selections) { 126 | // draw all the rectangles in the selection array and add text label 127 | 128 | silk.clearRect(0,0,silklayer.width, silklayer.height); 129 | for (let i in selections) { 130 | silk.fillStyle = draw_color; 131 | silk.strokeStyle = draw_color; 132 | // context.moveTo(selections[i].x, selections[i].y); 133 | silk.fillRect(selections[i].x, selections[i].y, selections[i].width, selections[i].height); 134 | silk.StrokeStyle = "rgba(0,255,0,1)"; 135 | silk.fillStyle = "rgba(0,255,0,1)"; 136 | silk.fillText(selections[i].label, selections[i].x,selections[i].y); 137 | } 138 | } 139 | 140 | function draw(event){ 141 | // silk.clearRect(0,0,canvas.width, canvas.height); 142 | if ( is_drawing ) { 143 | 144 | // draw all the rectangles 145 | silk.clearRect(x,y,silklayer.width, silklayer.height); 146 | draw_selection(selections); 147 | 148 | // set the coordinates from the staring position and end point 149 | x = silk.startX ; 150 | y = silk.startY ; 151 | 152 | w = event.pageX - silklayer.offsetLeft; 153 | h = event.pageY - silklayer.offsetTop; 154 | console.log("w:"+w); 155 | console.log("h:"+h); 156 | silk.strokeStyle = "rgba(0, 255, 0, 1)"; 157 | silk.lineWidth = draw_width; 158 | silk.fillStyle = draw_color; 159 | 160 | silk.strokeRect(x, y,w-x,h-y); 161 | silk.beginPath(); 162 | silk.moveTo(x, y); 163 | silk.lineTo(w, h); 164 | silk.stroke(); 165 | silk.closePath(); 166 | silk.fillStyle =draw_color; 167 | silk.strokeStyle = draw_color; 168 | silk.fillRect(x,y,w-x,h-y); 169 | silk.fillStyle = "rgba(0, 255, 0, 1)"; 170 | silk.fillText(update_current_label(), x,y-2); 171 | 172 | } 173 | else { 174 | refresh(event); 175 | draw_selection(selections); 176 | } 177 | } 178 | 179 | function scaleToFit(img){ 180 | // get the scale 181 | var scale = Math.min(canvas.width / img.width, 
canvas.height / img.height); 182 | 183 | // get the top left position of the image 184 | var x = (canvas.width / 2) - (img.width / 2) * scale; 185 | var y = (canvas.height / 2) - (img.height / 2) * scale; 186 | context.drawImage(img, x, y, img.width * scale, img.height * scale); 187 | return scale 188 | } 189 | 190 | function refresh(event){ 191 | if (is_drawing) { 192 | scaleToFit(image); 193 | draw(event); 194 | } 195 | else { 196 | 197 | // context.drawImage(image, 0, 0, canvas.height, canvas.width); 198 | scale = scaleToFit(image); 199 | 200 | // draw all the rectangles 201 | 202 | silk.font = "10px Arial"; 203 | x = event.pageX - silklayer.offsetLeft; 204 | y = event.pageY - silklayer.offsetTop; 205 | 206 | // wipe the canvas clean 207 | silk.clearRect(0,0,silklayer.width, silklayer.height); 208 | silk.fillStyle = "rgba(255, 255, 255, 1)"; 209 | 210 | silk.fillRect(0,0,100,40); 211 | silk.fillStyle = font_color; 212 | silk.fillText("X = " + x, 0, 10); 213 | silk.fillText("Y = " + y, 0, 20); 214 | silk.fillText("Not Drawing", 0, 40); 215 | 216 | draw_selection(selections); 217 | } 218 | } 219 | 220 | function stop(event){ 221 | if ( is_drawing ) { 222 | is_drawing = false; 223 | 224 | // get the x, y, width and height 225 | x = silk.startX ; 226 | y = silk.startY ; 227 | w = event.pageX - silklayer.offsetLeft - x; 228 | h = event.pageY - silklayer.offsetTop - y; 229 | console.log("scale is " + scale); 230 | 231 | // create a new label to draw 232 | l = new Label(x, y, w, h, scale); 233 | 234 | // get currently selected label 235 | l.label = update_current_label(); 236 | l.scale = scale; 237 | selections.push(l); 238 | silk.strokeRect(x, y,w,h); 239 | draw_selection(selections); 240 | update_options(); 241 | console.log('stopped'); 242 | } 243 | } 244 | 245 | function remove_item(event){ 246 | // get currently selected item 247 | 248 | var sel = document.getElementById('rects'); 249 | var index = sel.selectedIndex; 250 | 251 | selections.splice(index,1); 252 | 
update_options(); 253 | draw_selection(); 254 | refresh(event); 255 | } 256 | 257 | function save_data(){ 258 | // Saves data by posting to API 259 | 260 | // assemble rects as JSON 261 | 262 | image_file.labels = selections; 263 | // data.labels = selections 264 | // data.image = image_id; 265 | // data.date = image_date; 266 | 267 | data = image_file; 268 | 269 | postData('http://192.168.1.226:2222/save', data) 270 | .then(data => { 271 | console.log(data); // JSON data parsed by `data.json()` call 272 | }); 273 | } 274 | 275 | async function postData(url = '', data = {}) { 276 | // Default options are marked with * 277 | const response = await fetch(url, { 278 | method: 'POST', // *GET, POST, PUT, DELETE, etc. 279 | mode: 'cors', // no-cors, *cors, same-origin 280 | cache: 'no-cache', // *default, no-cache, reload, force-cache, only-if-cached 281 | credentials: 'same-origin', // include, *same-origin, omit 282 | headers: { 283 | 'Content-Type': 'application/json' 284 | // 'Content-Type': 'application/x-www-form-urlencoded', 285 | }, 286 | redirect: 'follow', // manual, *follow, error 287 | referrerPolicy: 'no-referrer', // no-referrer, *no-referrer-when-downgrade, origin, origin-when-cross-origin, same-origin, strict-origin, strict-origin-when-cross-origin, unsafe-url 288 | body: JSON.stringify(data) // body data type must match "Content-Type" header 289 | }); 290 | return response.json(); // parses JSON response into native JavaScript objects 291 | } 292 | 293 | 294 | -------------------------------------------------------------------------------- /model.ckpt-48697.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kevinmcaleer/TwitcherPi/50cf5500f578b28485c8b087f2381e70738b936a/model.ckpt-48697.index -------------------------------------------------------------------------------- /model.ckpt-48697.meta: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kevinmcaleer/TwitcherPi/50cf5500f578b28485c8b087f2381e70738b936a/model.ckpt-48697.meta -------------------------------------------------------------------------------- /models/research/object_detection/bird.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | 4 | import six.moves.urllib as urllib 5 | import sys 6 | import tarfile 7 | import tensorflow as tf 8 | import zipfile 9 | 10 | from distutils.version import StrictVersion 11 | from collections import defaultdict 12 | from io import StringIO 13 | from matplotlib import pyplot as plt 14 | from PIL import Image 15 | 16 | sys.path.append("..") 17 | from object_detection.utils import ops as util_ops 18 | from utils import label_map_util 19 | from utils import visualization_utils as vis_util 20 | 21 | MODEL_NAME = 'ssd_mobilenet_v1' 22 | MODEL_FILE = MODEL_NAME + '.tar.gz' 23 | # DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/' 24 | 25 | # Path to frozen detection graphself. 26 | PATH_TO_FROZEN_GRAPH = 'bird_inference_graph2/frozen_inference_graph.pb' 27 | 28 | # List of the strings that is used to add correct label for each box. 
PATH_TO_LABELS = os.path.join('training', 'object-detection.pbtxt')

# Build the detection graph once, at import time, from the frozen .pb file.
DETECTION_GRAPH = tf.Graph()
with DETECTION_GRAPH.as_default():
    OD_GRAPH_DEF = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        SERIALIZED_GRAPH = fid.read()
        OD_GRAPH_DEF.ParseFromString(SERIALIZED_GRAPH)
        tf.import_graph_def(OD_GRAPH_DEF, name='')

# Maps integer class ids to display names for drawing box labels.
category_index = label_map_util.create_category_index_from_labelmap(
    PATH_TO_LABELS, use_display_name=True)


def load_image_into_numpy_array(image):
    """Convert a PIL image to a (height, width, 3) uint8 numpy array."""
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)


# Detection inputs/outputs.
PATH_TO_TEST_IMAGES = 'images/test'
# NOTE(review): range(0, 10) yields 'BirdTable00.jpg'..'BirdTable09.jpg' -
# confirm the test images really start at index 0 (the repo ships BirdTable01.jpg).
TEST_IMAGE_PATHS = [os.path.join(PATH_TO_TEST_IMAGES, 'BirdTable0{}.jpg'.format(i))
                    for i in range(0, 10)]
IMAGE_SIZE = (12, 8)  # matplotlib figure size in inches


def run_inference_for_single_image(image, graph):
    """Run the frozen detection graph on one image.

    Args:
        image: numpy uint8 array of shape (height, width, 3).
        graph: tf.Graph containing the imported frozen detection model.

    Returns:
        dict with 'num_detections' (int), 'detection_boxes',
        'detection_scores', 'detection_classes' (first batch element only)
        and, when the model produces them, 'detection_masks'.
    """
    with graph.as_default():
        with tf.Session() as sess:
            # Get handles to the model's input and output tensors.
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                    'num_detections', 'detection_boxes', 'detection_scores',
                    'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image.
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframing translates masks from box coordinates to image
                # coordinates so they fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0],
                                           [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0],
                                           [real_num_detection, -1, -1])
                # FIX: was `utils_ops.` (NameError at runtime) - this module is
                # imported above as `from object_detection.utils import ops as util_ops`.
                detection_masks_reframed = util_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension.
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

            # Run inference.
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: np.expand_dims(image, 0)})

            # All outputs are float32 numpy arrays; convert types as appropriate.
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict


COUNT = 0
for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    # Array-based representation of the image, used later to draw the result
    # boxes and labels on.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images of shape [1, None, None, 3].
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
111 | output_dict = run_inference_for_single_image(image_np, DETECTION_GRAPH) 112 | # print(output_dict['detection_scores']) 113 | # Visualization of the results of a detection. 114 | vis_util.visualize_boxes_and_labels_on_image_array( 115 | image_np, 116 | output_dict['detection_boxes'], 117 | output_dict['detection_classes'], 118 | output_dict['detection_scores'], 119 | category_index, 120 | instance_masks=output_dict.get('detection_masks'), 121 | use_normalized_coordinates=True, 122 | line_thickness=8) 123 | plt.figure(figsize=IMAGE_SIZE) 124 | plt.imshow(image_np) 125 | plt.savefig("objectdetected" + str(COUNT) + ".jpg") 126 | COUNT = COUNT + 1 127 | -------------------------------------------------------------------------------- /options.js: -------------------------------------------------------------------------------- 1 | $(function(){ 2 | 3 | var $labels = $('#labels'); 4 | 5 | $.ajax({ 6 | type: 'GET', 7 | url: 'http://localhost:2222/labels', 8 | success: function(labels) { 9 | $.each(labels, function(i, label){ 10 | $labels.append('') 11 | }); 12 | // set the default value to the first option 13 | // var temp="a"; 14 | // $("#labels").val(temp); 15 | $("#labels").selectedIndex=0; 16 | // $("#labels")[labels.selectedIndex].selected = 'selected'; 17 | } 18 | }); 19 | }) -------------------------------------------------------------------------------- /pipeline.config: -------------------------------------------------------------------------------- 1 | # SSD with Mobilenet v1, configured for the mac-n-cheese dataset. 2 | # Users should configure the fine_tune_checkpoint field in the train config as 3 | # well as the label_map_path and input_path fields in the train_input_reader and 4 | # eval_input_reader. Search for "${YOUR_GCS_BUCKET}" to find the fields that 5 | # should be configured. 
6 | 7 | model { 8 | ssd { 9 | num_classes: 10 10 | box_coder { 11 | faster_rcnn_box_coder { 12 | y_scale: 10.0 13 | x_scale: 10.0 14 | height_scale: 5.0 15 | width_scale: 5.0 16 | } 17 | } 18 | matcher { 19 | argmax_matcher { 20 | matched_threshold: 0.5 21 | unmatched_threshold: 0.5 22 | ignore_thresholds: false 23 | negatives_lower_than_unmatched: true 24 | force_match_for_each_row: true 25 | } 26 | } 27 | similarity_calculator { 28 | iou_similarity { 29 | } 30 | } 31 | anchor_generator { 32 | ssd_anchor_generator { 33 | num_layers: 6 34 | min_scale: 0.2 35 | max_scale: 0.95 36 | aspect_ratios: 1.0 37 | aspect_ratios: 2.0 38 | aspect_ratios: 0.5 39 | aspect_ratios: 3.0 40 | aspect_ratios: 0.3333 41 | } 42 | } 43 | image_resizer { 44 | fixed_shape_resizer { 45 | height: 300 46 | width: 300 47 | } 48 | } 49 | box_predictor { 50 | convolutional_box_predictor { 51 | min_depth: 0 52 | max_depth: 0 53 | num_layers_before_predictor: 0 54 | use_dropout: false 55 | dropout_keep_probability: 0.8 56 | kernel_size: 1 57 | box_code_size: 4 58 | apply_sigmoid_to_scores: false 59 | conv_hyperparams { 60 | activation: RELU_6, 61 | regularizer { 62 | l2_regularizer { 63 | weight: 0.00004 64 | } 65 | } 66 | initializer { 67 | truncated_normal_initializer { 68 | stddev: 0.03 69 | mean: 0.0 70 | } 71 | } 72 | batch_norm { 73 | train: true, 74 | scale: true, 75 | center: true, 76 | decay: 0.9997, 77 | epsilon: 0.001, 78 | } 79 | } 80 | } 81 | } 82 | feature_extractor { 83 | type: 'ssd_mobilenet_v1' 84 | min_depth: 16 85 | depth_multiplier: 1.0 86 | conv_hyperparams { 87 | activation: RELU_6, 88 | regularizer { 89 | l2_regularizer { 90 | weight: 0.00004 91 | } 92 | } 93 | initializer { 94 | truncated_normal_initializer { 95 | stddev: 0.03 96 | mean: 0.0 97 | } 98 | } 99 | batch_norm { 100 | train: true, 101 | scale: true, 102 | center: true, 103 | decay: 0.9997, 104 | epsilon: 0.001, 105 | } 106 | } 107 | } 108 | loss { 109 | classification_loss { 110 | weighted_sigmoid { 111 | 
anchorwise_output: true 112 | } 113 | } 114 | localization_loss { 115 | weighted_smooth_l1 { 116 | anchorwise_output: true 117 | } 118 | } 119 | hard_example_miner { 120 | num_hard_examples: 3000 121 | iou_threshold: 0.99 122 | loss_type: CLASSIFICATION 123 | max_negatives_per_positive: 3 124 | min_negatives_per_image: 0 125 | } 126 | classification_weight: 1.0 127 | localization_weight: 1.0 128 | } 129 | normalize_loss_by_num_matches: true 130 | post_processing { 131 | batch_non_max_suppression { 132 | score_threshold: 1e-8 133 | iou_threshold: 0.6 134 | max_detections_per_class: 100 135 | max_total_detections: 100 136 | } 137 | score_converter: SIGMOID 138 | } 139 | } 140 | } 141 | 142 | train_config: { 143 | batch_size: 10 144 | optimizer { 145 | rms_prop_optimizer: { 146 | learning_rate: { 147 | exponential_decay_learning_rate { 148 | initial_learning_rate: 0.004 149 | decay_steps: 800720 150 | decay_factor: 0.95 151 | } 152 | } 153 | momentum_optimizer_value: 0.9 154 | decay: 0.9 155 | epsilon: 1.0 156 | } 157 | } 158 | fine_tune_checkpoint: "./data/model.ckpt" 159 | from_detection_checkpoint: true 160 | data_augmentation_options { 161 | random_horizontal_flip { 162 | } 163 | } 164 | data_augmentation_options { 165 | ssd_random_crop { 166 | } 167 | } 168 | } 169 | 170 | train_input_reader: { 171 | tf_record_input_reader { 172 | input_path: "data/train.record" 173 | } 174 | label_map_path: "training/object-detection.pbtxt" 175 | } 176 | 177 | eval_config: { 178 | num_examples: 1018 179 | } 180 | 181 | eval_input_reader: { 182 | tf_record_input_reader { 183 | input_path: "data/test.record" 184 | } 185 | label_map_path: "training/object-detection.pbtxt" 186 | shuffle: false 187 | num_readers: 1 188 | } 189 | 190 | 191 | 192 | -------------------------------------------------------------------------------- /redis_test.py: -------------------------------------------------------------------------------- 1 | import redis 2 | 3 | redis_client =
redis.Redis(host='192.168.1.102', port=6379) 4 | 5 | # # Set a hash value 6 | # redis_client.hset('myhash', 'key1', 'value1') 7 | 8 | # Get a hash value by key 9 | value = redis_client.hget('myhash', 'key1') 10 | 11 | # Get all hash values 12 | all_values = redis_client.hgetall('myhash') 13 | 14 | print(f'all_values {all_values}, value {value}') -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | alabaster==0.7.12 2 | Babel==2.9.1 3 | certifi==2021.10.8 4 | charset-normalizer==2.0.9 5 | click==8.0.1 6 | cycler==0.10.0 7 | docutils==0.17.1 8 | Flask==2.0.1 9 | Flask-Cors==3.0.10 10 | idna==3.3 11 | imagesize==1.3.0 12 | itsdangerous==2.0.1 13 | Jinja2==3.0.1 14 | kiwisolver==1.3.1 15 | MarkupSafe==2.0.1 16 | matplotlib==3.4.2 17 | numpy==1.21.0 18 | packaging==21.3 19 | Pillow==8.3.0 20 | Pygments==2.10.0 21 | pymongo==3.5.1 22 | pyparsing==2.4.7 23 | python-dateutil==2.8.1 24 | pytz==2021.3 25 | requests==2.26.0 26 | six==1.16.0 27 | snowballstemmer==2.2.0 28 | Sphinx==4.3.1 29 | sphinxcontrib-applehelp==1.0.2 30 | sphinxcontrib-devhelp==1.0.2 31 | sphinxcontrib-htmlhelp==2.0.0 32 | sphinxcontrib-jsmath==1.0.1 33 | sphinxcontrib-qthelp==1.0.3 34 | sphinxcontrib-serializinghtml==1.1.5 35 | urllib3==1.26.7 36 | Werkzeug==2.0.1 37 | -------------------------------------------------------------------------------- /seed_db.py: -------------------------------------------------------------------------------- 1 | from dal import ImageDocument 2 | 3 | db = ImageDocument() 4 | 5 | db.seed_database() -------------------------------------------------------------------------------- /ssd_mobilenet_v1.config: -------------------------------------------------------------------------------- 1 | # SSD with Mobilenet v1, configured for the mac-n-cheese dataset. 
2 | # Users should configure the fine_tune_checkpoint field in the train config as 3 | # well as the label_map_path and input_path fields in the train_input_reader and 4 | # eval_input_reader. Search for "${YOUR_GCS_BUCKET}" to find the fields that 5 | # should be configured. 6 | 7 | model { 8 | ssd { 9 | num_classes: 10 10 | box_coder { 11 | faster_rcnn_box_coder { 12 | y_scale: 10.0 13 | x_scale: 10.0 14 | height_scale: 5.0 15 | width_scale: 5.0 16 | } 17 | } 18 | matcher { 19 | argmax_matcher { 20 | matched_threshold: 0.5 21 | unmatched_threshold: 0.5 22 | ignore_thresholds: false 23 | negatives_lower_than_unmatched: true 24 | force_match_for_each_row: true 25 | } 26 | } 27 | similarity_calculator { 28 | iou_similarity { 29 | } 30 | } 31 | anchor_generator { 32 | ssd_anchor_generator { 33 | num_layers: 6 34 | min_scale: 0.2 35 | max_scale: 0.95 36 | aspect_ratios: 1.0 37 | aspect_ratios: 2.0 38 | aspect_ratios: 0.5 39 | aspect_ratios: 3.0 40 | aspect_ratios: 0.3333 41 | } 42 | } 43 | image_resizer { 44 | fixed_shape_resizer { 45 | height: 300 46 | width: 300 47 | } 48 | } 49 | box_predictor { 50 | convolutional_box_predictor { 51 | min_depth: 0 52 | max_depth: 0 53 | num_layers_before_predictor: 0 54 | use_dropout: false 55 | dropout_keep_probability: 0.8 56 | kernel_size: 1 57 | box_code_size: 4 58 | apply_sigmoid_to_scores: false 59 | conv_hyperparams { 60 | activation: RELU_6, 61 | regularizer { 62 | l2_regularizer { 63 | weight: 0.00004 64 | } 65 | } 66 | initializer { 67 | truncated_normal_initializer { 68 | stddev: 0.03 69 | mean: 0.0 70 | } 71 | } 72 | batch_norm { 73 | train: true, 74 | scale: true, 75 | center: true, 76 | decay: 0.9997, 77 | epsilon: 0.001, 78 | } 79 | } 80 | } 81 | } 82 | feature_extractor { 83 | type: 'ssd_mobilenet_v1' 84 | min_depth: 16 85 | depth_multiplier: 1.0 86 | conv_hyperparams { 87 | activation: RELU_6, 88 | regularizer { 89 | l2_regularizer { 90 | weight: 0.00004 91 | } 92 | } 93 | initializer { 94 | 
truncated_normal_initializer { 95 | stddev: 0.03 96 | mean: 0.0 97 | } 98 | } 99 | batch_norm { 100 | train: true, 101 | scale: true, 102 | center: true, 103 | decay: 0.9997, 104 | epsilon: 0.001, 105 | } 106 | } 107 | } 108 | loss { 109 | classification_loss { 110 | weighted_sigmoid { 111 | anchorwise_output: true 112 | } 113 | } 114 | localization_loss { 115 | weighted_smooth_l1 { 116 | anchorwise_output: true 117 | } 118 | } 119 | hard_example_miner { 120 | num_hard_examples: 3000 121 | iou_threshold: 0.99 122 | loss_type: CLASSIFICATION 123 | max_negatives_per_positive: 3 124 | min_negatives_per_image: 0 125 | } 126 | classification_weight: 1.0 127 | localization_weight: 1.0 128 | } 129 | normalize_loss_by_num_matches: true 130 | post_processing { 131 | batch_non_max_suppression { 132 | score_threshold: 1e-8 133 | iou_threshold: 0.6 134 | max_detections_per_class: 100 135 | max_total_detections: 100 136 | } 137 | score_converter: SIGMOID 138 | } 139 | } 140 | } 141 | 142 | train_config: { 143 | batch_size: 10 144 | optimizer { 145 | rms_prop_optimizer: { 146 | learning_rate: { 147 | exponential_decay_learning_rate { 148 | initial_learning_rate: 0.004 149 | decay_steps: 800720 150 | decay_factor: 0.95 151 | } 152 | } 153 | momentum_optimizer_value: 0.9 154 | decay: 0.9 155 | epsilon: 1.0 156 | } 157 | } 158 | fine_tune_checkpoint: "./data/model.ckpt" 159 | from_detection_checkpoint: true 160 | data_augmentation_options { 161 | random_horizontal_flip { 162 | } 163 | } 164 | data_augmentation_options { 165 | ssd_random_crop { 166 | } 167 | } 168 | } 169 | 170 | train_input_reader: { 171 | tf_record_input_reader { 172 | input_path: "data/train.record" 173 | } 174 | label_map_path: "training/object-detection.pbtxt" 175 | } 176 | 177 | eval_config: { 178 | num_examples: 1018 179 | } 180 | 181 | eval_input_reader: { 182 | tf_record_input_reader { 183 | input_path: "data/test.record" 184 | } 185 | label_map_path: "training/object-detection.pbtxt" 186 | shuffle: false 
187 | num_readers: 1 188 | } 189 | -------------------------------------------------------------------------------- /static/css/main.css: -------------------------------------------------------------------------------- 1 | /* 2 | * General 3 | */ 4 | 5 | html, body, h1 { 6 | margin: 0; 7 | padding: 0; 8 | } 9 | 10 | body { 11 | background-color: #f4f4f4; 12 | font-family: 'Open Sans', sans-serif; 13 | } 14 | 15 | h2 { 16 | 17 | font-weight: 300; 18 | 19 | letter-spacing: -1.2px; 20 | } 21 | 22 | a { 23 | text-decoration: none; 24 | display: inline-block; 25 | } 26 | 27 | .container { 28 | width: 1000px; 29 | margin: 0 auto; 30 | } 31 | 32 | .pull-left { 33 | float: left; 34 | } 35 | 36 | .pull-right { 37 | float: right; 38 | } 39 | 40 | .clearfix { 41 | clear: both; 42 | } 43 | 44 | .section-content { 45 | color: #3a3a3a; 46 | float: left; 47 | width: 340px; 48 | } 49 | 50 | .section-device { 51 | float: right; 52 | position: relative; 53 | top: 65px; 54 | } 55 | 56 | .section-device img { 57 | width: 600px; 58 | } 59 | 60 | .btn-primary { 61 | background: #4285f4; 62 | border: 1px solid #1266f1; 63 | color: #ffffff; 64 | font-family: 'Open Sans', sans-serif; 65 | font-size: 13px; 66 | font-weight: 600; 67 | letter-spacing: 0.23px; 68 | padding: 12px 32px; 69 | text-align: center; 70 | text-decoration: none; 71 | text-transform: uppercase; 72 | } 73 | 74 | .btn-secondary { 75 | background: #ffffff; 76 | border: 1px solid #e6e6e6; 77 | color: #3372df; 78 | font-family: 'Open Sans', sans-serif; 79 | font-size: 13px; 80 | font-weight: 600; 81 | letter-spacing: 0.23px; 82 | padding: 12px 32px; 83 | text-align: center; 84 | text-decoration: none; 85 | text-transform: uppercase; 86 | } 87 | 88 | 89 | 90 | /* 91 | * Header 92 | */ 93 | 94 | header { 95 | background-color: #fff; 96 | padding: 25px 0; 97 | } 98 | 99 | .title { 100 | float: left; 101 | font-family: 'Helvetica Neue', Arial, Helvetica, sans-serif; 102 | font-size: 17px; 103 | letter-spacing: 2.5px; 104 | 
text-transform: uppercase; 105 | } 106 | 107 | .title a, .title a:visited { 108 | color: #3a3a3a; 109 | } 110 | 111 | .main-nav { 112 | float: right; 113 | } 114 | 115 | .main-nav ul { 116 | list-style: none; 117 | margin: 0; 118 | padding: 0; 119 | } 120 | 121 | .main-nav li { 122 | display: inline-block; 123 | font-family: 'Open Sans', sans-serif; 124 | font-size: 13px; 125 | font-weight: 600; 126 | text-transform: uppercase; 127 | } 128 | 129 | .main-nav li a { 130 | color: #3a3a3a; 131 | } 132 | 133 | 134 | /* 135 | * Hero section 136 | */ 137 | 138 | .hero-section .section-content { 139 | position: relative; 140 | top: 100px; 141 | } 142 | 143 | .hero-section .section-content h2 { 144 | font-size: 40px; 145 | } 146 | 147 | .hero-section .section-content .btn-primary { 148 | margin-right: 10px; 149 | } 150 | 151 | 152 | /* 153 | * Sign up 154 | */ 155 | 156 | .signup-section .section-content h2 { 157 | font-size: 30px; 158 | } 159 | 160 | .signup-section .section-content { 161 | position: relative; 162 | top: 53px; 163 | } 164 | 165 | .form-group { 166 | margin-bottom: 20px; 167 | } 168 | 169 | .form-group label { 170 | color: #3a3a3a; 171 | display: block; 172 | font-family: 'Open Sans', sans-serif; 173 | font-size: 15px; 174 | font-weight: 600; 175 | margin-bottom: 5px; 176 | } 177 | 178 | .form-group input { 179 | border: none; 180 | border-bottom: 2px solid #4285f4; 181 | background-color: transparent; 182 | color: #3a3a3a; 183 | font-size: 18px; 184 | padding: 5px 0; 185 | width: 100%; 186 | } 187 | 188 | .form-group input:focus { 189 | outline: 0; 190 | } 191 | 192 | .form-group .error-message { 193 | margin: 10px 0; 194 | color: #db4437; 195 | } 196 | 197 | .form-group .error-message + input { 198 | border-bottom: 2px solid #db4437; 199 | } 200 | 201 | .error-message { 202 | color: #db4437; 203 | } 204 | 205 | 206 | /* 207 | * Sign up 208 | */ 209 | 210 | .about-section h2 { 211 | font-size: 30px; 212 | margin-bottom: 0; 213 | } 214 | 215 | 
.about-section p { 216 | font-size: 16px; 217 | } 218 | 219 | 220 | /* 221 | * Home 222 | */ 223 | 224 | .section-tabs { 225 | float: left; 226 | width: 340px; 227 | } 228 | 229 | .section-tabs .places { 230 | margin-top: 62px; 231 | padding-right: 20px; 232 | } 233 | 234 | .section-tabs .places a, 235 | .section-tabs .places a:visited { 236 | color: #3372df; 237 | } 238 | 239 | 240 | .section-tabs article { 241 | margin-bottom: 20px; 242 | } 243 | 244 | .section-tabs article .name { 245 | font-size: 18px; 246 | } 247 | 248 | .section-tabs article .walking-distance { 249 | color: #6e6e6e; 250 | font-size: 13px; 251 | margin: 0; 252 | } 253 | 254 | .section-map { 255 | float: right; 256 | width: 660px; 257 | } 258 | 259 | .section-map .nav { 260 | padding: 20px 0; 261 | } 262 | 263 | .section-map .form-group { 264 | display: inline; 265 | } 266 | 267 | .section-map #address { 268 | font-size: 14px; 269 | margin-right: 10px; 270 | width: 87%; 271 | } 272 | 273 | .section-map #submit { 274 | font-size: 10px; 275 | padding: 5px 15px; 276 | } 277 | 278 | #map { 279 | height: 500px; 280 | } 281 | 282 | .leaflet-popup-content h3 { 283 | margin: 0; 284 | } 285 | 286 | .leaflet-popup-content p { 287 | margin: 10px 0 ; 288 | } 289 | -------------------------------------------------------------------------------- /style.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css?family=Mukta'); 2 | 3 | :root {} 4 | 5 | body { 6 | margin: 0px; 7 | padding: 30px; 8 | font-family: "Mukta"; 9 | /* overflow: hidden; */ 10 | background: #f5f5f5; 11 | display: grid; 12 | grid-template: 13 | "header header header header" 14 | "content content content content" 15 | "... tools tools ..." 
16 | "footer footer footer footer"; 17 | } 18 | 19 | header { 20 | grid-area: header; 21 | } 22 | 23 | canvas { 24 | grid-area: content; 25 | /* box-shadow: -3px 2px 9px 6px black; */ 26 | border: 1px solid black; 27 | /* color: white; 28 | background-color: white; */ 29 | cursor: pointer; 30 | } 31 | 32 | .content { 33 | grid-area: content; 34 | } 35 | 36 | .tools { 37 | grid-area: content; 38 | /* align-self: center; 39 | width: 100px; 40 | height: 40px; */ 41 | /* border: 2px solid white; */ 42 | /* color: white; */ 43 | /* background-color: #222; */ 44 | /* font-weight: bold; 45 | margin: 0 15px; */ 46 | } 47 | 48 | .color-field { 49 | align-self: center; 50 | margin: 0 15px; 51 | height: 50px; 52 | } 53 | 54 | .pen-range { 55 | align-items: center; 56 | margin: 0 10px; 57 | } 58 | 59 | .details { 60 | grid-area: content; 61 | background-color: white; 62 | padding: 5px; 63 | padding-left: 20px; 64 | margin-top: 20px; 65 | margin-bottom: 20px; 66 | position: relative; 67 | top: 400px; 68 | } 69 | 70 | .tools { 71 | cursor: pointer; 72 | background-color: rgb(233, 233, 233); 73 | grid-area: tools; 74 | } 75 | 76 | .dropdowns { 77 | position: relative; 78 | top: 400px; 79 | } -------------------------------------------------------------------------------- /templates/about.html: -------------------------------------------------------------------------------- 1 | {% extends "layout.html" %} 2 | 3 | {% block content %} 4 |
5 |
6 | 7 |

About TwitcherPi

8 |

This tool enables you to capture images from the Pi and tag whether 9 | they contain birds or not, in readiness for labelling with ImgLabel

10 | 11 |
12 |
13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /templates/home.html: -------------------------------------------------------------------------------- 1 | {% extends "layout.html" %} 2 | {% block content %} 3 |
4 |
5 | 6 |

{{name}}

7 | 8 |
9 |
10 | {% endblock %} 11 | -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "layout.html" %} 2 | 3 | {% block content%} 4 |
5 |
6 | 7 |
8 |

Current Capture

9 | Sign Up 10 | Learn More 11 |
12 | 13 |
14 | 15 |
16 |
17 | 18 | {% endblock%} 19 | -------------------------------------------------------------------------------- /templates/layout.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
9 |
10 |

TwitcherPi

11 | 12 |
13 |
14 | 15 | {% block content %} 16 | {% endblock %} 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /templates/login.html: -------------------------------------------------------------------------------- 1 | {% extends "layout.html" %} 2 | 3 | {% block content %} 4 |
5 |
6 |

Log in

7 | 8 |
9 | {{ form.hidden_tag() }} 10 | 11 |
12 | {{ form.email.label }} 13 | {{ form.email }} 14 |
15 | 16 |
17 | {{ form.password.label }} 18 | {{ form.password }} 19 |
20 | 21 | {{ form.submit(class="btn-primary") }} 22 |
23 |
24 |
25 | {% endblock %} 26 | -------------------------------------------------------------------------------- /templates/signup.html: -------------------------------------------------------------------------------- 1 | {% extends "layout.html" %} 2 | 3 | {% block content %} 4 |
5 |
6 |

Create an account

7 | 8 |
9 | {{ form.hidden_tag() }} 10 | 11 |
12 | {{ form.first_name.label }} 13 | 14 | {% if form.first_name.errors %} 15 | {% for error in form.first_name.errors %} 16 |

{{ error }}

17 | {% endfor%} 18 | {% endif %} 19 | 20 | {{ form.first_name }} 21 |
22 | 23 |
24 | {{ form.last_name.label }} 25 | 26 | {% if form.last_name.errors %} 27 | {% for error in form.last_name.errors %} 28 |

{{ error }}

29 | {% endfor%} 30 | {% endif %} 31 | 32 | {{ form.last_name }} 33 |
34 | 35 |
36 | {{ form.email.label }} 37 | 38 | {% if form.email.errors %} 39 | {% for error in form.email.errors %} 40 |

{{ error }}

41 | {% endfor%} 42 | {% endif %} 43 | 44 | {{ form.email }} 45 |
46 | 47 |
48 | {{ form.password.label }} 49 | 50 | {% if form.password.errors %} 51 | {% for error in form.password.errors %} 52 |

{{ error }}

53 | {% endfor%} 54 | {% endif %} 55 | 56 | {{ form.password }} 57 |
58 | 59 | {{ form.submit(class="btn-primary") }} 60 | 61 |
62 |
63 |
64 | {% endblock%} 65 | -------------------------------------------------------------------------------- /test.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | Document 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 |

Image Labeler

16 |
17 |
18 | 19 | 20 | 21 |
22 | 23 |
24 | 25 |
26 |

Date: 28/12/2021, 10:00:00am

27 |
28 | 29 | 70 |
71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /webserver.py: -------------------------------------------------------------------------------- 1 | # webserver 2 | 3 | import json 4 | from flask import Flask, render_template, jsonify, request 5 | from flask.helpers import make_response 6 | from dal import ImageDocument 7 | from json import dumps, loads 8 | from flask_cors import CORS 9 | 10 | app = Flask(__name__) 11 | CORS(app) 12 | data = ImageDocument() 13 | 14 | @app.route('/') 15 | def home(name=None): 16 | return render_template('home.html', name=name) 17 | 18 | @app.route('/labels', methods = ['GET']) 19 | def labels(): 20 | global data 21 | my_labels = data.get_labels() 22 | print(type(my_labels)) 23 | print("got labels:",my_labels) 24 | return jsonify(my_labels) 25 | 26 | @app.route('/save', methods = ['POST']) 27 | def save(): 28 | global data 29 | save_data = request.get_json(force=True) 30 | print ("Data:",save_data) 31 | resp = jsonify(success=True) 32 | resp.status_code = 200 33 | return resp 34 | 35 | @app.route('/get_one', methods = ['GET']) 36 | def get_one(): 37 | """ Get one image file from the data layer """ 38 | global data 39 | image_data = data.get_one() 40 | return jsonify(image_data) 41 | 42 | @app.route('/image', methods= ['GET']) 43 | def get_image(): 44 | global data 45 | image_id = request.args.get('id') 46 | print("image_id is:", image_id) 47 | img = data.get_by_id(image_id) 48 | print(img) 49 | response = make_response(img) 50 | response.headers.set('Content-Type', 'image/jpeg') 51 | response.headers.set('Content-Disposition', 'attachment',filename=id+'.jpg') 52 | return response 53 | 54 | @app.route('/ids', methods = ['GET']) 55 | def get_ids(): 56 | global data 57 | ids = data.get_ids() 58 | return jsonify(ids) 59 | 60 | def main(): 61 | """ main event loop """ 62 | print("Starting TwitcherPi Database Server") 63 | app.run(host='0.0.0.0', port=2222) 64 | 65 | if __name__ == 
"__main__": 66 | main() -------------------------------------------------------------------------------- /webserver2.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, jsonify 2 | from dal2 import ImageDocument 3 | 4 | app = Flask(__name__) 5 | 6 | data = ImageDocument() 7 | 8 | @app.route('/') 9 | def home(): 10 | return "

hello world

" 11 | 12 | @app.route('/labels') 13 | def labels(): 14 | global data 15 | my_labels = data.get_labels() 16 | return jsonify(my_labels) 17 | 18 | @app.route('/ids') 19 | def get_ids(): 20 | global data 21 | ids = data.get_ids() 22 | return jsonify(ids) 23 | 24 | def main(): 25 | app.run(host='0.0.0.0', port=2222) 26 | 27 | if __name__ == "__main__": 28 | main() --------------------------------------------------------------------------------