├── tests
├── __init__.py
├── utils
│ ├── __init__.py
│ └── test_transform.py
├── backend
│ ├── __init__.py
│ └── test_common.py
├── layers
│ └── __init__.py
├── models
│ ├── __init__.py
│ ├── test_densenet.py
│ └── test_mobilenet.py
├── preprocessing
│ ├── __init__.py
│ └── test_image.py
├── requirements.txt
├── test_losses.py
└── bin
│ └── test_train.py
├── snapshots
└── .donotdelete
├── keras_retinanet
├── __init__.py
├── bin
│ ├── __init__.py
│ └── convert_model.py
├── utils
│ ├── __init__.py
│ ├── model.py
│ ├── compute_overlap.pyx
│ ├── gpu.py
│ ├── tf_version.py
│ ├── colors.py
│ ├── coco_eval.py
│ ├── config.py
│ ├── visualization.py
│ └── crops_sampling.py
├── preprocessing
│ ├── __init__.py
│ ├── coco.py
│ └── kitti.py
├── models
│ ├── mobilenetv3
│ │ ├── __init__.py
│ │ ├── mobilenet_v3_small.py
│ │ ├── mobilenet_v3_large.py
│ │ └── mobilenet_v3_base.py
│ ├── vgg.py
│ ├── densenet.py
│ ├── mobilenet.py
│ ├── mobilenet_v3.py
│ ├── __init__.py
│ └── resnet.py
├── backend
│ ├── __init__.py
│ └── backend.py
├── callbacks
│ ├── __init__.py
│ ├── common.py
│ ├── coco.py
│ └── eval.py
├── layers
│ └── __init__.py
├── initializers.py
└── losses.py
├── labels.csv
├── examples
└── 235.jpg
├── docs
├── imgs
│ ├── la-logo.jpg
│ ├── screenshot.png
│ ├── examples
│ │ ├── 01.png
│ │ ├── 02.png
│ │ ├── 03.png
│ │ ├── 04.png
│ │ ├── 05.png
│ │ ├── 06.png
│ │ ├── 07.png
│ │ ├── 08.png
│ │ ├── 09.png
│ │ └── 10.png
│ ├── lacmus-logo.png
│ ├── partners
│ │ ├── dtl-logo-200px.png
│ │ ├── ods-logo-200px.png
│ │ ├── gitbook-logo-200px.png
│ │ ├── teplica-logo-128px.png
│ │ ├── jetbrains_logo_200px.png
│ │ ├── lizaalert-logo-128px.png
│ │ └── novaya-gazeta-logo-128px.png
│ └── skhemes
│ │ ├── RescuerLaAppSkheme-v1.png
│ │ ├── RescuerLaBackendSkheme-v1.png
│ │ ├── RescuerLaAppSkheme-v1.drawio
│ │ └── RescuerLaBackendSkheme-v1.drawio
└── train-usage.md
├── data_utils
├── bboxCropper
│ ├── screenshot.PNG
│ ├── config.cfg
│ ├── README.md
│ └── bboxCropper.py
├── ImgGenerator
│ ├── imgs
│ │ ├── in_soup.PNG
│ │ ├── in_soup2.PNG
│ │ ├── on_snow.PNG
│ │ └── in_forest.PNG
│ ├── config.cfg
│ ├── annotation_template.xml
│ └── README.md
├── LaddAugmentor
│ ├── LaddAugmentor.csproj
│ └── Program.cs
├── LaddGenerator
│ ├── LaddGenerator.csproj
│ ├── ArgsParser.cs
│ ├── Annotation.cs
│ └── Program.cs
├── LaddValidator
│ ├── LaddValidator.csproj
│ ├── ArgsParser.cs
│ └── Program.cs
├── LaddGenerator.sln
├── README.md
├── yolo2voc.py
└── voc2coco.py
├── .gitmodules
├── openvino.dockerfile
├── setup.cfg
├── config.ini
├── config_p2_p5_low.ini
├── setup_conda_env.sh
├── .github
└── workflows
│ └── test.yaml
├── cpu.dockerfile
├── gpu.dockerfile
├── Makefile
├── README.md
├── keras2tf_2.py
├── setup.py
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
└── cli_inference.py
/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/snapshots/.donotdelete:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labels.csv:
--------------------------------------------------------------------------------
1 | Pedestrian,0
2 |
--------------------------------------------------------------------------------
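`labels.csv` above is the class-mapping file in keras-retinanet's CSV format: one `class_name,id` pair per line. A minimal sketch of the annotations file such a mapping is normally paired with, assuming this fork keeps the upstream fizyr keras-retinanet convention of `image_path,x1,y1,x2,y2,class_name` rows (the fork's own CSV generator is not shown in this digest, so the pairing here is illustrative):

```python
# Illustrative only: build an annotations CSV in the upstream keras-retinanet
# format (image_path,x1,y1,x2,y2,class_name) that pairs with labels.csv above.
import csv

def write_annotations(rows, path="annotations.csv"):
    """rows: iterable of (image_path, x1, y1, x2, y2, class_name) tuples."""
    with open(path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerows(rows)

write_annotations([("JPEGImages/0.jpg", 100, 200, 150, 260, "Pedestrian")])
```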
/tests/backend/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/layers/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/models/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/bin/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/utils/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/tests/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/models/mobilenetv3/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/keras_retinanet/backend/__init__.py:
--------------------------------------------------------------------------------
1 | from .backend import * # noqa: F401,F403
2 |
--------------------------------------------------------------------------------
/keras_retinanet/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | from .common import * # noqa: F401,F403
2 |
--------------------------------------------------------------------------------
/examples/235.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/examples/235.jpg
--------------------------------------------------------------------------------
/docs/imgs/la-logo.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/la-logo.jpg
--------------------------------------------------------------------------------
/docs/imgs/screenshot.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/screenshot.png
--------------------------------------------------------------------------------
/docs/imgs/examples/01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/01.png
--------------------------------------------------------------------------------
/docs/imgs/examples/02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/02.png
--------------------------------------------------------------------------------
/docs/imgs/examples/03.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/03.png
--------------------------------------------------------------------------------
/docs/imgs/examples/04.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/04.png
--------------------------------------------------------------------------------
/docs/imgs/examples/05.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/05.png
--------------------------------------------------------------------------------
/docs/imgs/examples/06.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/06.png
--------------------------------------------------------------------------------
/docs/imgs/examples/07.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/07.png
--------------------------------------------------------------------------------
/docs/imgs/examples/08.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/08.png
--------------------------------------------------------------------------------
/docs/imgs/examples/09.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/09.png
--------------------------------------------------------------------------------
/docs/imgs/examples/10.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/examples/10.png
--------------------------------------------------------------------------------
/docs/imgs/lacmus-logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/lacmus-logo.png
--------------------------------------------------------------------------------
/data_utils/bboxCropper/screenshot.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/data_utils/bboxCropper/screenshot.PNG
--------------------------------------------------------------------------------
/docs/imgs/partners/dtl-logo-200px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/dtl-logo-200px.png
--------------------------------------------------------------------------------
/docs/imgs/partners/ods-logo-200px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/ods-logo-200px.png
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "tests/test-data"]
2 | path = tests/test-data
3 | url = https://github.com/lacmus-foundation/lacmus-test-data.git
4 |
--------------------------------------------------------------------------------
/data_utils/ImgGenerator/imgs/in_soup.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/data_utils/ImgGenerator/imgs/in_soup.PNG
--------------------------------------------------------------------------------
/data_utils/ImgGenerator/imgs/in_soup2.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/data_utils/ImgGenerator/imgs/in_soup2.PNG
--------------------------------------------------------------------------------
/data_utils/ImgGenerator/imgs/on_snow.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/data_utils/ImgGenerator/imgs/on_snow.PNG
--------------------------------------------------------------------------------
/docs/imgs/partners/gitbook-logo-200px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/gitbook-logo-200px.png
--------------------------------------------------------------------------------
/docs/imgs/partners/teplica-logo-128px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/teplica-logo-128px.png
--------------------------------------------------------------------------------
/data_utils/ImgGenerator/imgs/in_forest.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/data_utils/ImgGenerator/imgs/in_forest.PNG
--------------------------------------------------------------------------------
/docs/imgs/partners/jetbrains_logo_200px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/jetbrains_logo_200px.png
--------------------------------------------------------------------------------
/docs/imgs/partners/lizaalert-logo-128px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/lizaalert-logo-128px.png
--------------------------------------------------------------------------------
/docs/imgs/skhemes/RescuerLaAppSkheme-v1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/skhemes/RescuerLaAppSkheme-v1.png
--------------------------------------------------------------------------------
/docs/imgs/partners/novaya-gazeta-logo-128px.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/partners/novaya-gazeta-logo-128px.png
--------------------------------------------------------------------------------
/docs/imgs/skhemes/RescuerLaBackendSkheme-v1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lacmus-foundation/lacmus/HEAD/docs/imgs/skhemes/RescuerLaBackendSkheme-v1.png
--------------------------------------------------------------------------------
/keras_retinanet/layers/__init__.py:
--------------------------------------------------------------------------------
1 | from ._misc import RegressBoxes, UpsampleLike, Anchors, ClipBoxes # noqa: F401
2 | from .filter_detections import FilterDetections # noqa: F401
3 |
--------------------------------------------------------------------------------
/tests/requirements.txt:
--------------------------------------------------------------------------------
1 | check-manifest
2 | image-classifiers
3 | efficientnet
4 | # pytest
5 | pytest-xdist
6 | pytest-cov
7 | pytest-flake8
8 | # flake8
9 | coverage
10 | codecov
11 |
--------------------------------------------------------------------------------
/data_utils/ImgGenerator/config.cfg:
--------------------------------------------------------------------------------
1 | DATASET_PATH = E:\test_dataset
2 | BACKGROUNDS_FOLDER_NAME = Backgrounds
3 | AUGMENTED_FOLDER_NAME = Augmented
4 | PADDING_WIDTH = 25
5 | INPAINT_PIXELS_WIDTH = 50
--------------------------------------------------------------------------------
/data_utils/bboxCropper/config.cfg:
--------------------------------------------------------------------------------
1 | CROP_SIZE = 512
2 | DATASET_PATH = C:\TMP\test
3 | CROPS_FOLDER_NAME = Crops
4 | FRAMES_FOLDER_NAME = Frames
5 | MASKS_FOLDER_NAME = Masks
6 | INVERT_MASKS = False
7 |
--------------------------------------------------------------------------------
/openvino.dockerfile:
--------------------------------------------------------------------------------
1 | FROM openvino/ubuntu18_runtime:latest
2 |
3 | RUN mkdir /home/openvino/lacmus
4 | WORKDIR /home/openvino/lacmus
5 | COPY cli_inference_openvino.py .
6 |
7 | CMD bash -c "source ${INTEL_OPENVINO_DIR}/bin/setupvars.sh"
--------------------------------------------------------------------------------
/data_utils/LaddAugmentor/LaddAugmentor.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 |
3 |     <PropertyGroup>
4 |         <OutputType>Exe</OutputType>
5 |         <TargetFramework>netcoreapp2.2</TargetFramework>
6 |     </PropertyGroup>
7 |
8 | </Project>
9 |
--------------------------------------------------------------------------------
/data_utils/LaddGenerator/LaddGenerator.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 |
3 |     <PropertyGroup>
4 |         <OutputType>Exe</OutputType>
5 |         <TargetFramework>netcoreapp2.2</TargetFramework>
6 |     </PropertyGroup>
7 |
8 | </Project>
9 |
--------------------------------------------------------------------------------
/data_utils/LaddValidator/LaddValidator.csproj:
--------------------------------------------------------------------------------
1 | <Project Sdk="Microsoft.NET.Sdk">
2 |
3 |     <PropertyGroup>
4 |         <OutputType>Exe</OutputType>
5 |         <TargetFramework>netcoreapp2.2</TargetFramework>
6 |     </PropertyGroup>
7 |
8 | </Project>
9 |
--------------------------------------------------------------------------------
/data_utils/LaddAugmentor/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 |
3 | namespace LaddAugmentor
4 | {
5 | class Program
6 | {
7 | static void Main(string[] args)
8 | {
9 | Console.WriteLine("Hello World!");
10 | }
11 | }
12 | }
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # ignore:
2 | # E201 whitespace after '['
3 | # E202 whitespace before ']'
4 | # E203 whitespace before ':'
5 | # E221 multiple spaces before operator
6 | # E241 multiple spaces after ','
7 | # E251 unexpected spaces around keyword / parameter equals
8 | # E501 line too long (85 > 79 characters)
9 | # W504 line break after binary operator
10 | [tool:pytest]
11 | flake8-max-line-length = 100
12 | flake8-ignore = E201 E202 E203 E221 E241 E251 E402 E501 W504
13 |
--------------------------------------------------------------------------------
/config.ini:
--------------------------------------------------------------------------------
1 | [anchor_parameters]
2 | sizes = 16 32 64 128 256
3 | strides = 8 16 32 64 128
4 | ratios = 0.5 1 2 3
5 | scales = 1 1.2 1.6
6 |
7 | [random_transform_parameters]
8 | min_rotation = -0.1
9 | max_rotation = 0.1
10 | min_translation = -0.1 -0.1
11 | max_translation = 0.1 0.1
12 | min_shear = -0.1
13 | max_shear = 0.1
14 | min_scaling = 0.9 0.9
15 | max_scaling = 1.1 1.1
16 | flip_x_chance = 0.5
17 | flip_y_chance = 0.1
18 |
19 | [visual_effect_parameters]
20 | contrast_range = 0.9 1.1
21 | brightness_range = -.1 .1
22 | hue_range = -0.05 0.05
23 | saturation_range = 0.95 1.05
--------------------------------------------------------------------------------
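`config.ini` above groups the anchor and augmentation settings that the training scripts read via an INI parser (the project's own loader lives in `keras_retinanet/utils/config.py`, whose content is not included in this digest). A minimal sketch of reading the `[anchor_parameters]` section with the standard library, with illustrative function names:

```python
# Minimal sketch (illustrative names): read [anchor_parameters] from config.ini
# with configparser; the project's real loader is keras_retinanet/utils/config.py.
import configparser

def load_anchor_parameters(path="config.ini"):
    cfg = configparser.ConfigParser()
    cfg.read(path)
    section = cfg["anchor_parameters"]
    return {
        "sizes":   [int(v)   for v in section["sizes"].split()],
        "strides": [int(v)   for v in section["strides"].split()],
        "ratios":  [float(v) for v in section["ratios"].split()],
        "scales":  [float(v) for v in section["scales"].split()],
    }

print(load_anchor_parameters())  # sizes [16, 32, 64, 128, 256], strides [8, 16, 32, 64, 128], ...
```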
/config_p2_p5_low.ini:
--------------------------------------------------------------------------------
1 | [anchor_parameters]
2 | sizes = 16 32 64 128
3 | strides = 4 8 16 32
4 | ratios = 0.5 1 2 3
5 | scales = 1 1.2 1.6
6 |
7 | [pyramid_levels]
8 | levels = 2 3 4 5
9 |
10 | [random_transform_parameters]
11 | min_rotation = -0.1
12 | max_rotation = 0.1
13 | min_translation = -0.1 -0.1
14 | max_translation = 0.1 0.1
15 | min_shear = -0.1
16 | max_shear = 0.1
17 | min_scaling = 0.9 0.9
18 | max_scaling = 1.1 1.1
19 | flip_x_chance = 0.5
20 | flip_y_chance = 0.1
21 |
22 | [visual_effect_parameters]
23 | contrast_range = 0.9 1.1
24 | brightness_range = -.1 .1
25 | hue_range = -0.05 0.05
26 | saturation_range = 0.95 1.05
27 |
--------------------------------------------------------------------------------
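Compared to `config.ini`, this variant adds a `[pyramid_levels]` section and shifts the feature pyramid down to P2-P5; the listed strides are consistent with the usual FPN convention that the stride at pyramid level L is 2^L:

```python
# Sanity check of the P2-P5 configuration above: stride at level L is 2 ** L.
levels = [2, 3, 4, 5]
strides = [2 ** lvl for lvl in levels]
assert strides == [4, 8, 16, 32]  # matches the "strides" line in config_p2_p5_low.ini
```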
/data_utils/ImgGenerator/annotation_template.xml:
--------------------------------------------------------------------------------
1 |
2 | <annotation>
3 |     <folder>Unknown</folder>
4 |     <filename>__</filename>
5 |     <source>
6 |         <database>Unknown</database>
7 |     </source>
8 |     <size>
9 |         <width>__</width>
10 |         <height>__</height>
11 |         <depth>3</depth>
12 |     </size>
13 |     <segmented>0</segmented>
...     <!-- lines 14-25: <object> placeholder block (name, bndbox) lost in extraction -->
26 | </annotation>
--------------------------------------------------------------------------------
/setup_conda_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # This script creates and configures conda environment for lacmus project.
4 | # The name of the environment will be 'lacmusenv' by default or the one you passed as the first argument.
5 | # Usage:
6 | # ./setup_conda_env.sh [environment_name]
7 |
8 | # Do not forget to grant the script execute permission by:
9 | # chmod +x ./setup_conda_env.sh
10 |
11 |
12 | env_name=$1
13 |
14 | if [ -z "$env_name" ]
15 | then
16 | env_name="lacmusenv"
17 | fi
18 |
19 | conda create -n $env_name python=3.7 anaconda
20 | source activate $env_name
21 | conda install tensorflow-gpu==1.14
22 | pip install numpy --user
23 | pip install . --user
24 | python setup.py build_ext --inplace
25 |
26 | echo
27 | echo "Done creating $env_name environment"
28 |
--------------------------------------------------------------------------------
/.github/workflows/test.yaml:
--------------------------------------------------------------------------------
1 | name: Unit testing on ubuntu
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | build:
7 | runs-on: ubuntu-latest
8 | steps:
9 | - uses: actions/checkout@v1
10 | - name: Set up python
11 | uses: actions/setup-python@v1
12 | with:
13 | python-version: 3.7
14 | - name: Install lacmus
15 | run: |
16 | python -m pip install --upgrade pip
17 | pip install tensorflow==2.3.0
18 | pip install keras==2.4.3
19 | pip install opencv-python
20 | pip install --upgrade setuptools
21 | pip install .
22 | python setup.py build_ext --inplace
23 | pip install pytest
24 | pip install pycocotools
25 | git clone https://github.com/lacmus-foundation/lacmus-test-data.git tests/test-data
26 | - name: Run tests
27 | run: pytest tests
--------------------------------------------------------------------------------
/tests/preprocessing/test_image.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 | from PIL import Image
4 | from keras_retinanet.utils import image
5 | import numpy as np
6 |
7 | _STUB_IMG_FNAME = 'stub-image.jpg'
8 |
9 |
10 | @pytest.fixture(autouse=True)
11 | def run_around_tests(tmp_path):
12 | """Create a temp image for test"""
13 | rand_img = np.random.randint(0, 255, (3, 3, 3), dtype='uint8')
14 | Image.fromarray(rand_img).save(os.path.join(tmp_path, _STUB_IMG_FNAME))
15 | yield
16 |
17 |
18 | def test_read_image_bgr(tmp_path):
19 | stub_image_path = os.path.join(tmp_path, _STUB_IMG_FNAME)
20 |
21 | original_img = np.asarray(Image.open(
22 | stub_image_path).convert('RGB'))[:, :, ::-1]
23 | loaded_image = image.read_image_bgr(stub_image_path)
24 |
25 | # Assert images are equal
26 | np.testing.assert_array_equal(original_img, loaded_image)
27 |
--------------------------------------------------------------------------------
/cpu.dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.4.2
2 |
3 | # install debian packages
4 | ENV DEBIAN_FRONTEND noninteractive
5 | RUN apt-get update -qq \
6 | && apt-get install --no-install-recommends -y \
7 | # install essentials
8 | build-essential \
9 | wget \
10 | git \
11 | cython \
12 | ffmpeg \
13 | libsm6 \
14 | libxext6 \
15 | # requirements for numpy
16 | libopenblas-base \
17 | python3-numpy \
18 | python3-scipy \
19 | # requirements for keras
20 | python3-h5py \
21 | python3-yaml \
22 | python3-pydot \
23 | && apt-get clean \
24 | && rm -rf /var/lib/apt/lists/*
25 |
26 | RUN mkdir /opt/lacmus
27 | WORKDIR /opt/lacmus
28 | COPY . .
29 |
30 | RUN pip3 install --upgrade setuptools \
31 | && pip3 install opencv-python \
32 | && pip3 install git+https://github.com/lacmus-foundation/keras-resnet.git \
33 | && pip3 install . \
34 | && python3 setup.py build_ext --inplace
35 |
36 | ENTRYPOINT ["bash"]
--------------------------------------------------------------------------------
/tests/test_losses.py:
--------------------------------------------------------------------------------
1 | import keras_retinanet.losses
2 | from tensorflow import keras
3 |
4 | import numpy as np
5 |
6 | import pytest
7 |
8 |
9 | def test_smooth_l1():
10 | regression = np.array([
11 | [
12 | [0, 0, 0, 0],
13 | [0, 0, 0, 0],
14 | [0, 0, 0, 0],
15 | [0, 0, 0, 0],
16 | ]
17 | ], dtype=keras.backend.floatx())
18 | regression = keras.backend.variable(regression)
19 |
20 | regression_target = np.array([
21 | [
22 | [0, 0, 0, 1, 1],
23 | [0, 0, 1, 0, 1],
24 | [0, 0, 0.05, 0, 1],
25 | [0, 0, 1, 0, 0],
26 | ]
27 | ], dtype=keras.backend.floatx())
28 | regression_target = keras.backend.variable(regression_target)
29 |
30 | loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
31 | loss = keras.backend.eval(loss)
32 |
33 | assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
34 |
--------------------------------------------------------------------------------
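The expected value in the final assert can be reproduced by hand, assuming the keras-retinanet smooth-L1 definition with `sigma = 3` (quadratic below `|x| < 1/sigma²`, linear above) and normalization by the number of anchors whose last target column marks them as positive (rows 0-2 above):

```python
# Recomputing the expected smooth-L1 loss by hand (assumes sigma = 3 and
# normalization by the count of positive anchors, as in keras-retinanet).
sigma_sq = 3.0 ** 2

def smooth_l1(x):
    return 0.5 * sigma_sq * x ** 2 if abs(x) < 1.0 / sigma_sq else abs(x) - 0.5 / sigma_sq

# each positive row has exactly one non-zero regression error: 1, 1 and 0.05
expected = (smooth_l1(1.0) + smooth_l1(1.0) + smooth_l1(0.05)) / 3
print(expected)  # ~0.6334, the same value as the pytest.approx expression
```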
/gpu.dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.4.2-gpu
2 |
3 | # install debian packages
4 | ENV DEBIAN_FRONTEND noninteractive
5 | RUN apt-get update -qq \
6 | && apt-get install --no-install-recommends -y \
7 | # install essentials
8 | build-essential \
9 | wget \
10 | git \
11 | g++ \
12 | cython \
13 | ffmpeg \
14 | libsm6 \
15 | libxext6 \
16 | # requirements for numpy
17 | libopenblas-base \
18 | python3-numpy \
19 | python3-scipy \
20 | # requirements for keras
21 | python3-h5py \
22 | python3-yaml \
23 | python3-pydot \
24 | && apt-get clean \
25 | && rm -rf /var/lib/apt/lists/*
26 |
27 | RUN mkdir /opt/lacmus
28 | WORKDIR /opt/lacmus
29 | COPY . .
30 |
31 | RUN pip3 install --upgrade setuptools \
32 | && pip3 install opencv-python \
33 | && pip3 install git+https://github.com/lacmus-foundation/keras-resnet.git \
34 | && pip3 install . \
35 | && python3 setup.py build_ext --inplace
36 |
37 | ENTRYPOINT ["bash"]
--------------------------------------------------------------------------------
/data_utils/bboxCropper/README.md:
--------------------------------------------------------------------------------
1 | ### Cropping tool for dataset images
2 | Before running **bboxCropper.py**: update **config.cfg** with your data.
3 |
4 |
5 | #### Inputs
6 | Dataset, located at **DATASET_PATH**.
7 |
8 |
9 | #### Outputs
10 | - creates the folders **CROPS_FOLDER_NAME**, **FRAMES_FOLDER_NAME** and **MASKS_FOLDER_NAME** in the dataset folder.
11 | - crops a square of **CROP_SIZE** × **CROP_SIZE** around the bbox center of the image, with a random shift, and saves it to the **CROPS_FOLDER_NAME** folder.
12 | - cuts out the bbox content from this crop and saves it to the **FRAMES_FOLDER_NAME** folder.
13 | - creates a binary mask of the same size as the crop, with True pixels under the bbox and False elsewhere, and saves it to the **MASKS_FOLDER_NAME** folder.
14 |
15 | #### About masks
16 | - by default, mask pixels are True where image inpainting is required and False elsewhere. To invert the masks, set INVERT_MASKS in the cfg file.
17 |
18 | 
19 |
--------------------------------------------------------------------------------
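A minimal sketch of the crop-and-mask step described in the README above, assuming boxes are given as `(xmin, ymin, xmax, ymax)` pixel coordinates; `bboxCropper.py` itself is not included in this digest, so the names and the shift range here are illustrative:

```python
# Illustrative sketch (not the actual bboxCropper.py): crop a CROP_SIZE square
# around the bbox center with a random shift, and build the boolean bbox mask.
import numpy as np

CROP_SIZE = 512

def crop_with_mask(image, bbox, max_shift=50, rng=np.random.default_rng()):
    xmin, ymin, xmax, ymax = bbox
    cx = (xmin + xmax) // 2 + int(rng.integers(-max_shift, max_shift + 1))
    cy = (ymin + ymax) // 2 + int(rng.integers(-max_shift, max_shift + 1))
    h, w = image.shape[:2]
    x0 = int(np.clip(cx - CROP_SIZE // 2, 0, w - CROP_SIZE))
    y0 = int(np.clip(cy - CROP_SIZE // 2, 0, h - CROP_SIZE))
    crop = image[y0:y0 + CROP_SIZE, x0:x0 + CROP_SIZE]
    mask = np.zeros((CROP_SIZE, CROP_SIZE), dtype=bool)
    mask[ymin - y0:ymax - y0, xmin - x0:xmax - x0] = True  # True where inpainting is needed
    return crop, mask
```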
/keras_retinanet/utils/model.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 |
18 | def freeze(model):
19 | """ Set all layers in a model to non-trainable.
20 |
21 | The weights for these layers will not be updated during training.
22 |
23 | This function modifies the given model in-place,
24 | but it also returns the modified model to allow easy chaining with other functions.
25 | """
26 | for layer in model.layers:
27 | layer.trainable = False
28 | return model
29 |
--------------------------------------------------------------------------------
/keras_retinanet/initializers.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | from tensorflow import keras
18 |
19 | import math
20 |
21 |
22 | class PriorProbability(keras.initializers.Initializer):
23 | """ Apply a prior probability to the weights.
24 | """
25 |
26 | def __init__(self, probability=0.01):
27 | self.probability = probability
28 |
29 | def get_config(self):
30 | return {
31 | 'probability': self.probability
32 | }
33 |
34 | def __call__(self, shape, dtype=None):
35 | # set bias to -log((1 - p)/p) for foreground
36 | result = keras.backend.ones(shape, dtype=dtype) * -math.log((1 - self.probability) / self.probability)
37 |
38 | return result
39 |
--------------------------------------------------------------------------------
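`PriorProbability` implements the RetinaNet classification-bias prior: with bias `-log((1 - p) / p)`, every anchor starts out predicting foreground with probability roughly `p`, which keeps the focal loss stable early in training. A minimal usage sketch, assuming it is wired in as the bias initializer of the final classification convolution (as the upstream keras-retinanet head does); the layer shape values are examples:

```python
# Minimal usage sketch: start the classification head with ~1% foreground
# probability per anchor (the RetinaNet prior). Shape values are illustrative.
from tensorflow import keras
from keras_retinanet.initializers import PriorProbability

num_classes, num_anchors = 1, 9
classification_head = keras.layers.Conv2D(
    filters=num_classes * num_anchors,
    kernel_size=3,
    padding="same",
    bias_initializer=PriorProbability(probability=0.01),
    activation="sigmoid",
)
```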
/docs/imgs/skhemes/RescuerLaAppSkheme-v1.drawio:
--------------------------------------------------------------------------------
1 | 7VnLcpswFP0aL5sxCIy9jB9p2jpNOl40WSoggxoZeYSIcb++FyMMWI4hHb/iduORjh6Wzr3nXkm00GCWfBZ4Htxxj7CW2faSFhq2TNOxe/CbAssMsEwF+IJ6GWQUwIT+JgpsKzSmHokqHSXnTNJ5FXR5GBJXVjAsBF9Uu005q/7rHPtEAyYuZjr6k3oyyNCu6RT4LaF+kP+z0VH7m+G8s9pJFGCPL0oQGrXQQHAus9IsGRCWcpfzko27eaN1vTBBQtlkwA8jGQT36Mft129hbxS/Pny3x5+sbJZXzGK1YbVYucwZEDwOPZJO0m6h/iKgkkzm2E1bF2BywAI5Y1AzoKimI0KS5M11Guvdg9cQPiNSLKFLPqCnCFMeg1R1UdBvOYr+oEx9R3XEyuT+euqCFSgoYt5BktHVWDI0mogHfqOqXMiA+zzEbFSg/SqRRZ8x53NF3y8i5VKJAMeSV8klCZWPpfJTOtWVrWrDRM28qizzSggMPJYrpVFptRi2quXjsv2lm9ptRuCAx8IlO+izlWyx8Inc0a+z3S0EYVjS1+o69m5ju14IIOB5WnSXjIIhRb0anjOLj5/XAHZf/JUf3McSZiH7k806YuaBVpdNLpCyaroHE01PY1Sn9L9odoqhgWgM65Sq6dSrpkYiOJpnaXtKk9TM+5ACMu06KXSPKQXnLFmyndqAcVSW8vh1zmeRdQg94WHE0GiaEAG71MiCbcsqI5EU/IUMOOOQvIYhT/NPf0oZ24Awo36YZjqgaZXnUhIpHIqvVcOMet4qPG8zQdVIB7CCoRvB2eKp5qFMYCLNBNYpU1uRzp5KLXWp7cq0K9mtJrdFkIrkdXqjSh2D4Siibg7fULZeU+jlnZQzAaLa95wiDbNpjmxvd6/j5Mh8mSVvGWKJJ7Bksz3sX65sN0+kRp6Wa46k6GChU9ftmGAR0tAH9I57MZBzseZAyD4vc5h6wkenDKMf7IZgWA3Dn3PS6Nfghem8L9ZW57wu1jmBJUbtD3j6+PvDhwqv5ZPHvqXV9MnKPOnt29AfrR4EzIzB/70soxEWXfgpA+R5tZnYminUOphC9Sta5wMq1HGsanJrt60ana5qD0RQIDL1jffcHPap38avZ6e9GejPZ1/CKYH9wu4u/0Bqd8/sfmC+62uYcZQXKMtGFZJMRycph45Dkv6uP+bgQwD9S85rbbyeom2fKffzKAXV4ivxqq30qR2N/gA=
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 |
2 | # Version for docker images from git
3 | RLA_VERSION := $(shell git describe --abbrev=0 --tags)
4 |
5 | # Build both docker images
6 | .PHONY: build-all
7 | build-all: build build-gpu
8 |
9 |
10 | # Build docker image. Application using GPU (nvidia docker needed)
11 | .PHONY: build-gpu
12 | build-gpu:
13 | docker build --file gpu.dockerfile -t lizaalert/lacmus:$(RLA_VERSION)-gpu .
14 |
15 | # Build docker image. Application using CPU
16 | .PHONY: build
17 | build:
18 | docker build --file cpu.dockerfile -t lizaalert/lacmus:$(RLA_VERSION) .
19 |
20 | # Build and run docker image. Application using CPU
21 | .PHONY: run
22 | run: build
23 | docker run --rm \
24 | -v /tmp/.X11-unix:/tmp/.X11-unix \
25 | -e DISPLAY=unix$(DISPLAY) \
26 | --workdir=$(PWD) \
27 | --volume="/home/$(USER):/home/$(USER)" \
28 | --volume="/etc/group:/etc/group:ro" \
29 | --volume="/etc/passwd:/etc/passwd:ro" \
30 | --volume="/etc/shadow:/etc/shadow:ro" \
31 | --volume="/etc/sudoers.d:/etc/sudoers.d:ro" \
32 | lizaalert/lacmus:$(RLA_VERSION)
33 |
34 | # Build and run docker image. Application using GPU
35 | run-gpu: build-gpu
36 | docker run --rm \
37 | --runtime=nvidia \
38 | -v /tmp/.X11-unix:/tmp/.X11-unix \
39 | -e DISPLAY=unix$(DISPLAY) \
40 | --workdir=$(PWD) \
41 | --volume="/home/$(USER):/home/$(USER)" \
42 | --volume="/etc/group:/etc/group:ro" \
43 | --volume="/etc/passwd:/etc/passwd:ro" \
44 | --volume="/etc/shadow:/etc/shadow:ro" \
45 | --volume="/etc/sudoers.d:/etc/sudoers.d:ro" \
46 | lizaalert/lacmus:$(RLA_VERSION)-gpu
47 |
48 |
--------------------------------------------------------------------------------
/keras_retinanet/callbacks/common.py:
--------------------------------------------------------------------------------
1 | from tensorflow import keras
2 |
3 |
4 | class RedirectModel(keras.callbacks.Callback):
5 | """Callback which wraps another callback, but executed on a different model.
6 |
7 | ```python
8 | model = keras.models.load_model('model.h5')
9 | model_checkpoint = ModelCheckpoint(filepath='snapshot.h5')
10 | parallel_model = multi_gpu_model(model, gpus=2)
11 | parallel_model.fit(X_train, Y_train, callbacks=[RedirectModel(model_checkpoint, model)])
12 | ```
13 |
14 | Args
15 | callback : callback to wrap.
16 | model : model to use when executing callbacks.
17 | """
18 |
19 | def __init__(self,
20 | callback,
21 | model):
22 | super(RedirectModel, self).__init__()
23 |
24 | self.callback = callback
25 | self.redirect_model = model
26 |
27 | def on_epoch_begin(self, epoch, logs=None):
28 | self.callback.on_epoch_begin(epoch, logs=logs)
29 |
30 | def on_epoch_end(self, epoch, logs=None):
31 | self.callback.on_epoch_end(epoch, logs=logs)
32 |
33 | def on_batch_begin(self, batch, logs=None):
34 | self.callback.on_batch_begin(batch, logs=logs)
35 |
36 | def on_batch_end(self, batch, logs=None):
37 | self.callback.on_batch_end(batch, logs=logs)
38 |
39 | def on_train_begin(self, logs=None):
40 | # overwrite the model with our custom model
41 | self.callback.set_model(self.redirect_model)
42 |
43 | self.callback.on_train_begin(logs=logs)
44 |
45 | def on_train_end(self, logs=None):
46 | self.callback.on_train_end(logs=logs)
47 |
--------------------------------------------------------------------------------
/data_utils/LaddGenerator.sln:
--------------------------------------------------------------------------------
1 |
2 | Microsoft Visual Studio Solution File, Format Version 12.00
3 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LaddGenerator", ".\LaddGenerator\LaddGenerator.csproj", "{D452A77C-D639-4376-8372-BA5B8AC67CF4}"
4 | EndProject
5 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LaddValidator", ".\LaddValidator\LaddValidator.csproj", "{C9A6B61C-8F39-4A32-BDA0-6C248153448E}"
6 | EndProject
7 | Global
8 | GlobalSection(SolutionConfigurationPlatforms) = preSolution
9 | Debug|Any CPU = Debug|Any CPU
10 | Release|Any CPU = Release|Any CPU
11 | EndGlobalSection
12 | GlobalSection(ProjectConfigurationPlatforms) = postSolution
13 | {D452A77C-D639-4376-8372-BA5B8AC67CF4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
14 | {D452A77C-D639-4376-8372-BA5B8AC67CF4}.Debug|Any CPU.Build.0 = Debug|Any CPU
15 | {D452A77C-D639-4376-8372-BA5B8AC67CF4}.Release|Any CPU.ActiveCfg = Release|Any CPU
16 | {D452A77C-D639-4376-8372-BA5B8AC67CF4}.Release|Any CPU.Build.0 = Release|Any CPU
17 | {2C83E9CE-CD86-4B8F-AC3A-60CB7ACB9F07}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
18 | {2C83E9CE-CD86-4B8F-AC3A-60CB7ACB9F07}.Debug|Any CPU.Build.0 = Debug|Any CPU
19 | {2C83E9CE-CD86-4B8F-AC3A-60CB7ACB9F07}.Release|Any CPU.ActiveCfg = Release|Any CPU
20 | {2C83E9CE-CD86-4B8F-AC3A-60CB7ACB9F07}.Release|Any CPU.Build.0 = Release|Any CPU
21 | {C9A6B61C-8F39-4A32-BDA0-6C248153448E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
22 | {C9A6B61C-8F39-4A32-BDA0-6C248153448E}.Debug|Any CPU.Build.0 = Debug|Any CPU
23 | {C9A6B61C-8F39-4A32-BDA0-6C248153448E}.Release|Any CPU.ActiveCfg = Release|Any CPU
24 | {C9A6B61C-8F39-4A32-BDA0-6C248153448E}.Release|Any CPU.Build.0 = Release|Any CPU
25 | EndGlobalSection
26 | EndGlobal
27 |
--------------------------------------------------------------------------------
/tests/models/test_densenet.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2018 vidosits (https://github.com/vidosits/)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | import warnings
18 | import pytest
19 | import numpy as np
20 | from tensorflow import keras
21 | from keras_retinanet import losses
22 | from keras_retinanet.models.densenet import DenseNetBackbone
23 |
24 | parameters = ['densenet121']
25 |
26 |
27 | @pytest.mark.parametrize("backbone", parameters)
28 | def test_backbone(backbone):
29 | # ignore warnings in this test
30 | warnings.simplefilter('ignore')
31 |
32 | num_classes = 10
33 |
34 | inputs = np.zeros((1, 200, 400, 3), dtype=np.float32)
35 | targets = [np.zeros((1, 14814, 5), dtype=np.float32), np.zeros((1, 14814, num_classes + 1))]
36 |
37 | inp = keras.layers.Input(inputs[0].shape)
38 |
39 | densenet_backbone = DenseNetBackbone(backbone)
40 | model = densenet_backbone.retinanet(num_classes=num_classes, inputs=inp)
41 | model.summary()
42 |
43 | # compile model
44 | model.compile(
45 | loss={
46 | 'regression': losses.smooth_l1(),
47 | 'classification': losses.focal()
48 | },
49 | optimizer=keras.optimizers.Adam(lr=1e-5, clipnorm=0.001))
50 |
51 | model.fit(inputs, targets, batch_size=1)
52 |
--------------------------------------------------------------------------------
/data_utils/LaddGenerator/ArgsParser.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 |
4 | namespace LaddGenerator
5 | {
6 | public class ArgsParser
7 | {
8 | private readonly Dictionary<string, string> _argsKeys;
9 |
10 | public ArgsParser(Dictionary<string, string> argsKeys)
11 | {
12 | _argsKeys = argsKeys;
13 | }
14 |
15 | public Dictionary<string, string> Parse(string[] args)
16 | {
17 | if (args.Length == 0)
18 | {
19 | List<string> argsList = new List<string>();
20 | Console.Write("usage\n");
21 | foreach (var (key, value) in _argsKeys)
22 | {
23 | Console.WriteLine($"\t{key}\t{value}");
24 | }
25 | for (int i = 0; i < _argsKeys.Count; i++)
26 | {
27 | argsList.AddRange(Console.ReadLine()?.Split(' '));
28 | }
29 |
30 | args = argsList.ToArray();
31 | }
32 | else if (args.Length / 2 != _argsKeys.Count)
33 | {
34 | Console.Write("usage\n");
35 | foreach (var (key, value) in _argsKeys)
36 | {
37 | Console.WriteLine($"\t{key}\t{value}");
38 | }
39 |
40 | return null;
41 | }
42 |
43 | Dictionary<string, string> result = new Dictionary<string, string>();
44 | foreach (var (key, value) in _argsKeys)
45 | {
46 | for (int i = 0; i < args.Length; i += 2)
47 | {
48 | if(args[i].Contains(key))
49 | result.Add(key.Replace("--", ""), args[i+1]);
50 | }
51 | }
52 |
53 | return result;
54 | }
55 | }
56 | }
--------------------------------------------------------------------------------
/data_utils/LaddValidator/ArgsParser.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 |
4 | namespace LaddValidator
5 | {
6 | public class ArgsParser
7 | {
8 | private readonly Dictionary<string, string> _argsKeys;
9 |
10 | public ArgsParser(Dictionary<string, string> argsKeys)
11 | {
12 | _argsKeys = argsKeys;
13 | }
14 |
15 | public Dictionary<string, string> Parse(string[] args)
16 | {
17 | if (args.Length == 0)
18 | {
19 | List<string> argsList = new List<string>();
20 | Console.Write("usage\n");
21 | foreach (var (key, value) in _argsKeys)
22 | {
23 | Console.WriteLine($"\t{key}\t{value}");
24 | }
25 | for (int i = 0; i < _argsKeys.Count; i++)
26 | {
27 | argsList.AddRange(Console.ReadLine()?.Split(' '));
28 | }
29 |
30 | args = argsList.ToArray();
31 | }
32 | else if (args.Length / 2 != _argsKeys.Count)
33 | {
34 | Console.Write("usage\n");
35 | foreach (var (key, value) in _argsKeys)
36 | {
37 | Console.WriteLine($"\t{key}\t{value}");
38 | }
39 |
40 | return null;
41 | }
42 |
43 | Dictionary<string, string> result = new Dictionary<string, string>();
44 | foreach (var (key, value) in _argsKeys)
45 | {
46 | for (int i = 0; i < args.Length; i += 2)
47 | {
48 | if(args[i].Contains(key))
49 | result.Add(key.Replace("--", ""), args[i+1]);
50 | }
51 | }
52 |
53 | return result;
54 | }
55 | }
56 | }
--------------------------------------------------------------------------------
/keras_retinanet/utils/compute_overlap.pyx:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------
2 | # Fast R-CNN
3 | # Copyright (c) 2015 Microsoft
4 | # Licensed under The MIT License [see LICENSE for details]
5 | # Written by Sergey Karayev
6 | # --------------------------------------------------------
7 |
8 | cimport cython
9 | import numpy as np
10 | cimport numpy as np
11 |
12 |
13 | def compute_overlap(
14 | np.ndarray[double, ndim=2] boxes,
15 | np.ndarray[double, ndim=2] query_boxes
16 | ):
17 | """
18 | Args
19 | boxes: (N, 4) ndarray of float
20 | query_boxes: (K, 4) ndarray of float
21 |
22 | Returns
23 | overlaps: (N, K) ndarray of overlap between boxes and query_boxes
24 | """
25 | cdef unsigned int N = boxes.shape[0]
26 | cdef unsigned int K = query_boxes.shape[0]
27 | cdef np.ndarray[double, ndim=2] overlaps = np.zeros((N, K), dtype=np.float64)
28 | cdef double iw, ih, box_area
29 | cdef double ua
30 | cdef unsigned int k, n
31 | for k in range(K):
32 | box_area = (
33 | (query_boxes[k, 2] - query_boxes[k, 0]) *
34 | (query_boxes[k, 3] - query_boxes[k, 1])
35 | )
36 | for n in range(N):
37 | iw = (
38 | min(boxes[n, 2], query_boxes[k, 2]) -
39 | max(boxes[n, 0], query_boxes[k, 0])
40 | )
41 | if iw > 0:
42 | ih = (
43 | min(boxes[n, 3], query_boxes[k, 3]) -
44 | max(boxes[n, 1], query_boxes[k, 1])
45 | )
46 | if ih > 0:
47 | ua = np.float64(
48 | (boxes[n, 2] - boxes[n, 0]) *
49 | (boxes[n, 3] - boxes[n, 1]) +
50 | box_area - iw * ih
51 | )
52 | overlaps[n, k] = iw * ih / ua
53 | return overlaps
54 |
--------------------------------------------------------------------------------
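`compute_overlap` returns the pairwise IoU matrix between two sets of `(x1, y1, x2, y2)` boxes. For reference, a plain NumPy version with the same semantics (handy for checking results without building the Cython extension; it is not part of the package):

```python
# Pure-NumPy reference for compute_overlap: (N, K) IoU matrix for boxes in
# (x1, y1, x2, y2) format, with no +1 pixel correction, like the .pyx above.
import numpy as np

def compute_overlap_np(boxes, query_boxes):
    boxes = np.asarray(boxes, dtype=np.float64)
    query = np.asarray(query_boxes, dtype=np.float64)
    iw = (np.minimum(boxes[:, None, 2], query[None, :, 2])
          - np.maximum(boxes[:, None, 0], query[None, :, 0])).clip(min=0)
    ih = (np.minimum(boxes[:, None, 3], query[None, :, 3])
          - np.maximum(boxes[:, None, 1], query[None, :, 1])).clip(min=0)
    intersection = iw * ih
    area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_query = (query[:, 2] - query[:, 0]) * (query[:, 3] - query[:, 1])
    union = area_boxes[:, None] + area_query[None, :] - intersection
    return intersection / union
```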
/keras_retinanet/utils/gpu.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://github.com/lacmus-foundation/lacmus
3 | Copyright (C) 2019-2020 lacmus-foundation
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU General Public License as published by
7 | the Free Software Foundation, either version 3 of the License, or
8 | (at your option) any later version.
9 |
10 | This program is distributed in the hope that it will be useful,
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | GNU General Public License for more details.
14 |
15 | You should have received a copy of the GNU General Public License
16 | along with this program. If not, see <https://www.gnu.org/licenses/>.
17 | '''
18 |
19 | import tensorflow as tf
20 |
21 |
22 | def setup_gpu(gpu_id):
23 | try:
24 | visible_gpu_indices = [int(id) for id in gpu_id.split(',')]
25 | available_gpus = tf.config.list_physical_devices('GPU')
26 | visible_gpus = [gpu for idx, gpu in enumerate(available_gpus) if idx in visible_gpu_indices]
27 |
28 | if visible_gpus:
29 | try:
30 | # Currently, memory growth needs to be the same across GPUs.
31 | for gpu in available_gpus:
32 | tf.config.experimental.set_memory_growth(gpu, True)
33 |
34 | # Use only the selected GPUs.
35 | tf.config.set_visible_devices(visible_gpus, 'GPU')
36 | except RuntimeError as e:
37 | # Visible devices must be set before GPUs have been initialized.
38 | print(e)
39 |
40 | logical_gpus = tf.config.list_logical_devices('GPU')
41 | print(len(available_gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
42 | else:
43 | tf.config.set_visible_devices([], 'GPU')
44 | except ValueError:
45 | tf.config.set_visible_devices([], 'GPU')
46 |
--------------------------------------------------------------------------------
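`setup_gpu` takes a comma-separated string of GPU indices, enables memory growth on the physical devices and restricts TensorFlow to the selected ones; a value that does not parse as integers falls through to CPU-only mode. A short usage note:

```python
# Typical usage from a training or inference script.
from keras_retinanet.utils.gpu import setup_gpu

setup_gpu("0")      # use GPU 0 only, with memory growth enabled
# setup_gpu("cpu")  # non-integer ids raise ValueError internally -> CPU-only mode
```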
/data_utils/ImgGenerator/README.md:
--------------------------------------------------------------------------------
1 | ### Tool for generation of new dataset images
2 |
3 | #### How it works
4 | - cuts all target images from the existing dataset;
5 | - transforms crops (at the moment only random rotation is implemented);
6 | - puts crops on new backgrounds;
7 | - generates Pascal VOC-style annotations.
8 |
9 | #### Inputs
10 |
11 | Before running: update **config.cfg** with your data.
12 |
13 | Folders:
14 | - **DATASET_PATH** - location of the existing dataset;
15 | - **BACKGROUNDS_FOLDER_NAME** - name of the folder with new backgrounds; the folder must be located inside the existing dataset folder;
16 | - **AUGMENTED_FOLDER_NAME** - name of the output folder; it must also be located inside the existing dataset folder.
17 |
18 | Cropping details:
19 |
20 | Targets are cropped with some padding plus extra pixels for a smooth transition.
21 |
22 | Example: if the
23 | - target image is 50x50 pixels,
24 | - padding is 10% of image W and H,
25 | - transition area is 25 pixels at each side of image,
26 |
27 | then crop will have a size:
28 | **H = W = (50 + 50×0.1 + 50×0.1 + 25 + 25) = 110 px**
29 |
30 | Related variables:
31 | - **PADDING_WIDTH** - **percentage** of target image H and W to pad,
32 | - **INPAINT_PIXELS_WIDTH** - width of smooth transition area.
33 |
34 |
35 |
36 | #### Outputs
37 |
38 | All outputs are located in **AUGMENTED_FOLDER_NAME**:
39 |
40 | - **JPEGImages** - folder with resulting images,
41 | - **Annotations** - folder with xml annotations files,
42 | - **Targets** - crops of targets.
43 |
44 |
45 |
46 |
47 |
48 |
49 |
--------------------------------------------------------------------------------
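The crop-size example above can be reproduced directly from the two config values (PADDING_WIDTH is a percentage of the target's side, INPAINT_PIXELS_WIDTH is a pixel count); the numbers below are the README's example values, not the ones in `config.cfg`:

```python
# Reproducing the README example: 50x50 target, 10% padding per side,
# 25 px transition per side. PADDING_WIDTH is a percentage, INPAINT_PIXELS_WIDTH is in px.
def crop_side(target_side, padding_pct, inpaint_px):
    return target_side + 2 * target_side * padding_pct / 100 + 2 * inpaint_px

print(crop_side(50, 10, 25))  # 110.0 px, as stated in the example above
```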
/tests/models/test_mobilenet.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 lvaleriu (https://github.com/lvaleriu/)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | import warnings
18 | import pytest
19 | import numpy as np
20 | from tensorflow import keras
21 | from keras_retinanet import losses
22 | from keras_retinanet.models.mobilenet import MobileNetBackbone
23 |
24 |
25 | alphas = ['1.0']
26 | parameters = []
27 |
28 | for backbone in MobileNetBackbone.allowed_backbones:
29 | for alpha in alphas:
30 | parameters.append((backbone, alpha))
31 |
32 |
33 | @pytest.mark.parametrize("backbone, alpha", parameters)
34 | def test_backbone(backbone, alpha):
35 | # ignore warnings in this test
36 | warnings.simplefilter('ignore')
37 |
38 | num_classes = 10
39 |
40 | inputs = np.zeros((1, 1024, 363, 3), dtype=np.float32)
41 | targets = [np.zeros((1, 68760, 5), dtype=np.float32), np.zeros((1, 68760, num_classes + 1))]
42 |
43 | inp = keras.layers.Input(inputs[0].shape)
44 |
45 | mobilenet_backbone = MobileNetBackbone(backbone='{}_{}'.format(backbone, format(alpha)))
46 | training_model = mobilenet_backbone.retinanet(num_classes=num_classes, inputs=inp)
47 | training_model.summary()
48 |
49 | # compile model
50 | training_model.compile(
51 | loss={
52 | 'regression': losses.smooth_l1(),
53 | 'classification': losses.focal()
54 | },
55 | optimizer=keras.optimizers.Adam(lr=1e-5, clipnorm=0.001))
56 |
57 | training_model.fit(inputs, targets, batch_size=1)
58 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # lacmus
2 |
3 | 
4 |
5 | A program for finding lost people in the forest in aerial photos using the RetinaNet neural network.
6 |
7 | The project is being developed by the non-profit organization Liza Alert.
8 |
9 | ## Demonstration
10 |
11 | 
12 |
13 | *Picture 1*
14 |
15 | 
16 |
17 | *Picture 2*
18 |
19 | [](http://www.youtube.com/watch?v=9pVtPM4bzww)
20 |
21 | *Video 1*
22 |
23 | See [more examples](docs/work-demo.md).
24 |
25 | ## Training data
26 |
27 | You can download the `Lacmus Drone Dataset (LADD)` from the Mail.ru cloud:
28 |
29 | - https://cloud.mail.ru/public/2k53/2bJVwYSa7
30 |
31 | You can also download the Lacmus version of the `Stanford Drone Dataset (SDD)` from the Mail.ru cloud:
32 |
33 | - https://cloud.mail.ru/public/4GKW/3FW26Sq77
34 |
35 |
36 | ## Usage
37 |
38 | Read more about the training steps and training data in the [train documentation](docs/train-usage.md) to learn how to train the model.
39 |
40 | ## Pretrained models
41 |
42 | The models are available [here](https://github.com/lizaalert/lacmus/releases/tag/0.1.1).
43 |
44 | ## Partners
45 |
46 | [![ODS][logoODS]](https://ods.ai) [![DTL][logoDTL]](http://immersiya.com/about) [![JB][logoJB]](https://www.jetbrains.com/) [![GitBook][logoGitBook]](https://www.gitbook.com/)
47 | [![Liza alert][logoLA]](https://lizaalert.org/) [![Novaya Gazeta][logoNovayaGazeta]](https://novayagazeta.ru/) [![Teplica][logoTeplica]](https://te-st.ru/)
48 |
49 | [logoDTL]: docs/imgs/partners/dtl-logo-200px.png "DTL"
50 |
51 | [logoODS]: docs/imgs/partners/ods-logo-200px.png "ODS"
52 |
53 | [logoLA]: docs/imgs/partners/lizaalert-logo-128px.png "Liza alert"
54 |
55 | [logoNovayaGazeta]: docs/imgs/partners/novaya-gazeta-logo-128px.png "Novaya Gazeta"
56 |
57 | [logoTeplica]: docs/imgs/partners/teplica-logo-128px.png "Teplica"
58 |
59 | [logoJB]: docs/imgs/partners/jetbrains_logo_200px.png "JetBrains"
60 |
61 | [logoGitBook]: docs/imgs/partners/gitbook-logo-200px.png "GitBook"
--------------------------------------------------------------------------------
/keras2tf_2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import argparse
5 | import numpy as np
6 | import tensorflow as tf
7 | from tensorflow import keras
8 | from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
9 |
10 | from keras_retinanet import models
11 |
12 |
13 | def parse_args(args):
14 | parser = argparse.ArgumentParser(description='convert keras_retinanet model to tensorflow frozen graph')
15 | parser.add_argument(
16 | '--input',
17 | help='path to h5 keras inference model',
18 | type=str,
19 | required=True
20 | )
21 | parser.add_argument(
22 | '--backbone',
23 | help='backbone name',
24 | type=str,
25 | required=False,
26 | default='resnet50'
27 | )
28 | return parser.parse_args(args)
29 |
30 | def main(args=None):
31 | args = parse_args(args)
32 | weights_name = args.input
33 | backbone = args.backbone
34 |
35 | dirname = os.path.dirname(weights_name)
36 | basename = os.path.basename(weights_name)
37 | fn, ext = os.path.splitext(basename)
38 |
39 | model = models.load_model(weights_name, backbone_name=backbone)
40 |
41 | # Convert Keras model to ConcreteFunction
42 | full_model = tf.function(lambda input_1: model(input_1))
43 | full_model = full_model.get_concrete_function(
44 | tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))
45 |
46 | # Get frozen ConcreteFunction
47 | frozen_func = convert_variables_to_constants_v2(full_model)
48 | frozen_func.graph.as_graph_def()
49 |
50 | layers = [op.name for op in frozen_func.graph.get_operations()]
51 |
52 | print("Frozen model inputs: ")
53 | print(frozen_func.inputs)
54 | print("Frozen model outputs: ")
55 | print(frozen_func.outputs)
56 |
57 | # Save frozen graph to disk
58 | tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
59 | logdir=dirname,
60 | name=f"{fn}.pb",
61 | as_text=False)
62 | print(f'weights saved: {dirname}')
63 |
64 | if __name__ == '__main__':
65 | main()
--------------------------------------------------------------------------------
/keras_retinanet/utils/tf_version.py:
--------------------------------------------------------------------------------
1 | '''
2 | https://github.com/lacmus-foundation/lacmus
3 | Copyright (C) 2019-2020 lacmus-foundation
4 |
5 | This program is free software: you can redistribute it and/or modify
6 | it under the terms of the GNU General Public License as published by
7 | the Free Software Foundation, either version 3 of the License, or
8 | (at your option) any later version.
9 |
10 | This program is distributed in the hope that it will be useful,
11 | but WITHOUT ANY WARRANTY; without even the implied warranty of
12 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 | GNU General Public License for more details.
14 |
15 | You should have received a copy of the GNU General Public License
16 | along with this program. If not, see <https://www.gnu.org/licenses/>.
17 | '''
18 |
19 | from __future__ import print_function
20 |
21 | import tensorflow as tf
22 | import sys
23 |
24 | MINIMUM_TF_VERSION = 2, 3, 0
25 | BLACKLISTED_TF_VERSIONS = []
26 |
27 |
28 | def tf_version():
29 | """ Get the Tensorflow version.
30 | Returns
31 | tuple of (major, minor, patch).
32 | """
33 | return tuple(map(int, tf.version.VERSION.split('-')[0].split('.')))
34 |
35 |
36 | def tf_version_ok(minimum_tf_version=MINIMUM_TF_VERSION, blacklisted=BLACKLISTED_TF_VERSIONS):
37 | """ Check if the current Tensorflow version is higher than the minimum version.
38 | """
39 | return tf_version() >= minimum_tf_version and tf_version() not in blacklisted
40 |
41 |
42 | def assert_tf_version(minimum_tf_version=MINIMUM_TF_VERSION, blacklisted=BLACKLISTED_TF_VERSIONS):
43 | """ Assert that the Tensorflow version is up to date.
44 | """
45 | detected = tf.version.VERSION
46 | required = '.'.join(map(str, minimum_tf_version))
47 | assert(tf_version_ok(minimum_tf_version, blacklisted)), 'You are using tensorflow version {}. The minimum required version is {} (blacklisted: {}).'.format(detected, required, blacklisted)
48 |
49 |
50 | def check_tf_version():
51 | """ Check that the Tensorflow version is up to date. If it isn't, print an error message and exit the script.
52 | """
53 | try:
54 | assert_tf_version()
55 | except AssertionError as e:
56 | print(e, file=sys.stderr)
57 | sys.exit(1)
58 |
--------------------------------------------------------------------------------
/tests/bin/test_train.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | import keras_retinanet.backend
18 | import keras_retinanet.bin.train
19 | from tensorflow import keras
20 |
21 | import warnings
22 |
23 | import pytest
24 |
25 |
26 | @pytest.fixture(autouse=True)
27 | def clear_session():
28 | # run before test (do nothing)
29 | yield
30 | # run after test, clear keras session
31 | keras.backend.clear_session()
32 |
33 |
34 | def test_coco():
35 | # ignore warnings in this test
36 | warnings.simplefilter('ignore')
37 |
38 | # run training / evaluation
39 | keras_retinanet.bin.train.main([
40 | '--epochs=1',
41 | '--steps=1',
42 | '--no-weights',
43 | '--no-snapshots',
44 | 'coco',
45 | 'tests/test-data/coco',
46 | ])
47 |
48 |
49 | def test_pascal():
50 | # ignore warnings in this test
51 | warnings.simplefilter('ignore')
52 |
53 | # run training / evaluation
54 | keras_retinanet.bin.train.main([
55 | '--epochs=1',
56 | '--steps=1',
57 | '--no-weights',
58 | '--no-snapshots',
59 | 'pascal',
60 | 'tests/test-data/pascal',
61 | ])
62 |
63 |
64 | def test_csv():
65 | # ignore warnings in this test
66 | warnings.simplefilter('ignore')
67 |
68 | # run training / evaluation
69 | keras_retinanet.bin.train.main([
70 | '--epochs=1',
71 | '--steps=1',
72 | '--no-weights',
73 | '--no-snapshots',
74 | 'csv',
75 | 'tests/test-data/csv/annotations.csv',
76 | 'tests/test-data/csv/classes.csv',
77 | ])
78 |
79 |
80 | def test_vgg():
81 | # ignore warnings in this test
82 | warnings.simplefilter('ignore')
83 |
84 | # run training / evaluation
85 | keras_retinanet.bin.train.main([
86 | '--backbone=vgg16',
87 | '--epochs=1',
88 | '--steps=1',
89 | '--no-weights',
90 | '--no-snapshots',
91 | '--freeze-backbone',
92 | 'coco',
93 | 'tests/test-data/coco',
94 | ])
95 |
--------------------------------------------------------------------------------
/data_utils/README.md:
--------------------------------------------------------------------------------
1 | # Lacmus Drone Dataset (LADD)
2 |
3 | LADD is a dataset of drone-captured images for pedestrian detection. LADD annotations are in the Pascal VOC format.
4 |
5 | You can [download the LADD](https://cloud.mail.ru/public/2k53/2bJVwYSa7) from Mail.Cloud directly.
6 |
7 | #### Overview of the dataset
8 | 
9 | * You can see an example of a labeled image below.
10 | 
11 | We have just one kind of label:
12 |
13 | * 0 - Pedestrian
14 |
15 | 
16 |
17 |
18 | * The structure of a `LADD_VERSION_SIZON` archive:
19 |
20 | ```
21 | ├── LADD
22 | │ ├── Annotations
23 | │ │ └── X.xml (419 items)
24 | │ ├── examples
25 | │ │ └── X.jpg (10 items)
26 | │ ├── ImageSets
27 | │ │ └── Main
28 | # *.txt which split the dataset
29 | │ │ └── test.txt
30 | │ │ └── train.txt
31 | │ │ └── trainval.txt
32 | │ │ └── val.txt
33 | │ └── JPEGImages
34 | │ └── X.jpg (419 items)
35 |
36 | ```
37 |
38 | * The `JPEGImages`:
39 |
40 | * **Image Type** : *jpeg(JPEG)*
41 | * **Width** x **Height** : *4000 x 3000*
42 |
43 | * The `Annotations`: VOC-format `.xml` files for object detection, automatically generated by the labeling tools. Below is an example `.xml` file.
44 |
45 | ```xml
46 | <annotation>
47 |     <folder>VocGalsTfl</folder>
48 |     <filename>0</filename>
49 |     <source>
50 |         <database>Unknown</database>
51 |     </source>
52 |     <size>
53 |         <width>4000</width>
54 |         <height>3000</height>
55 |         <depth>3</depth>
56 |     </size>
57 |     <segmented>0</segmented>
58 |     <object>
59 |         <name>Pedestrian</name>
60 |         <pose>Unspecified</pose>
61 |         <truncated>0</truncated>
62 |         <difficult>0</difficult>
63 |         <bndbox>
64 |             <xmin>...</xmin>
65 |             <ymin>...</ymin>
66 |             <xmax>...</xmax>
67 |             <ymax>...</ymax>
68 |         </bndbox>
69 |     </object>
70 | 
71 |     <!-- further <object> entries follow, one per labeled pedestrian -->
72 | 
73 | </annotation>
74 | ```
75 |
76 | The dataset is divided into 3 seasons, including **summer** and **spring**. All files are stored in archives named `LADD_VERSION_SIZON_NUMBER` and are numbered. You can also assemble a dataset of your own: simply merge the corresponding folders and the contents of the text files into one.
77 |
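For example, a minimal Python sketch of such a merge (the season directory names are placeholders for whichever archives you extracted):

```python
# Sketch: merge two extracted LADD season archives into a single VOC-style dataset.
import shutil
from pathlib import Path

seasons = [Path('LADD_V4_summer'), Path('LADD_V4_spring')]   # placeholder names
merged = Path('LADD_merged')

# concatenate the split lists
for split in ['train.txt', 'trainval.txt', 'val.txt', 'test.txt']:
    out = merged / 'ImageSets' / 'Main' / split
    out.parent.mkdir(parents=True, exist_ok=True)
    lines = []
    for season in seasons:
        lines += (season / 'ImageSets' / 'Main' / split).read_text().splitlines()
    out.write_text('\n'.join(lines) + '\n')

# copy annotations and images side by side
for sub in ['Annotations', 'JPEGImages']:
    (merged / sub).mkdir(parents=True, exist_ok=True)
    for season in seasons:
        for f in (season / sub).iterdir():
            shutil.copy(f, merged / sub / f.name)
```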
78 | You can also use our official tools.
79 |
80 | ## License
81 |
82 | LADD is licensed under GNU General Public License v3.0. You can read the license text [here](https://github.com/lizaalert/lacmus/blob/master/LICENSE).
83 |
84 | This license applies not only to the dataset, but also to ALL SOFTWARE products that use it to one degree or another.
85 |
86 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import setuptools
2 | from setuptools.extension import Extension
3 | from distutils.command.build_ext import build_ext as DistUtilsBuildExt
4 |
5 |
6 | class BuildExtension(setuptools.Command):
7 | description = DistUtilsBuildExt.description
8 | user_options = DistUtilsBuildExt.user_options
9 | boolean_options = DistUtilsBuildExt.boolean_options
10 | help_options = DistUtilsBuildExt.help_options
11 |
12 | def __init__(self, *args, **kwargs):
13 | from setuptools.command.build_ext import build_ext as SetupToolsBuildExt
14 |
15 |         # Bypass __setattr__ to avoid infinite recursion.
16 | self.__dict__['_command'] = SetupToolsBuildExt(*args, **kwargs)
17 |
18 | def __getattr__(self, name):
19 | return getattr(self._command, name)
20 |
21 | def __setattr__(self, name, value):
22 | setattr(self._command, name, value)
23 |
24 | def initialize_options(self, *args, **kwargs):
25 | return self._command.initialize_options(*args, **kwargs)
26 |
27 | def finalize_options(self, *args, **kwargs):
28 | ret = self._command.finalize_options(*args, **kwargs)
29 | import numpy
30 | self.include_dirs.append(numpy.get_include())
31 | return ret
32 |
33 | def run(self, *args, **kwargs):
34 | return self._command.run(*args, **kwargs)
35 |
36 |
37 | extensions = [
38 | Extension(
39 | 'keras_retinanet.utils.compute_overlap',
40 | ['keras_retinanet/utils/compute_overlap.pyx']
41 | ),
42 | ]
43 |
44 |
45 | setuptools.setup(
46 | name = 'keras-retinanet',
47 | version = '2.5.0',
48 | description = 'Keras implementation of RetinaNet object detection.',
49 | url = 'https://github.com/fizyr/keras-retinanet',
50 | author = 'Hans Gaiser',
51 | author_email = 'h.gaiser@fizyr.com',
52 | maintainer = 'Georgy Perevozchikov',
53 | maintainer_email = 'gosha20777@live.ru',
54 | cmdclass = {'build_ext': BuildExtension},
55 | packages = setuptools.find_packages(),
56 | install_requires = ['keras-resnet==0.2.1', 'efficientnet', 'image-classifiers', 'six', 'numpy', 'cython', 'Pillow', 'opencv-python', 'progressbar2'],
57 | entry_points = {
58 | 'console_scripts': [
59 | 'retinanet-train=keras_retinanet.bin.train:main',
60 | 'retinanet-evaluate=keras_retinanet.bin.evaluate:main',
61 | 'retinanet-debug=keras_retinanet.bin.debug:main',
62 | 'retinanet-convert-model=keras_retinanet.bin.convert_model:main',
63 | ],
64 | },
65 | ext_modules = extensions,
66 | setup_requires = ["cython>=0.28", "numpy>=1.14.0"]
67 | )
68 |
--------------------------------------------------------------------------------
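The `entry_points` above install `retinanet-train`, `retinanet-evaluate`, `retinanet-debug` and `retinanet-convert-model` as console scripts, each forwarding to the corresponding `main()` in `keras_retinanet.bin`. A hedged sketch of driving the same entry point programmatically (the dataset paths are placeholders), mirroring what `tests/bin/test_train.py` does:

```python
# Sketch: `retinanet-train ...` and this call are equivalent, because the
# console script maps onto keras_retinanet.bin.train:main. Paths are placeholders.
import keras_retinanet.bin.train

keras_retinanet.bin.train.main([
    '--epochs=1',
    '--steps=1',
    '--no-weights',
    '--no-snapshots',
    'csv',
    'path/to/annotations.csv',
    'path/to/classes.csv',
])
```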
/data_utils/LaddValidator/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 | using System.Linq;
5 |
6 | namespace LaddValidator
7 | {
8 | class Program
9 | {
10 |         private static readonly Dictionary<string, string> _argsKeys = new Dictionary<string, string>()
11 | {
12 | {"--src", "source path"},
13 |             {"--val_file", "validation file path"},
14 | };
15 |
16 | static void Main(string[] args)
17 | {
18 | var parser = new ArgsParser(_argsKeys);
19 | var parsedArgs = parser.Parse(args);
20 | if (parsedArgs == null)
21 | {
22 | return;
23 | }
24 | var splitPatch = parsedArgs["src"] + "ImageSets/Main/";
25 | var valFilePatch = parsedArgs["val_file"];
26 | if (!Directory.Exists(splitPatch))
27 | {
28 | Console.Write("unable to open: " + splitPatch);
29 | return;
30 | }
31 | if (!File.Exists(valFilePatch))
32 | {
33 | Console.Write("unable to open: " + valFilePatch);
34 | return;
35 | }
36 |
37 | var valLines = File.ReadLines(valFilePatch).ToList();
38 | var trainLines = File.ReadLines(splitPatch + "train.txt").ToList();
39 | var testLines = File.ReadLines(splitPatch + "test.txt").ToList();
40 | trainLines.AddRange(testLines);
41 |
42 | for (int i = 0; i < trainLines.Count; i++)
43 | {
44 | for (int j = 0; j < valLines.Count; j++)
45 | {
46 | if (valLines[j] == trainLines[i])
47 | {
48 | trainLines.RemoveAt(i);
49 | Console.WriteLine($"{valLines[j]} moved to val set");
50 | if(i>0)
51 | i--;
52 | }
53 | }
54 | }
55 | Shuffle(valLines);
56 | File.WriteAllLines(splitPatch+"train.txt", trainLines);
57 | File.WriteAllLines(splitPatch+"trainval.txt", trainLines);
58 | File.WriteAllLines(splitPatch+"test.txt", valLines);
59 | File.WriteAllLines(splitPatch+"val.txt", valLines);
60 | }
61 |
62 |         private static void Shuffle<T>(IList<T> list)
63 | {
64 | Random rng = new Random();
65 | int n = list.Count;
66 | while (n > 1) {
67 | n--;
68 | int k = rng.Next(n + 1);
69 | T value = list[k];
70 | list[k] = list[n];
71 | list[n] = value;
72 | }
73 | }
74 | }
75 | }
--------------------------------------------------------------------------------
/keras_retinanet/models/mobilenetv3/mobilenet_v3_small.py:
--------------------------------------------------------------------------------
1 | """MobileNet v3 small models for Keras.
2 | # Reference
3 | [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244?context=cs)
4 | """
5 |
6 |
7 | from tensorflow.keras.models import Model
8 | from tensorflow.keras.layers import Input, Conv2D, GlobalAveragePooling2D, Reshape
9 | from tensorflow.keras.utils import plot_model
10 |
11 | from .mobilenet_v3_base import MobileNetBase
12 |
13 |
14 | class MobileNetV3_Small(MobileNetBase):
15 | def __init__(self, shape, n_class, alpha=1.0, include_top=True):
16 | """Init.
17 |
18 | # Arguments
19 |             shape: An integer or tuple/list of 3 integers, shape
20 |                 of input tensor.
21 |             n_class: Integer, number of classes.
22 |             alpha: Float, width multiplier.
23 |             include_top: Boolean, whether to include the classification layer.
24 |
25 | # Returns
26 | MobileNetv3 model.
27 | """
28 | super(MobileNetV3_Small, self).__init__(shape, n_class, alpha)
29 | self.include_top = include_top
30 |
31 | def build(self, plot=False):
32 | """build MobileNetV3 Small.
33 |
34 | # Arguments
35 |             plot: Boolean, whether to plot the model.
36 |
37 | # Returns
38 | model: Model, model.
39 | """
40 |
41 | inputs = Input(shape=self.shape)
42 |
43 | x = self._conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')
44 |
45 | x = self._bottleneck(x, 16, (3, 3), e=16, s=2, squeeze=True, nl='RE')
46 | x = self._bottleneck(x, 24, (3, 3), e=72, s=2, squeeze=False, nl='RE')
47 | x = self._bottleneck(x, 24, (3, 3), e=88, s=1, squeeze=False, nl='RE')
48 | x = self._bottleneck(x, 40, (5, 5), e=96, s=2, squeeze=True, nl='HS')
49 | x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
50 | x = self._bottleneck(x, 40, (5, 5), e=240, s=1, squeeze=True, nl='HS')
51 | x = self._bottleneck(x, 48, (5, 5), e=120, s=1, squeeze=True, nl='HS')
52 | x = self._bottleneck(x, 48, (5, 5), e=144, s=1, squeeze=True, nl='HS')
53 | x = self._bottleneck(x, 96, (5, 5), e=288, s=2, squeeze=True, nl='HS')
54 | x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')
55 | x = self._bottleneck(x, 96, (5, 5), e=576, s=1, squeeze=True, nl='HS')
56 |
57 | x = self._conv_block(x, 576, (1, 1), strides=(1, 1), nl='HS')
58 | x = GlobalAveragePooling2D()(x)
59 | x = Reshape((1, 1, 576))(x)
60 |
61 | x = Conv2D(1280, (1, 1), padding='same')(x)
62 | x = self._return_activation(x, 'HS')
63 |
64 | if self.include_top:
65 | x = Conv2D(self.n_class, (1, 1), padding='same', activation='softmax')(x)
66 | x = Reshape((self.n_class,))(x)
67 |
68 | model = Model(inputs, x)
69 |
70 | if plot:
71 | plot_model(model, to_file='MobileNetv3_small.png', show_shapes=True)
72 |
73 | return model
74 |
--------------------------------------------------------------------------------
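A short usage sketch for the class above; the input shape and class count are arbitrary example values, not settings prescribed by the project:

```python
# Sketch: build a standalone MobileNetV3-Small classifier from the class above.
from keras_retinanet.models.mobilenetv3.mobilenet_v3_small import MobileNetV3_Small

backbone = MobileNetV3_Small(shape=(224, 224, 3), n_class=1000, alpha=1.0, include_top=True)
model = backbone.build()      # returns a tf.keras Model
model.summary()
```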
/keras_retinanet/callbacks/coco.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | from tensorflow import keras
18 | from ..utils.coco_eval import evaluate_coco
19 |
20 |
21 | class CocoEval(keras.callbacks.Callback):
22 | """ Performs COCO evaluation on each epoch.
23 | """
24 | def __init__(self, generator, tensorboard=None, threshold=0.05):
25 |         """ CocoEval callback initializer.
26 |
27 | Args
28 | generator : The generator used for creating validation data.
29 | tensorboard : If given, the results will be written to tensorboard.
30 | threshold : The score threshold to use.
31 | """
32 | self.generator = generator
33 | self.threshold = threshold
34 | self.tensorboard = tensorboard
35 |
36 | super(CocoEval, self).__init__()
37 |
38 | def on_epoch_end(self, epoch, logs=None):
39 | logs = logs or {}
40 |
41 | coco_tag = ['AP @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
42 | 'AP @[ IoU=0.50 | area= all | maxDets=100 ]',
43 | 'AP @[ IoU=0.75 | area= all | maxDets=100 ]',
44 | 'AP @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
45 | 'AP @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
46 | 'AP @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
47 | 'AR @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]',
48 | 'AR @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]',
49 | 'AR @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
50 | 'AR @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
51 | 'AR @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
52 | 'AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ]']
53 | coco_eval_stats = evaluate_coco(self.generator, self.model, self.threshold)
54 |
55 | if coco_eval_stats is not None:
56 | for index, result in enumerate(coco_eval_stats):
57 | logs[coco_tag[index]] = result
58 |
59 | if self.tensorboard:
60 | import tensorflow as tf
61 | writer = tf.summary.create_file_writer(self.tensorboard.log_dir)
62 | with writer.as_default():
63 | for index, result in enumerate(coco_eval_stats):
64 | tf.summary.scalar('{}. {}'.format(index + 1, coco_tag[index]), result, step=epoch)
65 | writer.flush()
66 |
--------------------------------------------------------------------------------
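A sketch of how this callback is typically attached to training. The validation generator is assumed to be created elsewhere (for example by the training script), so the sketch only builds the callback list:

```python
# Sketch: run COCO evaluation after every epoch and mirror the metrics to TensorBoard.
from tensorflow import keras
from keras_retinanet.callbacks.coco import CocoEval

def make_callbacks(validation_generator, log_dir='./logs'):
    """Build evaluation callbacks; the generator comes from the training setup."""
    tensorboard_cb = keras.callbacks.TensorBoard(log_dir=log_dir)
    coco_cb = CocoEval(validation_generator, tensorboard=tensorboard_cb, threshold=0.05)
    return [tensorboard_cb, coco_cb]
```

The returned list is intended to be passed to `model.fit(..., callbacks=...)` by the training script.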
/keras_retinanet/utils/colors.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 |
4 | def label_color(label):
5 | """ Return a color from a set of predefined colors. Contains 80 colors in total.
6 |
7 | Args
8 | label: The label to get the color for.
9 |
10 | Returns
11 |         A list of three values representing an RGB color.
12 |
13 | If no color is defined for a certain label, the color green is returned and a warning is printed.
14 | """
15 | if label < len(colors):
16 | return colors[label]
17 | else:
18 | warnings.warn('Label {} has no color, returning default.'.format(label))
19 | return (0, 255, 0)
20 |
21 |
22 | """
23 | Generated using:
24 |
25 | ```
26 | colors = [list((matplotlib.colors.hsv_to_rgb([x, 1.0, 1.0]) * 255).astype(int)) for x in np.arange(0, 1, 1.0 / 80)]
27 | shuffle(colors)
28 | pprint(colors)
29 | ```
30 | """
31 | colors = [
32 | [31 , 0 , 255] ,
33 | [0 , 159 , 255] ,
34 | [255 , 95 , 0] ,
35 | [255 , 19 , 0] ,
36 | [255 , 0 , 0] ,
37 | [255 , 38 , 0] ,
38 | [0 , 255 , 25] ,
39 | [255 , 0 , 133] ,
40 | [255 , 172 , 0] ,
41 | [108 , 0 , 255] ,
42 | [0 , 82 , 255] ,
43 | [0 , 255 , 6] ,
44 | [255 , 0 , 152] ,
45 | [223 , 0 , 255] ,
46 | [12 , 0 , 255] ,
47 | [0 , 255 , 178] ,
48 | [108 , 255 , 0] ,
49 | [184 , 0 , 255] ,
50 | [255 , 0 , 76] ,
51 | [146 , 255 , 0] ,
52 | [51 , 0 , 255] ,
53 | [0 , 197 , 255] ,
54 | [255 , 248 , 0] ,
55 | [255 , 0 , 19] ,
56 | [255 , 0 , 38] ,
57 | [89 , 255 , 0] ,
58 | [127 , 255 , 0] ,
59 | [255 , 153 , 0] ,
60 | [0 , 255 , 255] ,
61 | [0 , 255 , 216] ,
62 | [0 , 255 , 121] ,
63 | [255 , 0 , 248] ,
64 | [70 , 0 , 255] ,
65 | [0 , 255 , 159] ,
66 | [0 , 216 , 255] ,
67 | [0 , 6 , 255] ,
68 | [0 , 63 , 255] ,
69 | [31 , 255 , 0] ,
70 | [255 , 57 , 0] ,
71 | [255 , 0 , 210] ,
72 | [0 , 255 , 102] ,
73 | [242 , 255 , 0] ,
74 | [255 , 191 , 0] ,
75 | [0 , 255 , 63] ,
76 | [255 , 0 , 95] ,
77 | [146 , 0 , 255] ,
78 | [184 , 255 , 0] ,
79 | [255 , 114 , 0] ,
80 | [0 , 255 , 235] ,
81 | [255 , 229 , 0] ,
82 | [0 , 178 , 255] ,
83 | [255 , 0 , 114] ,
84 | [255 , 0 , 57] ,
85 | [0 , 140 , 255] ,
86 | [0 , 121 , 255] ,
87 | [12 , 255 , 0] ,
88 | [255 , 210 , 0] ,
89 | [0 , 255 , 44] ,
90 | [165 , 255 , 0] ,
91 | [0 , 25 , 255] ,
92 | [0 , 255 , 140] ,
93 | [0 , 101 , 255] ,
94 | [0 , 255 , 82] ,
95 | [223 , 255 , 0] ,
96 | [242 , 0 , 255] ,
97 | [89 , 0 , 255] ,
98 | [165 , 0 , 255] ,
99 | [70 , 255 , 0] ,
100 | [255 , 0 , 172] ,
101 | [255 , 76 , 0] ,
102 | [203 , 255 , 0] ,
103 | [204 , 0 , 255] ,
104 | [255 , 0 , 229] ,
105 | [255 , 133 , 0] ,
106 | [127 , 0 , 255] ,
107 | [0 , 235 , 255] ,
108 | [0 , 255 , 197] ,
109 | [255 , 0 , 191] ,
110 | [0 , 44 , 255] ,
111 | [50 , 255 , 0]
112 | ]
113 |
--------------------------------------------------------------------------------
/keras_retinanet/models/mobilenetv3/mobilenet_v3_large.py:
--------------------------------------------------------------------------------
1 | """MobileNet v3 Large models for Keras.
2 | # Reference
3 | [Searching for MobileNetV3](https://arxiv.org/abs/1905.02244?context=cs)
4 | """
5 |
6 |
7 | from tensorflow.keras.models import Model
8 | from tensorflow.keras.layers import Input, Conv2D, GlobalAveragePooling2D, Reshape
9 | from tensorflow.keras.utils import plot_model
10 |
11 | from .mobilenet_v3_base import MobileNetBase
12 |
13 |
14 | class MobileNetV3_Large(MobileNetBase):
15 | def __init__(self, shape, n_class, alpha=1.0, include_top=True):
16 | """Init.
17 |
18 | # Arguments
19 |             shape: An integer or tuple/list of 3 integers, shape
20 |                 of input tensor.
21 |             n_class: Integer, number of classes.
22 |             alpha: Float, width multiplier.
23 |             include_top: Boolean, whether to include the classification layer.
24 |
25 | # Returns
26 | MobileNetv3 model.
27 | """
28 | super(MobileNetV3_Large, self).__init__(shape, n_class, alpha)
29 | self.include_top = include_top
30 |
31 | def build(self, plot=False):
32 | """build MobileNetV3 Large.
33 |
34 | # Arguments
35 |             plot: Boolean, whether to plot the model.
36 |
37 | # Returns
38 | model: Model, model.
39 | """
40 | inputs = Input(shape=self.shape)
41 |
42 | x = self._conv_block(inputs, 16, (3, 3), strides=(2, 2), nl='HS')
43 |
44 | x = self._bottleneck(x, 16, (3, 3), e=16, s=1, squeeze=False, nl='RE')
45 | x = self._bottleneck(x, 24, (3, 3), e=64, s=2, squeeze=False, nl='RE')
46 | x = self._bottleneck(x, 24, (3, 3), e=72, s=1, squeeze=False, nl='RE')
47 | x = self._bottleneck(x, 40, (5, 5), e=72, s=2, squeeze=True, nl='RE')
48 | x = self._bottleneck(x, 40, (5, 5), e=120, s=1, squeeze=True, nl='RE')
49 | x = self._bottleneck(x, 40, (5, 5), e=120, s=1, squeeze=True, nl='RE')
50 | x = self._bottleneck(x, 80, (3, 3), e=240, s=2, squeeze=False, nl='HS')
51 | x = self._bottleneck(x, 80, (3, 3), e=200, s=1, squeeze=False, nl='HS')
52 | x = self._bottleneck(x, 80, (3, 3), e=184, s=1, squeeze=False, nl='HS')
53 | x = self._bottleneck(x, 80, (3, 3), e=184, s=1, squeeze=False, nl='HS')
54 | x = self._bottleneck(x, 112, (3, 3), e=480, s=1, squeeze=True, nl='HS')
55 | x = self._bottleneck(x, 112, (3, 3), e=672, s=1, squeeze=True, nl='HS')
56 | x = self._bottleneck(x, 160, (5, 5), e=672, s=2, squeeze=True, nl='HS')
57 | x = self._bottleneck(x, 160, (5, 5), e=960, s=1, squeeze=True, nl='HS')
58 | x = self._bottleneck(x, 160, (5, 5), e=960, s=1, squeeze=True, nl='HS')
59 |
60 | x = self._conv_block(x, 960, (1, 1), strides=(1, 1), nl='HS')
61 | x = GlobalAveragePooling2D()(x)
62 | x = Reshape((1, 1, 960))(x)
63 |
64 | x = Conv2D(1280, (1, 1), padding='same')(x)
65 | x = self._return_activation(x, 'HS')
66 |
67 | if self.include_top:
68 | x = Conv2D(self.n_class, (1, 1), padding='same', activation='softmax')(x)
69 | x = Reshape((self.n_class,))(x)
70 |
71 | model = Model(inputs, x)
72 |
73 | if plot:
74 | plot_model(model, to_file='MobileNetv3_large.png', show_shapes=True)
75 |
76 | return model
77 |
--------------------------------------------------------------------------------
/docs/imgs/skhemes/RescuerLaBackendSkheme-v1.drawio:
--------------------------------------------------------------------------------
1 | 7V1tc5u4Fv41ntm9M/GAxOtH24m7u9Pem67bbffTDsayzQYjL8hNvL/+CpCwhGSHpGCTpP3gwkES0nl5ztGRRAZwsnl4lwbb9Qe8QPEAGIuHAbweAGBC4NP/csq+pLi2WRJWabRghQ6EWfQvYkSDUXfRAmVSQYJxTKKtTAxxkqCQSLQgTfG9XGyJY/mt22CFFMIsDGKV+iVakHVJ9YB7oP+CotWav9l02IA3AS/MRpKtgwW+F0jwZgAnKcakvNo8TFCcM4/zpaw3PfK06liKEtKkwrtb7xd8+xueWf8lk78+Y9fzsyvWyrcg3rEBs86SPedAinfJAuWNGAM4vl9HBM22QZg/vacyp7Q12cT0zqSXyyiOJzjGaVEXLgLkLUNKz0iK75DwxAk9NF/SJ+oweJ9QStCDQGLDeofwBpF0T4vwpwZjMdMxi93eHwQGeJG1ICzoMGLAlGRVNX3gI71grHwCW4HC1j8idK+wlo6PyPyT+ZTgBNWYykhBHK0SehtSriFKH+fciqjajtiDTbRY5K/RCkwWaRsScLyhLcnA9IeO+A8qIvE0EgFdCcQ2VEWfGgN/OvCtga8qPVpQFGC3OCVrvMJJEN8cqDUeHsq8x3jLhPk3ImTPIC3YESyLGj1E5Ktw/WfeFOVieXf9wFoubvb8JqHM+FoUNAyTE8qaputwwqFycSfVvkVpRHmaK01BzEiQklGOlLk6xUGWRSEnT6OY9/aokmR4l4boBOuZ5Gl7K0ROQRFrMGf9SZ1LURyQ6JsM0K1rDHwKMpqPI2MbVgZknHNUnDMdjVWZdldmZWmY5MQFptGhOKv86nNGdQ0Yn3/lj+Ypf8Qp9N2H8py45YQrWjvGqygZ2JNvQRotKc6RCCdC7e3JyukuKdQmxKskelLNDKGiZraLSXay/0I7rYF8RpUpSlaUYB/uPhXwcgWOOWNM9WkZF9a8pj4AJZ3gPXRreO/aqi4CW6OL3UF8/w0WwItbrK3BtZfsCK9yTwhlT2j5vXSEzst0hE4DjB8tNlHy3SC/Kz3FJkiC1SZnb0PA1TQVsA5lVPMoaDbGbp33QFuq1D/QX0R/T4Y1SwP+Fjwn+Ls9BH/Tl7hkAQ2Xzgz+Gja9ZPCvbspang16CfzeywR+rwnwb7ffDft4uYyjJA+0Fyi7IzhvMSja/YG5YsQto4kmyXVmzOUpWkFBvqB5i9LoW5KrlmaEnioCS4Pn3c14vJeH2DRa92TUBl6VyOoVanM0fhS2gdcr2OYhxXm1oqE45HkbkDXB9J6VwTwq4kclB2HbkmNVb3GUT184cri+jBymVYOEUhlZrYP8KT+DvVBsmxfIjr/HN/XvOahT2eJBuaoxfocbaHsNqQWohpbMCI2ztGwNVNt1wbTnLIHCpdEtDZuMnyaYKjWOY5T+/Hp9Z10gpj/UJQzP6T3NHmb4oft4ht/VTRm701tdir9nXNLmH7RsgnZXbOph9rnOJsu8vDYdTybO+XTuD2mJJ9/YweBRmPvNO5j79Q0x61DQUMe7w0tdjq2UyCL6Jk7oB5QHPsh/x/aAysArKGOvuLb4Nf0dFb9GQXeEMlPWyHha/N4Uv+Pi0aS4dgU6EBqpXlqWr6593iAoLmhJU3idqdQt32IPBZUTxnhi2COD96bqpSX0z2XXQOzujcCL8vXX0vAAZx+7GMk8Ldmkba1k2Y3wasaOowNr3ZpitCSXtyVPtiWvaSbW7MyYjifXKmz7FGR32Q8EFNy8qwsazwuCviI3EfGonY11wFXZtGjfHqMwMFCRYyxglMsrlq+gvzbHA1eoKINNBXqPQHENfg0dulbd1vahdaCu0E4c10Tg27XQ4QrARTaKiHigD7m0boQSU0UAQPJPkoRsPjBKgfKAqwYN6fqNoCpwtPZ5XmDlln8KWItdyj+QVZwZXBpXgZpEuhIslZqRyQ2uFsuoWFoBxY0AHSeg5lqwciX40mCdJeCEFhIncnxnK0FmE5itRjQRYLzyL0AaIBBR3R7UY11D4YDHMOyNIJMFL49Lav5P4f65EwR2jU2XzzYBXU6uht7XAQlmiLxh+FbkdnH0Pr4PWJkjV3NvUwrOjgbBEwFDVdzXBqOPArRYbCxnHMQAWozlTKEboAoln5QdqOJFsX9jZUg1V1abN1SNOM8e/3+qjmv3VbyBfIBdW4/TLOSf2z30MH9chxlggabz7w49RIMM8oxkZPeW43tVcpfPnAA1fcwj/LEY4YseQHQRJ2bkz8iZNHQRnuCytJN7NYHMM9uaHtaGU7wOqO5FzEB4wtN81D8JQy3LQV6n8gyuwiljMCrchT8VphNlPzyh06ZQ0ZNZIPICDEsWDn9WEky16ZbWoY0FkUNpCnSYOJ1WAuPANWC8S4MlSoJBdcz21XurXuRZbF9h98X2noHnH5q03Go3Wq82n4Gme4bb38L0fSivW9c4Gsc02AxUs8QWzMmrbUUBhqUak24fJ3SdrrimrirMCE7zTxOA6XUetHzcIfrk1QYsHt/BwbcH6Y4Two7ClS+rUToNyd/xfDr7EOJP/m3y9UpV2zPC22E77Z/CEz26ac3/mSe1jaagc9mT2WqK/xZnZJWi2cf3itiydbDNL8N9HFHppI9DzrwU4/t5RQjCu1Uh3P/tSHFUQcamyojasARbtgToaZ29qbEFrytfD9Xk/K+b/DMpHKPqLC9AiH8uBdQQislj87DKvxgzzE9OhGvqN4ebXUyiqwUOd8XhP72cOvAHtnyazdLtDuA+QuR3Z18PgWqu+Hc0n0fkw0dK/UfrCjhbtykOUZY10PKzKnXN5QJNwkW3+bM7Fj9p8+dF4hTTgDLXdJ+18TVM624TM1TzvOyLT681MFGE0IONyzzdf6HYZPCMYz/m0HDrn6ux+nlYky8VPhoEWf069gN1GWRhJcLglkWp/+zyj46NmY1V9+LGM1rhQ34OX0qdnloaeFJ2ugsnY9JJW5OPkujcTFfbwqE6sZuk+y21rfwzduEdSlrmY5CGzILN1o6xK2zVzs7O6r4tdQowHA5fhE4eyd6dlX2O8/I8iOw9XPsR59G5n7CaHjLsmZ+w1MncCbt5QuTbrt3UvpwEPe3ao+4bgt0deLfUedmLgBzLuDzkWG2ftuuGd7Xsi8UPvVyOb+qU6wtO71Ca9ZF99X2loAeKp8bF2z31XfnZMoWFr2feasI6gKqCOOvHKix148YKx0GyesVCMGp5L1vN4JxVBo6lMLv/i7Zy4Hf5uK/xyiyb+/Ul7utqZbZVD1K3mIaoVR1gaN9kYH9M5rTFdK/5fkPN5xLqi+arSRiW4Sq+wPFmvI9uf39X7ke7pq7OPH9HiyjfqHmt5hPPv26rsFYjgMbrtsDX7CppadWW3h7+bET5GZ/DH9+AN/8H
--------------------------------------------------------------------------------
/keras_retinanet/utils/coco_eval.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | from pycocotools.cocoeval import COCOeval
18 |
19 | from tensorflow import keras
20 | import numpy as np
21 | import json
22 |
23 | import progressbar
24 | assert(callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead."
25 |
26 |
27 | def evaluate_coco(generator, model, threshold=0.05):
28 | """ Use the pycocotools to evaluate a COCO model on a dataset.
29 |
30 | Args
31 | generator : The generator for generating the evaluation data.
32 | model : The model to evaluate.
33 | threshold : The score threshold to use.
34 | """
35 | # start collecting results
36 | results = []
37 | image_ids = []
38 | for index in progressbar.progressbar(range(generator.size()), prefix='COCO evaluation: '):
39 | image = generator.load_image(index)
40 | image = generator.preprocess_image(image)
41 | image, scale = generator.resize_image(image)
42 |
43 | if keras.backend.image_data_format() == 'channels_first':
44 | image = image.transpose((2, 0, 1))
45 |
46 | # run network
47 | boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))
48 |
49 | # correct boxes for image scale
50 | boxes /= scale
51 |
52 | # change to (x, y, w, h) (MS COCO standard)
53 | boxes[:, :, 2] -= boxes[:, :, 0]
54 | boxes[:, :, 3] -= boxes[:, :, 1]
55 |
56 | # compute predicted labels and scores
57 | for box, score, label in zip(boxes[0], scores[0], labels[0]):
58 | # scores are sorted, so we can break
59 | if score < threshold:
60 | break
61 |
62 | # append detection for each positively labeled class
63 | image_result = {
64 | 'image_id' : generator.image_ids[index],
65 | 'category_id' : generator.label_to_coco_label(label),
66 | 'score' : float(score),
67 | 'bbox' : box.tolist(),
68 | }
69 |
70 | # append detection to results
71 | results.append(image_result)
72 |
73 | # append image to list of processed images
74 | image_ids.append(generator.image_ids[index])
75 |
76 | if not len(results):
77 | return
78 |
79 | # write output
80 | json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
81 | json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)
82 |
83 | # load results in COCO evaluation tool
84 | coco_true = generator.coco
85 | coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))
86 |
87 | # run COCO evaluation
88 | coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
89 | coco_eval.params.imgIds = image_ids
90 | coco_eval.evaluate()
91 | coco_eval.accumulate()
92 | coco_eval.summarize()
93 | return coco_eval.stats
94 |
--------------------------------------------------------------------------------
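The in-place update above converts corner boxes `(x1, y1, x2, y2)` into the `(x, y, width, height)` layout that pycocotools expects; a tiny standalone check of that step:

```python
# Sketch: corner boxes -> (x, y, w, h), exactly as done in evaluate_coco above.
import numpy as np

boxes = np.array([[[10., 20., 110., 220.]]])   # shape (batch, detections, 4), corners
boxes[:, :, 2] -= boxes[:, :, 0]               # width  = x2 - x1
boxes[:, :, 3] -= boxes[:, :, 1]               # height = y2 - y1
print(boxes)                                   # [[[ 10.  20. 100. 200.]]]
```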
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at gosha20777@live.ru. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/data_utils/yolo2voc.py:
--------------------------------------------------------------------------------
1 | # Script to convert yolo annotations to voc format
2 | import os
3 | import xml.etree.cElementTree as ET
4 | from PIL import Image
5 | import argparse
6 |
7 | CLASS_MAPPING = {
8 | '0': 'Pedestrian'
9 | # Add remaining classes here.
10 | }
11 |
12 |
13 | def create_root(file_prefix, width, height):
14 | root = ET.Element("annotation")
15 | ET.SubElement(root, "filename").text = "{}.jpg".format(file_prefix)
16 | ET.SubElement(root, "folder").text = "images"
17 | size = ET.SubElement(root, "size")
18 | ET.SubElement(size, "width").text = str(width)
19 | ET.SubElement(size, "height").text = str(height)
20 | ET.SubElement(size, "depth").text = "3"
21 | return root
22 |
23 |
24 | def create_object_annotation(root, voc_labels):
25 | for voc_label in voc_labels:
26 | obj = ET.SubElement(root, "object")
27 | ET.SubElement(obj, "name").text = voc_label[0]
28 | ET.SubElement(obj, "pose").text = "Unspecified"
29 | ET.SubElement(obj, "truncated").text = str(0)
30 | ET.SubElement(obj, "difficult").text = str(0)
31 | bbox = ET.SubElement(obj, "bndbox")
32 | ET.SubElement(bbox, "xmin").text = str(voc_label[1])
33 | ET.SubElement(bbox, "ymin").text = str(voc_label[2])
34 | ET.SubElement(bbox, "xmax").text = str(voc_label[3])
35 | ET.SubElement(bbox, "ymax").text = str(voc_label[4])
36 | return root
37 |
38 |
39 | def create_file(file_prefix, width, height, voc_labels, dest_dir):
40 | root = create_root(file_prefix, width, height)
41 | root = create_object_annotation(root, voc_labels)
42 | tree = ET.ElementTree(root)
43 | tree.write("{}/{}.xml".format(dest_dir, file_prefix))
44 |
45 |
46 | def read_file(filename, src_dir, dest_dir):
47 | file_prefix = filename.split(".txt")[0]
48 | if os.path.isfile("{}/{}.JPG".format(src_dir, file_prefix)):
49 | os.rename("{}/{}.JPG".format(src_dir, file_prefix), "{}/{}.jpg".format(src_dir, file_prefix))
50 | print("renamed to {}.jpg".format(file_prefix))
51 |
52 | image_file_name = "{}.jpg".format(file_prefix)
53 | img = Image.open("{}/{}".format(src_dir, image_file_name))
54 | w, h = img.size
55 | with open("{}/{}".format(src_dir, filename), 'r') as file:
56 | lines = file.readlines()
57 | voc_labels = []
58 | for line in lines:
59 | voc = []
60 | line = line.strip()
61 | data = line.split()
62 | voc.append(CLASS_MAPPING.get(data[0]))
63 | bbox_width = float(data[3]) * w
64 | bbox_height = float(data[4]) * h
65 | center_x = float(data[1]) * w
66 | center_y = float(data[2]) * h
67 | voc.append(int(center_x - (bbox_width / 2)))
68 | voc.append(int(center_y - (bbox_height / 2)))
69 | voc.append(int(center_x + (bbox_width / 2)))
70 | voc.append(int(center_y + (bbox_height / 2)))
71 | voc_labels.append(voc)
72 | create_file(file_prefix, w, h, voc_labels, dest_dir)
73 | print("Processing complete for file: {}".format(filename))
74 |
75 | def parse_args(args):
76 | """ Parse the arguments.
77 | """
78 | parser = argparse.ArgumentParser(description='Script which converts yolo to pascal voc')
79 | parser.add_argument('--src', help='source annotation dir.')
80 | parser.add_argument('--dest', help='destination annotation dir.')
81 | return parser.parse_args(args)
82 |
83 | def main(args=None):
84 | args = parse_args(args)
85 |
86 | if not os.path.exists(args.dest):
87 | os.makedirs(args.dest)
88 | for filename in os.listdir(args.src):
89 | if filename.endswith('txt'):
90 |             read_file(filename, args.src, args.dest)
91 | else:
92 | print("Skipping file: {}".format(filename))
93 |
94 |
95 | if __name__ == "__main__":
96 | main()
97 |
--------------------------------------------------------------------------------
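The arithmetic in `read_file` maps a normalized YOLO line `(class, cx, cy, w, h)` to absolute VOC corner coordinates; a worked example on a 4000x3000 LADD-sized frame:

```python
# Sketch: YOLO normalized (cx, cy, w, h) -> VOC pixel corners (xmin, ymin, xmax, ymax).
w, h = 4000, 3000                       # image size in pixels
data = '0 0.5 0.5 0.1 0.2'.split()      # one YOLO annotation line (class 0 = Pedestrian)

bbox_width  = float(data[3]) * w        # 400
bbox_height = float(data[4]) * h        # 600
center_x    = float(data[1]) * w        # 2000
center_y    = float(data[2]) * h        # 1500

xmin = int(center_x - bbox_width / 2)   # 1800
ymin = int(center_y - bbox_height / 2)  # 1200
xmax = int(center_x + bbox_width / 2)   # 2200
ymax = int(center_y + bbox_height / 2)  # 1800
print(xmin, ymin, xmax, ymax)
```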
/keras_retinanet/utils/config.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright 2017-2018 Fizyr (https://fizyr.com)
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | """
16 |
17 | import configparser
18 | import numpy as np
19 | from tensorflow import keras
20 | from ..utils.anchors import AnchorParameters
21 |
22 |
23 | def read_config_file(config_path):
24 | config = configparser.ConfigParser()
25 |
26 | with open(config_path, 'r') as file:
27 | config.read_file(file)
28 |
29 | assert 'anchor_parameters' in config, \
30 | "Malformed config file. Verify that it contains the anchor_parameters section."
31 |
32 | config_keys = set(config['anchor_parameters'])
33 | default_keys = set(AnchorParameters.default.__dict__.keys())
34 |
35 | assert config_keys <= default_keys, \
36 | "Malformed config file. These keys are not valid: {}".format(config_keys - default_keys)
37 |
38 | if 'pyramid_levels' in config:
39 |         assert('levels' in config['pyramid_levels']), "pyramid levels must be specified using the 'levels' key"
40 |
41 | return config
42 |
43 |
44 | def parse_anchor_parameters(config):
45 | ratios = np.array(list(map(float, config['anchor_parameters']['ratios'].split(' '))), keras.backend.floatx())
46 | scales = np.array(list(map(float, config['anchor_parameters']['scales'].split(' '))), keras.backend.floatx())
47 | sizes = list(map(int, config['anchor_parameters']['sizes'].split(' ')))
48 | strides = list(map(int, config['anchor_parameters']['strides'].split(' ')))
49 | assert (len(sizes) == len(strides)), "sizes and strides should have an equal number of values"
50 |
51 | return AnchorParameters(sizes, strides, ratios, scales)
52 |
53 |
54 | def parse_pyramid_levels(config):
55 | levels = list(map(int, config['pyramid_levels']['levels'].split(' ')))
56 |
57 | return levels
58 |
59 | def parse_random_transform_parameters(config):
60 | kwargs = dict()
61 | kwargs['min_rotation'] = float(config['random_transform_parameters']['min_rotation'])
62 | kwargs['max_rotation'] = float(config['random_transform_parameters']['max_rotation'])
63 | kwargs['min_translation'] = tuple(map(float, config['random_transform_parameters']['min_translation'].split()))
64 | kwargs['max_translation'] = tuple(map(float, config['random_transform_parameters']['max_translation'].split()))
65 | kwargs['min_shear'] = float(config['random_transform_parameters']['min_shear'])
66 | kwargs['max_shear'] = float(config['random_transform_parameters']['max_shear'])
67 | kwargs['min_scaling'] = tuple(map(float, config['random_transform_parameters']['min_scaling'].split()))
68 | kwargs['max_scaling'] = tuple(map(float, config['random_transform_parameters']['max_scaling'].split()))
69 | kwargs['flip_x_chance'] = float(config['random_transform_parameters']['flip_x_chance'])
70 | kwargs['flip_y_chance'] = float(config['random_transform_parameters']['flip_y_chance'])
71 |
72 | return kwargs
73 |
74 |
75 | def parse_visual_effect_parameters(config):
76 | kwargs = dict()
77 | kwargs['contrast_range'] = tuple(map(float, config['visual_effect_parameters']['contrast_range'].split()))
78 | kwargs['brightness_range'] = tuple(map(float, config['visual_effect_parameters']['brightness_range'].split()))
79 | kwargs['hue_range'] = tuple(map(float, config['visual_effect_parameters']['hue_range'].split()))
80 | kwargs['saturation_range'] = tuple(map(float, config['visual_effect_parameters']['saturation_range'].split()))
81 |
82 | return kwargs
83 |
--------------------------------------------------------------------------------
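A hedged sketch of a configuration that `read_config_file` and `parse_anchor_parameters` above will accept; the anchor values are illustrative only (the repository ships `config.ini` and `config_p2_p5_low.ini` as actual configurations):

```python
# Sketch: write an illustrative anchor config and parse it with the helpers above.
# Requires the package (and its Cython extension) to be installed.
from keras_retinanet.utils.config import read_config_file, parse_anchor_parameters

example = """\
[anchor_parameters]
sizes = 32 64 128 256 512
strides = 8 16 32 64 128
ratios = 0.5 1 2
scales = 1 1.25 1.6
"""

with open('example_config.ini', 'w') as f:
    f.write(example)

config = read_config_file('example_config.ini')
anchors = parse_anchor_parameters(config)
print(anchors.sizes, anchors.strides)
```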
/data_utils/LaddGenerator/Annotation.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 | using System.Linq;
5 | using System.Xml.Serialization;
6 |
7 | namespace LaddGenerator
8 | {
9 | [Serializable]
10 | //[XmlRoot("annotation")]
11 | [XmlRoot("annotation")]
12 | public class Annotation
13 | {
14 | [XmlElement("folder")]
15 | public string Folder { get; set; } = "VocGalsTfl";
16 | [XmlElement("filename")]
17 | public string Filename { get; set; }
18 | [XmlElement("source")]
19 | public Sourse Source { get; set; } = new Sourse();
20 | [XmlElement("size")]
21 | public Size Size { get; set; }
22 | [XmlElement("segmented")]
23 | public int Segmented { get; set; } = 0;
24 | [XmlElement("object")]
25 | public List