├── tests ├── __init__.py ├── pybboxes │ ├── boxes │ │ ├── __init__.py │ │ ├── test_coco_bounding_box.py │ │ ├── test_voc_bounding_box.py │ │ ├── test_yolo_bounding_box.py │ │ ├── test_fiftyone_bounding_box.py │ │ └── test_albumentations_bounding_box.py │ ├── __init__.py │ ├── test_functional.py │ ├── conftest.py │ └── annotations │ │ └── test_annotations_conversion.py └── utils.py ├── pybboxes ├── types │ ├── __init__.py │ └── box_2d.py ├── utils │ ├── __init__.py │ └── io.py ├── annotations │ ├── __init__.py │ └── base.py ├── _typing.py ├── __init__.py ├── utils.py ├── boxes │ ├── __init__.py │ ├── coco_bounding_box.py │ ├── albumentations_bounding_box.py │ ├── voc_bounding_box.py │ ├── fiftyone_bounding_box.py │ ├── yolo_bounding_box.py │ ├── bbox.py │ └── base.py └── functional.py ├── MANIFEST.in ├── requirements.txt ├── setup.cfg ├── scripts ├── run_tests.py ├── run_code_style.py └── utils.py ├── pyproject.toml ├── .github └── workflows │ ├── publish_pypi.yml │ └── ci.yml ├── LICENSE ├── .gitignore ├── setup.py └── README.md /tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pybboxes/types/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pybboxes/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/pybboxes/boxes/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include requirements.txt 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | numpy==1.24.2 2 | pycocotools==2.0.6 3 | pyyaml==6.0 -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | select = E9,F63,F7,F82 4 | per-file-ignores = __init__.py: F401 5 | max-complexity = 10 6 | 7 | [isort] 8 | line_length=120 9 | profile=black -------------------------------------------------------------------------------- /scripts/run_tests.py: -------------------------------------------------------------------------------- 1 | from scripts.utils import shell, validate_and_exit 2 | 3 | if __name__ == "__main__": 4 | sts_tests = shell("pytest --cov pybboxes --cov-report term-missing --cov-report xml") 5 | validate_and_exit(tests=sts_tests) 6 | -------------------------------------------------------------------------------- /tests/pybboxes/__init__.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | TESTS_PACKAGE_DIR = Path(__file__).parent 4 | TESTS_DIR = TESTS_PACKAGE_DIR.parent 5 | TEST_DATA_DIR = TESTS_DIR / "test_data" 6 | EXPECTED_OUTPUTS = TEST_DATA_DIR / "expected_outputs" 7 | -------------------------------------------------------------------------------- /pybboxes/annotations/__init__.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | https://medium.com/red-buffer/converting-a-custom-dataset-from-coco-format-to-yolo-format-6d98a4fd43fc 3 | https://blog.roboflow.com/train-yolov7-instance-segmentation-on-custom-data/ 4 | """ 5 | from pybboxes.annotations.base import Annotations 6 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 120 3 | exclude = ''' 4 | ( 5 | /( 6 | | .git 7 | | venv 8 | | .venv 9 | )/ 10 | ) 11 | ''' 12 | 13 | [tool.pytest.ini_options] 14 | minversion = "6.2" 15 | timeout = 400 16 | timeout_method = "thread" 17 | testpaths = "tests" 18 | addopts = "-vvv" 19 | -------------------------------------------------------------------------------- /pybboxes/_typing.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence, Tuple, Union 2 | 3 | import numpy as np 4 | 5 | GenericBboxType = Union[ 6 | np.ndarray, Tuple[float, float, float, float], Tuple[int, int, int, int], Sequence[float], Sequence[int] 7 | ] 8 | BboxType = Union[Tuple[float, float, float, float], Tuple[int, int, int, int]] 9 | -------------------------------------------------------------------------------- /pybboxes/__init__.py: -------------------------------------------------------------------------------- 1 | from pybboxes.boxes import ( 2 | AlbumentationsBoundingBox, 3 | BoundingBox, 4 | CocoBoundingBox, 5 | FiftyoneBoundingBox, 6 | VocBoundingBox, 7 | YoloBoundingBox, 8 | ) 9 | from pybboxes.functional import convert_bbox # Backwards compatibility 10 | 11 | __version__ = "0.2.0" 12 | -------------------------------------------------------------------------------- /pybboxes/utils.py: -------------------------------------------------------------------------------- 1 | import importlib.util 2 | from pathlib import Path 3 | from typing import Union 4 | 5 | 6 | def import_module(module_name: str, filepath: Union[str, Path]): 7 | spec = importlib.util.spec_from_file_location(module_name, filepath) 8 | module = importlib.util.module_from_spec(spec) 9 | spec.loader.exec_module(module) 10 | return module 11 | -------------------------------------------------------------------------------- /pybboxes/boxes/__init__.py: -------------------------------------------------------------------------------- 1 | from pybboxes.boxes.albumentations_bounding_box import AlbumentationsBoundingBox 2 | from pybboxes.boxes.bbox import BoundingBox 3 | from pybboxes.boxes.coco_bounding_box import CocoBoundingBox 4 | from pybboxes.boxes.fiftyone_bounding_box import FiftyoneBoundingBox 5 | from pybboxes.boxes.voc_bounding_box import VocBoundingBox 6 | from pybboxes.boxes.yolo_bounding_box import YoloBoundingBox 7 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from deepdiff import DeepDiff 4 | 5 | 6 | def assert_almost_equal(actual, desired, decimal=3, exclude_paths=None, **kwargs): 7 | # significant digits default value changed to 3 (from 5) due to variety in 8 | # results for different hardware architectures. 
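    # DeepDiff compares the two nested structures; `significant_digits` bounds how precisely
    # numeric values are compared and `exclude_paths` drops the given paths from the comparison.
    # An empty result ({}) therefore means the structures match within that tolerance.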
9 | diff = DeepDiff(actual, desired, significant_digits=decimal, exclude_paths=exclude_paths, **kwargs) 10 | assert diff == {}, f"Actual and Desired Dicts are not Almost Equal:\n {json.dumps(diff, indent=2, default=str)}" 11 | 12 | 13 | def load_json(path: str): 14 | with open(path, "r") as jf: 15 | content = json.load(jf) 16 | return content 17 | -------------------------------------------------------------------------------- /scripts/run_code_style.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | from scripts.utils import shell, validate_and_exit 4 | 5 | if __name__ == "__main__": 6 | arg = sys.argv[1] 7 | 8 | if arg == "check": 9 | sts_flake = shell("flake8 pybboxes tests --config setup.cfg") 10 | sts_isort = shell("isort . --check --settings setup.cfg") 11 | sts_black = shell("black . --check --config pyproject.toml") 12 | validate_and_exit(flake8=sts_flake, isort=sts_isort, black=sts_black) 13 | elif arg == "format": 14 | sts_isort = shell("isort . --settings setup.cfg") 15 | sts_black = shell("black . --config pyproject.toml") 16 | validate_and_exit(isort=sts_isort, black=sts_black) 17 | -------------------------------------------------------------------------------- /.github/workflows/publish_pypi.yml: -------------------------------------------------------------------------------- 1 | name: Publish Python Package 2 | 3 | on: 4 | release: 5 | types: [created] 6 | 7 | jobs: 8 | deploy: 9 | runs-on: ubuntu-20.04 10 | 11 | steps: 12 | - uses: actions/checkout@v2 13 | - name: Set up Python 14 | uses: actions/setup-python@v2 15 | with: 16 | python-version: '3.x' 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install setuptools wheel twine 21 | - name: Build and publish 22 | env: 23 | TWINE_USERNAME: __token__ 24 | TWINE_PASSWORD: ${{ secrets.PYPI_SECRET }} 25 | run: | 26 | python setup.py sdist bdist_wheel 27 | twine upload dist/* -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Devrim 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /scripts/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | import shutil 5 | import sys 6 | 7 | 8 | def shell(command, exit_status=0): 9 | """ 10 | Run command through the shell and return 0 if the exit status of the command run matches the given exit status; otherwise return the actual exit status. 11 | 12 | Args: 13 | command: (str) Command string which runs through system shell. 14 | exit_status: (int) Expected exit status of given command run. 15 | 16 | Returns: 0 if the exit status matches `exit_status`, the actual exit status otherwise. 17 | 18 | """ 19 | actual_exit_status = os.system(command) 20 | if actual_exit_status == exit_status: 21 | return 0 22 | return actual_exit_status 23 | 24 | 25 | def validate_and_exit(expected_out_status=0, **kwargs): 26 | if all([arg == expected_out_status for arg in kwargs.values()]): 27 | # Expected status, OK 28 | sys.exit(0) 29 | else: 30 | # Failure 31 | print_console_centered("Summary Results") 32 | fail_count = 0 33 | for component, exit_status in kwargs.items(): 34 | if exit_status != expected_out_status: 35 | print(f"{component} failed.") 36 | fail_count += 1 37 | print_console_centered(f"{len(kwargs)-fail_count} success, {fail_count} failure") 38 | sys.exit(1) 39 | 40 | 41 | def print_console_centered(text: str, fill_char="="): 42 | w, _ = shutil.get_terminal_size((80, 20)) 43 | print(f" {text} ".center(w, fill_char)) 44 | 45 | 46 | def shell_capture(command, out_json=True): 47 | out = os.popen(command).read() 48 | if out_json: 49 | out = re.findall(r"{\s+.*\}", out, flags=re.MULTILINE | re.DOTALL)[0].replace("\n", "") 50 | return json.loads(out) 51 | return out 52 | -------------------------------------------------------------------------------- /pybboxes/types/box_2d.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence, Union 2 | 3 | import numpy as np 4 | 5 | IntegerBox = Union[Sequence[int], Sequence[Sequence[int]]] 6 | FloatBox = Union[Sequence[float], Sequence[Sequence[float]]] 7 | 8 | 9 | class Box: 10 | def __init__(self, x_tl: int, y_tl: int, x_br: int, y_br: int): 11 | self.x_tl = x_tl 12 | self.y_tl = y_tl 13 | self.x_br = x_br 14 | self.y_br = y_br 15 | 16 | def __add__(self, other: "Box") -> int: 17 | return self.union(other) 18 | 19 | def __sub__(self, other: "Box") -> int: 20 | return int(self.area - self.intersection(other)) 21 | 22 | def __mul__(self, other: "Box") -> int: 23 | return self.intersection(other) 24 | 25 | def __truediv__(self, other: "Box") -> float: 26 | return self.area / other.area 27 | 28 | @property 29 | def area(self) -> int: 30 | return self.width * self.height 31 | 32 | @property 33 | def height(self) -> int: 34 | return int(self.y_br - self.y_tl) 35 | 36 | @property 37 | def width(self) -> int: 38 | return int(self.x_br - self.x_tl) 39 | 40 | def intersection(self, other: "Box") -> int: 41 | x_tl, y_tl = np.maximum((self.x_tl, self.y_tl), (other.x_tl, other.y_tl)) 42 | x_br, y_br = np.minimum((self.x_br, self.y_br), (other.x_br, other.y_br)) 43 | if x_tl >= x_br or y_tl >= y_br: 44 | return 0 45 | intersection_width = x_br - x_tl 46 | intersection_height = y_br - y_tl 47 | return int(intersection_width * intersection_height) 48 | 49 | def union(self, other: "Box") -> int: 50 | return int(self.area + other.area - self.intersection(other)) 51 | 52 | def iou(self, other: "Box") -> float: 53 | return self.intersection(other) / self.union(other) 54 |
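# A minimal usage sketch of the `Box` primitive above (not part of box_2d.py itself); it assumes
# the package is importable as laid out in the directory tree at the top of this dump.
from pybboxes.types.box_2d import Box

box_a = Box(98, 345, 420, 462)   # VOC-style corners: x_tl, y_tl, x_br, y_br
box_b = Box(150, 360, 440, 470)

print(box_a.area)        # width * height -> 322 * 117 = 37674
print(box_a * box_b)     # __mul__ -> intersection area in pixels
print(box_a + box_b)     # __add__ -> union area in pixels
print(box_a.iou(box_b))  # intersection / union, a float in [0, 1]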
-------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | operating-system: [ubuntu-latest, windows-latest, macos-latest] 15 | # for Python 3.10, ref https://github.com/actions/setup-python/issues/160#issuecomment-724485470 16 | python-version: [3.8, 3.9, '3.10', '3.11'] 17 | fail-fast: false 18 | 19 | steps: 20 | - name: Checkout 21 | uses: actions/checkout@v2 22 | 23 | - name: Set up Python 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: ${{ matrix.python-version }} 27 | 28 | - name: Restore Ubuntu cache 29 | uses: actions/cache@v1 30 | if: matrix.operating-system == 'ubuntu-latest' 31 | with: 32 | path: ~/.cache/pip 33 | key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}} 34 | restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}- 35 | 36 | - name: Restore MacOS cache 37 | uses: actions/cache@v1 38 | if: matrix.operating-system == 'macos-latest' 39 | with: 40 | path: ~/Library/Caches/pip 41 | key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}} 42 | restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}- 43 | 44 | - name: Restore Windows cache 45 | uses: actions/cache@v1 46 | if: matrix.operating-system == 'windows-latest' 47 | with: 48 | path: ~\AppData\Local\pip\Cache 49 | key: ${{ matrix.os }}-${{ matrix.python-version }}-${{ hashFiles('**/setup.py')}} 50 | restore-keys: ${{ matrix.os }}-${{ matrix.python-version }}- 51 | 52 | - name: Update pip 53 | run: python -m pip install --upgrade pip 54 | 55 | - name: Install dependencies 56 | run: > 57 | pip install -e .[dev] 58 | 59 | - name: Lint with flake8 and black 60 | run: | 61 | python -m scripts.run_code_style check 62 | 63 | - name: Run tests. 64 | run: | 65 | python -m scripts.run_tests 66 | -------------------------------------------------------------------------------- /pybboxes/boxes/coco_bounding_box.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes.boxes.base import BaseBoundingBox 4 | from pybboxes.boxes.bbox import BoundingBox 5 | 6 | 7 | class CocoBoundingBox(BaseBoundingBox): 8 | def __init__( 9 | self, 10 | x_tl: int, 11 | y_tl: int, 12 | w: int, 13 | h: int, 14 | image_size: Tuple[int, int] = None, 15 | strict: bool = False, 16 | ): 17 | super(CocoBoundingBox, self).__init__(x_tl, y_tl, w, h, image_size=image_size, strict=strict) 18 | 19 | def _correct_value_types(self, *values): 20 | return tuple([round(val) for val in values]) 21 | 22 | def _validate_values(self, *values): 23 | image_width, image_height = self.image_size 24 | 25 | x_tl, y_tl, w, h = values 26 | if w <= 0 or h <= 0: 27 | raise ValueError("Given width and height must be greater than 0.") 28 | elif self.strict and (x_tl < 0 or y_tl < 0): 29 | raise ValueError("Given top-left point is out of bounds.") 30 | elif (image_width is not None and x_tl + w > image_width) or ( 31 | image_width is not None and y_tl + h > image_height 32 | ): 33 | if self.strict: 34 | raise ValueError( 35 | "Given bounding box values is out of bounds. " 36 | "To silently skip out of bounds cases pass 'strict=False'." 
37 | ) 38 | self._is_oob = True 39 | elif not self.is_image_size_null(): 40 | self._is_oob = False 41 | 42 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BoundingBox"]: 43 | x_tl, y_tl, w, h = self.values 44 | x_br = x_tl + w 45 | y_br = y_tl + h 46 | if return_values: 47 | return x_tl, y_tl, x_br, y_br 48 | return BoundingBox(x_tl, y_tl, x_br, y_br, image_size=self.image_size, strict=self.strict) 49 | 50 | @classmethod 51 | def from_voc( 52 | cls, 53 | x_tl: int, 54 | y_tl: int, 55 | x_br: int, 56 | y_br: int, 57 | image_size: Tuple[int, int] = None, 58 | strict: bool = True, 59 | ) -> "CocoBoundingBox": 60 | w = x_br - x_tl 61 | h = y_br - y_tl 62 | return cls(x_tl, y_tl, w, h, image_size=image_size, strict=strict) 63 | -------------------------------------------------------------------------------- /pybboxes/boxes/albumentations_bounding_box.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes.boxes.base import BaseBoundingBox 4 | from pybboxes.boxes.bbox import BoundingBox 5 | 6 | 7 | class AlbumentationsBoundingBox(BaseBoundingBox): 8 | def __init__( 9 | self, 10 | x_tl: float, 11 | y_tl: float, 12 | x_br: float, 13 | y_br: float, 14 | image_size: Tuple[int, int], 15 | strict: bool = False, 16 | ): 17 | super(AlbumentationsBoundingBox, self).__init__(x_tl, y_tl, x_br, y_br, image_size=image_size, strict=strict) 18 | 19 | def _validate_values(self, x_tl, y_tl, x_br, y_br): 20 | if not (0 <= x_tl < x_br <= 1) or not (0 <= y_tl < y_br <= 1): 21 | if self.strict: 22 | raise ValueError( 23 | "Given bounding box values is out of bounds. " 24 | "To silently skip out of bounds cases pass 'strict=False'." 25 | ) 26 | self._is_oob = True 27 | else: 28 | self._is_oob = False 29 | 30 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BoundingBox"]: 31 | if self.is_image_size_null(): 32 | raise ValueError("'image_size' is required for conversion.") 33 | x_tl, y_tl, x_br, y_br = self.values 34 | image_width, image_height = self.image_size 35 | x_tl = round(x_tl * image_width) 36 | y_tl = round(y_tl * image_height) 37 | x_br = round(x_br * image_width) 38 | y_br = round(y_br * image_height) 39 | if return_values: 40 | return x_tl, y_tl, x_br, y_br 41 | return BoundingBox(x_tl, y_tl, x_br, y_br, image_size=self.image_size, strict=self.strict) 42 | 43 | @classmethod 44 | def from_voc( 45 | cls, 46 | x_tl: int, 47 | y_tl: int, 48 | x_br: int, 49 | y_br: int, 50 | image_size: Tuple[int, int] = None, 51 | strict: bool = False, 52 | ) -> "AlbumentationsBoundingBox": 53 | if image_size is None: 54 | raise ValueError("AlbumentationsBoundingBox requires `image_size` to scale the box values.") 55 | image_width, image_height = image_size 56 | x_tl /= image_width 57 | y_tl /= image_height 58 | x_br /= image_width 59 | y_br /= image_height 60 | return cls(x_tl, y_tl, x_br, y_br, image_size=image_size, strict=strict) 61 | -------------------------------------------------------------------------------- /pybboxes/boxes/voc_bounding_box.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes.boxes.base import BaseBoundingBox 4 | from pybboxes.boxes.bbox import BoundingBox 5 | 6 | 7 | class VocBoundingBox(BaseBoundingBox): 8 | """ 9 | Alias for the VOC style bounding box. 
10 | """ 11 | 12 | def __init__( 13 | self, 14 | x_tl: int, 15 | y_tl: int, 16 | x_br: int, 17 | y_br: int, 18 | image_size: Tuple[int, int], 19 | strict: bool = False, 20 | ): 21 | super(VocBoundingBox, self).__init__(x_tl, y_tl, x_br, y_br, image_size=image_size, strict=strict) 22 | 23 | def _correct_value_types(self, *values): 24 | return tuple([round(val) for val in values]) 25 | 26 | def _validate_values(self, x_tl, y_tl, x_br, y_br): 27 | image_width, image_height = self.image_size 28 | if x_tl > x_br or y_tl > y_br: 29 | raise ValueError("Incorrect BoundingBox format. Must be in type [x-tl, y-tl, x-br, y-br].") 30 | elif (x_tl, y_tl) == (x_br, y_br): 31 | raise ValueError("Given top-left and bottom-right points must be distinct.") 32 | elif ( 33 | not 0 <= x_tl < x_br 34 | or not 0 <= y_tl < y_br 35 | or (image_width is not None and x_br > image_width) 36 | or (image_height is not None and y_br > image_height) 37 | ): 38 | if self.strict: 39 | raise ValueError( 40 | "Given bounding box values is out of bounds. " 41 | "To silently skip out of bounds cases pass 'strict=False'." 42 | ) 43 | self._is_oob = True 44 | elif not self.is_image_size_null(): 45 | self._is_oob = False 46 | 47 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BoundingBox"]: 48 | x_tl, y_tl, x_br, y_br = self.values 49 | if return_values: 50 | return x_tl, y_tl, x_br, y_br 51 | return BoundingBox(x_tl, y_tl, x_br, y_br, image_size=self.image_size, strict=self.strict) 52 | 53 | @classmethod 54 | def from_voc( 55 | cls, 56 | x_tl: int, 57 | y_tl: int, 58 | x_br: int, 59 | y_br: int, 60 | image_size: Tuple[int, int] = None, 61 | strict: bool = True, 62 | ) -> "VocBoundingBox": 63 | return cls(x_tl, y_tl, x_br, y_br, image_size=image_size, strict=strict) 64 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # IDE 132 | .idea/ 133 | .vscode/ 134 | 135 | # fixtures for annotation file conversion testing 136 | testing_data_coco 137 | testing_data_images -------------------------------------------------------------------------------- /pybboxes/boxes/fiftyone_bounding_box.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes.boxes.base import BaseBoundingBox 4 | from pybboxes.boxes.bbox import BoundingBox 5 | 6 | 7 | class FiftyoneBoundingBox(BaseBoundingBox): 8 | def __init__( 9 | self, 10 | x_tl: float, 11 | y_tl: float, 12 | w: float, 13 | h: float, 14 | image_size: Tuple[int, int], 15 | strict: bool = False, 16 | ): 17 | super(FiftyoneBoundingBox, self).__init__(x_tl, y_tl, w, h, image_size=image_size, strict=strict) 18 | 19 | def _validate_values(self, *values): 20 | x_tl, y_tl, w, h = values 21 | if not 0 < w <= 1 or not 0 < h <= 1: 22 | raise ValueError("Given width and height must be in the range (0,1].") 23 | elif not 0 <= x_tl < x_tl + w <= 1 or not 0 <= y_tl < y_tl + h <= 1: 24 | if self.strict: 25 | raise ValueError( 26 | "Given bounding box values is out of bounds. " 27 | "To silently skip out of bounds cases pass 'strict=False'." 
28 | ) 29 | self._is_oob = True 30 | else: 31 | self._is_oob = False 32 | 33 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BoundingBox"]: 34 | if self.is_image_size_null(): 35 | raise ValueError("'image_size' is required for conversion.") 36 | x_tl, y_tl, w, h = self.values 37 | image_width, image_height = self.image_size 38 | x_tl *= image_width 39 | y_tl *= image_height 40 | w *= image_width 41 | h *= image_height 42 | x_br = x_tl + w 43 | y_br = y_tl + h 44 | x_tl, y_tl, x_br, y_br = round(x_tl), round(y_tl), round(x_br), round(y_br) 45 | if return_values: 46 | return x_tl, y_tl, x_br, y_br 47 | return BoundingBox(x_tl, y_tl, x_br, y_br, image_size=self.image_size, strict=self.strict) 48 | 49 | @classmethod 50 | def from_voc( 51 | cls, 52 | x_tl: int, 53 | y_tl: int, 54 | x_br: int, 55 | y_br: int, 56 | image_size: Tuple[int, int] = None, 57 | strict: bool = True, 58 | ) -> "FiftyoneBoundingBox": 59 | if image_size is None: 60 | raise ValueError("YoloBounding box requires `image_size` to normalize the box values.") 61 | image_width, image_height = image_size 62 | w = x_br - x_tl 63 | h = y_br - y_tl 64 | 65 | x_tl /= image_width 66 | y_tl /= image_height 67 | w /= image_width 68 | h /= image_height 69 | return cls(x_tl, y_tl, w, h, image_size=image_size, strict=strict) 70 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import io 2 | import os 3 | import re 4 | 5 | import setuptools 6 | 7 | 8 | def get_long_description(): 9 | base_dir = os.path.abspath(os.path.dirname(__file__)) 10 | with io.open(os.path.join(base_dir, "README.md"), encoding="utf-8") as f: 11 | return f.read() 12 | 13 | 14 | def get_requirements(): 15 | with open("requirements.txt", encoding="utf8") as f: 16 | return f.read().splitlines() 17 | 18 | 19 | def get_version(): 20 | current_dir = os.path.abspath(os.path.dirname(__file__)) 21 | version_file = os.path.join(current_dir, "pybboxes", "__init__.py") 22 | with io.open(version_file, encoding="utf-8") as f: 23 | return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.M).group(1) 24 | 25 | 26 | _DEV_REQUIREMENTS = [ 27 | "black==22.3.0", 28 | "click==8.0.4", 29 | "deepdiff==5.5.0", 30 | "flake8==3.9.2", 31 | 'importlib-metadata>=1.1.0,<4.3;python_version<"3.8"', 32 | "isort==5.9.2", 33 | "pytest>=7.0.1", 34 | "pytest-cov>=3.0.0", 35 | "pytest-timeout>=2.1.0", 36 | "pytest-depends>=1.0.1", 37 | "huggingface-hub>=0.25.0", 38 | ] 39 | 40 | extras = { 41 | "dev": _DEV_REQUIREMENTS, 42 | } 43 | 44 | 45 | setuptools.setup( 46 | name="pybboxes", 47 | version=get_version(), 48 | author="Devrim Cavusoglu", 49 | license="MIT", 50 | description="Light Weight Toolkit for Bounding Boxes", 51 | long_description=get_long_description(), 52 | long_description_content_type="text/markdown", 53 | url="https://github.com/devrimcavusoglu/pybboxes", 54 | packages=setuptools.find_packages(exclude=["tests"]), 55 | python_requires=">=3.8", 56 | install_requires=get_requirements(), 57 | extras_require=extras, 58 | include_package_data=True, 59 | classifiers=[ 60 | "Development Status :: 5 - Production/Stable", 61 | "Operating System :: OS Independent", 62 | "Intended Audience :: Developers", 63 | "Intended Audience :: Science/Research", 64 | "Programming Language :: Python :: 3", 65 | "Programming Language :: Python :: 3.8", 66 | "Programming Language :: Python :: 3.9", 67 | "Programming Language :: Python :: 3.10", 
68 | "Programming Language :: Python :: 3.11", 69 | "Topic :: Software Development :: Libraries", 70 | "Topic :: Software Development :: Libraries :: Python Modules", 71 | "Topic :: Education", 72 | "Topic :: Scientific/Engineering", 73 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 74 | ], 75 | keywords="machine-learning, deep-learning, image-processing, pytorch, tensorflow, numpy, bounding-box, iou, " 76 | "computer-vision, cv", 77 | ) 78 | -------------------------------------------------------------------------------- /pybboxes/boxes/yolo_bounding_box.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes.boxes.base import BaseBoundingBox 4 | from pybboxes.boxes.bbox import BoundingBox 5 | 6 | 7 | class YoloBoundingBox(BaseBoundingBox): 8 | def __init__( 9 | self, 10 | x_c: float, 11 | y_c: float, 12 | w: float, 13 | h: float, 14 | image_size: Tuple[int, int], 15 | strict: bool = False, 16 | ): 17 | super(YoloBoundingBox, self).__init__(x_c, y_c, w, h, image_size=image_size, strict=strict) 18 | 19 | def _validate_values(self, *values): 20 | x_c, y_c, w, h = values 21 | if not 0 < w <= 1 or not 0 < h <= 1: 22 | raise ValueError("Given width and height must be in the range (0,1].") 23 | elif ( 24 | not 0 <= x_c < 1 25 | or not 0 <= y_c < 1 26 | or not 0 <= x_c - w / 2 < x_c + w / 2 <= 1 27 | or not 0 <= y_c - h / 2 < y_c + h / 2 <= 1 28 | ): 29 | if self.strict: 30 | raise ValueError("Given bounding box values is out of bounds.") 31 | self._is_oob = True 32 | else: 33 | self._is_oob = False 34 | 35 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BoundingBox"]: 36 | if self.is_image_size_null(): 37 | raise ValueError("'image_size' is required for conversion.") 38 | x_c, y_c, w, h = self.values 39 | image_width, image_height = self.image_size 40 | x_tl = x_c - w / 2 41 | y_tl = y_c - h / 2 42 | x_tl *= image_width 43 | y_tl *= image_height 44 | w *= image_width 45 | h *= image_height 46 | x_br = x_tl + w 47 | y_br = y_tl + h 48 | 49 | x_tl, y_tl, x_br, y_br = round(x_tl), round(y_tl), round(x_br), round(y_br) 50 | if return_values: 51 | return x_tl, y_tl, x_br, y_br 52 | return BoundingBox(x_tl, y_tl, x_br, y_br, image_size=self.image_size, strict=self.strict) 53 | 54 | @classmethod 55 | def from_voc( 56 | cls, 57 | x_tl: int, 58 | y_tl: int, 59 | x_br: int, 60 | y_br: int, 61 | image_size: Tuple[int, int] = None, 62 | strict: bool = False, 63 | ) -> "YoloBoundingBox": 64 | if image_size is None: 65 | raise ValueError("YoloBounding box requires `image_size` to scale the box values.") 66 | image_width, image_height = image_size 67 | 68 | w = x_br - x_tl 69 | h = y_br - y_tl 70 | x_c = x_tl + w / 2 71 | y_c = y_tl + h / 2 72 | x_c /= image_width 73 | y_c /= image_height 74 | w /= image_width 75 | h /= image_height 76 | return cls(x_c, y_c, w, h, image_size=image_size, strict=strict) 77 | -------------------------------------------------------------------------------- /tests/pybboxes/test_functional.py: -------------------------------------------------------------------------------- 1 | from pybboxes.functional import compute_area, convert_bbox 2 | from tests.utils import assert_almost_equal 3 | 4 | 5 | def test_convert_albumentations2voc(albumentations_bbox, voc_bbox, image_size): 6 | converted_box = convert_bbox(albumentations_bbox, from_type="albumentations", to_type="voc", image_size=image_size) 7 | assert_almost_equal(actual=list(converted_box), 
desired=voc_bbox) 8 | 9 | 10 | def test_convert_coco2voc(coco_bbox, voc_bbox): 11 | converted_box = convert_bbox(coco_bbox, from_type="coco", to_type="voc") 12 | assert_almost_equal(actual=list(converted_box), desired=voc_bbox) 13 | 14 | 15 | def test_convert_fiftyone2voc(fiftyone_bbox, voc_bbox, image_size): 16 | converted_box = convert_bbox(fiftyone_bbox, from_type="fiftyone", to_type="voc", image_size=image_size) 17 | assert_almost_equal(actual=list(converted_box), desired=voc_bbox) 18 | 19 | 20 | def test_convert_yolo2voc(yolo_bbox, voc_bbox, image_size): 21 | converted_box = convert_bbox(yolo_bbox, from_type="yolo", to_type="voc", image_size=image_size) 22 | assert_almost_equal(actual=list(converted_box), desired=voc_bbox) 23 | 24 | 25 | def test_convert_voc2albumentations(voc_bbox, albumentations_bbox, image_size): 26 | converted_box = convert_bbox(voc_bbox, from_type="voc", to_type="albumentations", image_size=image_size) 27 | assert_almost_equal(actual=list(converted_box), desired=albumentations_bbox) 28 | 29 | 30 | def test_convert_voc2coco(voc_bbox, coco_bbox): 31 | converted_box = convert_bbox(voc_bbox, from_type="voc", to_type="coco") 32 | assert_almost_equal(actual=list(converted_box), desired=coco_bbox) 33 | 34 | 35 | def test_convert_voc2fiftyone(voc_bbox, fiftyone_bbox, image_size): 36 | converted_box = convert_bbox(voc_bbox, from_type="voc", to_type="fiftyone", image_size=image_size) 37 | assert_almost_equal(actual=list(converted_box), desired=fiftyone_bbox) 38 | 39 | 40 | def test_convert_voc2yolo(voc_bbox, yolo_bbox, image_size): 41 | converted_box = convert_bbox(voc_bbox, from_type="voc", to_type="yolo", image_size=image_size) 42 | assert_almost_equal(actual=list(converted_box), desired=yolo_bbox) 43 | 44 | 45 | def test_area_albumentations(albumentations_bbox, bbox_area, image_size): 46 | area = compute_area(albumentations_bbox, bbox_type="albumentations", image_size=image_size) 47 | assert_almost_equal(actual=int(area), desired=bbox_area) 48 | 49 | 50 | def test_area_coco(coco_bbox, bbox_area, image_size): 51 | area = compute_area(coco_bbox, bbox_type="coco", image_size=image_size) 52 | assert_almost_equal(actual=int(area), desired=bbox_area) 53 | 54 | 55 | def test_area_fiftyone(fiftyone_bbox, bbox_area, image_size): 56 | area = compute_area(fiftyone_bbox, bbox_type="fiftyone", image_size=image_size) 57 | assert_almost_equal(actual=int(area), desired=bbox_area) 58 | 59 | 60 | def test_area_voc(voc_bbox, bbox_area, image_size): 61 | area = compute_area(voc_bbox, bbox_type="voc", image_size=image_size) 62 | assert_almost_equal(actual=int(area), desired=bbox_area) 63 | 64 | 65 | def test_area_yolo(yolo_bbox, bbox_area, image_size): 66 | area = compute_area(yolo_bbox, bbox_type="yolo", image_size=image_size) 67 | assert_almost_equal(actual=int(area), desired=bbox_area) 68 | -------------------------------------------------------------------------------- /pybboxes/functional.py: -------------------------------------------------------------------------------- 1 | from typing import Tuple, Union 2 | 3 | from pybboxes._typing import BboxType, GenericBboxType 4 | from pybboxes.boxes.base import BaseBoundingBox 5 | from pybboxes.boxes.bbox import load_bbox 6 | 7 | 8 | def convert_bbox( 9 | bbox: GenericBboxType, 10 | from_type: str = None, 11 | to_type: str = None, 12 | image_size: Tuple[int, int] = None, 13 | return_values: bool = True, 14 | **kwargs, 15 | ) -> Union[BboxType, BaseBoundingBox]: 16 | """ 17 | Converts given bbox with given `from_type` to given `to_type`. 
It uses VOC format 18 | as an intermediate format. 19 | 20 | Args: 21 | bbox: (generic) Bounding box. 22 | from_type: (str) Type/Format of the given bounding box. 23 | to_type: (str) Type/Format of the resulting bounding box. 24 | image_size: (tuple(int,int)) Image size as (w, h) tuple, it is required if the one side of the 25 | boxes requires scaling. 26 | return_values: (bool) Whether to return values as a Tuple, or BoundingBox object. 27 | True by default for compatibility purposes. 28 | 29 | Return: 30 | Bounding box in type `to_type`. 31 | """ 32 | if not isinstance(bbox, BaseBoundingBox): 33 | if not from_type: 34 | raise ValueError("if `bbox` is not a BoundingBox object, `from_type` is required.") 35 | bbox = load_bbox(name=from_type, values=bbox, image_size=image_size, **kwargs) 36 | source_to_target = getattr(bbox, f"to_{to_type}") 37 | target_bbox = source_to_target() 38 | if return_values: 39 | return target_bbox.values 40 | return target_bbox 41 | 42 | 43 | def compute_intersection(bbox1: GenericBboxType, bbox2: GenericBboxType, bbox_type: str = "coco", **kwargs): 44 | """ 45 | Computes intersection area between given bounding boxes. 46 | 47 | Args: 48 | bbox1: Bounding box 1. 49 | bbox2: Bounding box 2. 50 | bbox_type: Format of the bounding boxes. It's 'coco' [x-tl, y-tl, w, h] by default. 51 | 52 | Returns: 53 | Intersection area if bounding boxes intersect, 0 otherwise. 54 | """ 55 | bbox1 = load_bbox(name=bbox_type, values=bbox1, **kwargs) 56 | bbox2 = load_bbox(name=bbox_type, values=bbox2, **kwargs) 57 | return bbox1 * bbox2 58 | 59 | 60 | def compute_area(bbox: GenericBboxType, bbox_type: str = "coco", **kwargs): 61 | """ 62 | Computes the area of given bounding box. 63 | """ 64 | return compute_intersection(bbox, bbox, bbox_type, **kwargs) 65 | 66 | 67 | def compute_union(bbox1: GenericBboxType, bbox2: GenericBboxType, bbox_type: str = "coco", **kwargs): 68 | """ 69 | Computes union area of given boxes. 70 | 71 | Args: 72 | bbox1: Bounding box 1. 73 | bbox2: Bounding box 2. 74 | bbox_type: Format of the bounding boxes. It's 'coco' [x-tl, y-tl, w, h] by default. 75 | 76 | Returns: 77 | Union area. 78 | """ 79 | intersection = compute_intersection(bbox1, bbox2, bbox_type=bbox_type, **kwargs) 80 | area1 = compute_area(bbox1, bbox_type=bbox_type, **kwargs) 81 | area2 = compute_area(bbox2, bbox_type=bbox_type, **kwargs) 82 | return area1 + area2 - intersection 83 | 84 | 85 | def compute_iou(bbox1: GenericBboxType, bbox2: GenericBboxType, bbox_type: str = "coco", **kwargs): 86 | """ 87 | Computes Intersection over Union (IoU) (special form of Jaccard Index) metric. 88 | 89 | Args: 90 | bbox1: Bounding box 1. 91 | bbox2: Bounding box 2. 92 | bbox_type: Format of the bounding boxes. It's 'coco' [x-tl, y-tl, w, h] by default. 93 | 94 | Returns: 95 | Intersection over Union ratio. 96 | """ 97 | return compute_intersection(bbox1, bbox2, bbox_type, **kwargs) / compute_union(bbox1, bbox2, bbox_type, **kwargs) 98 | -------------------------------------------------------------------------------- /tests/pybboxes/conftest.py: -------------------------------------------------------------------------------- 1 | """ 2 | Testing the package pybboxes. 
3 | Default image/bbox selected from the following source 4 | https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/ 5 | """ 6 | import inspect 7 | import os 8 | from typing import Optional 9 | 10 | import numpy as np 11 | import pytest 12 | 13 | from tests.pybboxes import EXPECTED_OUTPUTS 14 | from tests.utils import load_json 15 | 16 | 17 | @pytest.fixture 18 | def seed(): 19 | return 42 20 | 21 | 22 | @pytest.fixture(scope="package") 23 | def image_size(): 24 | return 640, 480 25 | 26 | 27 | @pytest.fixture(scope="package") 28 | def bbox_area(): 29 | return 322 * 117 # w x h 30 | 31 | 32 | @pytest.fixture(scope="package") 33 | def albumentations_bbox(): 34 | return [0.153125, 0.71875, 0.65625, 0.9625] 35 | 36 | 37 | @pytest.fixture(scope="package") 38 | def unnormalized_bbox_shift_amount(): 39 | return (2, 2) 40 | 41 | 42 | @pytest.fixture(scope="package") 43 | def normalized_bbox_shift_amount(): 44 | return (0.05, 0.03) 45 | 46 | 47 | @pytest.fixture(scope="package") 48 | def scale_factor(): 49 | return 0.5 50 | 51 | 52 | @pytest.fixture(scope="package") 53 | def coco_bbox(): 54 | return [98, 345, 322, 117] 55 | 56 | 57 | @pytest.fixture(scope="package") 58 | def fiftyone_bbox(): 59 | return [0.153125, 0.71875, 0.503125, 0.24375] 60 | 61 | 62 | @pytest.fixture(scope="package") 63 | def voc_bbox(): 64 | return [98, 345, 420, 462] 65 | 66 | 67 | @pytest.fixture(scope="package") 68 | def yolo_bbox(): 69 | return [0.4046875, 0.840625, 0.503125, 0.24375] 70 | 71 | 72 | @pytest.fixture 73 | def multiple_bbox_shape(): 74 | return 8, 3, 100, 2 75 | 76 | 77 | @pytest.fixture 78 | def expected_multiple_bbox_shape(): 79 | return 8, 3, 100 80 | 81 | 82 | @pytest.fixture 83 | def multiple_albumentations_bboxes(multiple_bbox_shape, seed): 84 | np.random.seed(seed) 85 | a = np.random.uniform(0, 0.5, size=multiple_bbox_shape) 86 | b = np.random.uniform(0.5, 1, size=multiple_bbox_shape) 87 | return np.concatenate([a, b], -1) 88 | 89 | 90 | @pytest.fixture 91 | def multiple_coco_bboxes(multiple_bbox_shape, image_size, seed): 92 | np.random.seed(seed) 93 | w, h = image_size 94 | a = np.random.randint(0, min(w, h) - 30, size=multiple_bbox_shape) 95 | b = np.random.randint(1, 30, size=multiple_bbox_shape) 96 | return np.concatenate([a, b], -1) 97 | 98 | 99 | @pytest.fixture 100 | def multiple_fiftyone_bboxes(multiple_bbox_shape, seed): 101 | np.random.seed(seed) 102 | a = np.random.uniform(0, 0.8, size=multiple_bbox_shape) 103 | b = np.random.uniform(0, 0.2, size=multiple_bbox_shape) 104 | return np.concatenate([a, b], -1) 105 | 106 | 107 | @pytest.fixture 108 | def multiple_voc_bboxes(multiple_bbox_shape, image_size, seed): 109 | np.random.seed(seed) 110 | w, h = image_size 111 | cut = min(w, h) // 2 112 | a = np.random.randint(0, cut, size=multiple_bbox_shape) 113 | b = np.random.randint(cut, min(w, h), size=multiple_bbox_shape) 114 | return np.concatenate([a, b], -1) 115 | 116 | 117 | @pytest.fixture 118 | def multiple_yolo_bboxes(multiple_bbox_shape, seed): 119 | np.random.seed(seed) 120 | a = np.random.uniform(0, 0.6, size=multiple_bbox_shape) 121 | b = np.random.uniform(0, 0.2, size=multiple_bbox_shape) 122 | return np.concatenate([a, b], -1) 123 | 124 | 125 | def get_expected_output(prefix: Optional[str] = None): 126 | def wrapper(fn, *args, **kwargs): 127 | module_name = os.path.basename(inspect.getfile(fn)).replace(".py", "") 128 | path = os.path.join(EXPECTED_OUTPUTS, prefix, f"{module_name}.json") 129 | test_name = fn.__name__.replace("output_", "") 130 | fn.output = 
load_json(path)[test_name] 131 | return fn 132 | 133 | if prefix is None: 134 | prefix = "" 135 | return wrapper 136 | -------------------------------------------------------------------------------- /pybboxes/utils/io.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import struct 4 | from typing import Dict, List, Optional, Union 5 | 6 | import yaml 7 | 8 | 9 | def get_image_size(file_path: str): 10 | """ 11 | Return (width, height) for a given img file content - no external 12 | dependencies except the os and struct modules from Python core 13 | """ 14 | with open(file_path, "rb") as fhandle: 15 | head = fhandle.read(24) 16 | if len(head) != 24: 17 | return None 18 | if head.startswith(b"\x89PNG\r\n\x1a\n"): 19 | check = struct.unpack(">i", head[16:20])[0] 20 | if check != 0x0D0A1A0A: 21 | return None 22 | width, height = struct.unpack(">ii", head[16:24]) 23 | elif head[:2] == b"\xff\xd8": 24 | try: 25 | fhandle.seek(0) # Read 0xff next 26 | size = 2 27 | ftype = 0 28 | while not 0xC0 <= ftype <= 0xCF: 29 | fhandle.seek(size, 1) 30 | byte = fhandle.read(1) 31 | while ord(byte) == 0xFF: 32 | byte = fhandle.read(1) 33 | ftype = ord(byte) 34 | size = struct.unpack(">H", fhandle.read(2))[0] - 2 35 | # We are at a SOFn block 36 | fhandle.seek(1, 1) # Skip `precision' byte. 37 | height, width = struct.unpack(">HH", fhandle.read(4)) 38 | except Exception: 39 | return None 40 | else: 41 | return None 42 | return width, height 43 | 44 | 45 | class IndentfulDumper(yaml.Dumper): 46 | def increase_indent(self, flow=False, indentless=False): 47 | return super(IndentfulDumper, self).increase_indent(flow, False) 48 | 49 | 50 | def assure_overridable(f): 51 | """ 52 | Wrapper allowing easy use of overwrite-safe functionality. All of the write-helpers 53 | use this wrapper. In case of a conflict, it raises an exception. 54 | """ 55 | 56 | def wrapper(obj, fp, **kwargs): 57 | overwrite = kwargs.get("overwrite", True) 58 | if os.path.exists(fp) and not overwrite: 59 | raise ValueError(f"Path {fp} already exists. To overwrite, use `overwrite=True`.") 60 | return f(obj, fp, **kwargs) 61 | 62 | return wrapper 63 | 64 | 65 | def read_json(fp: str, **kwargs) -> Union[Dict, List]: 66 | """ 67 | Reads a JSON file given path. 68 | 69 | Args: 70 | fp: (str) File path. 71 | 72 | Return: 73 | Dictionary or List of dictionaries depending on the content. 74 | """ 75 | with open(fp, "r") as fd_in: 76 | data = json.load(fd_in, **kwargs) 77 | return data 78 | 79 | 80 | def read_yaml(fp: str) -> Union[Dict, List]: 81 | """ 82 | Reads a YAML file given path. 83 | 84 | Args: 85 | fp: (str) File path. 86 | 87 | Return: 88 | Generic Python object. 89 | """ 90 | with open(fp, "r") as fd_in: 91 | data = yaml.safe_load(fd_in) 92 | return data 93 | 94 | 95 | @assure_overridable 96 | def write_json(obj: Union[Dict, List], fp: str, encoding: Optional[str] = None, **kwargs) -> None: 97 | """ 98 | Writes a Python dictionary or list object to the given path in JSON format. 99 | 100 | Args: 101 | obj: (dict, list) Python dictionary or list object. 102 | fp: (str) Path of the output file. 103 | encoding: (Optional(str)) Encoding for writing. 104 | """ 105 | with open(fp, "w", encoding=encoding) as fd_out: 106 | json.dump(obj, fd_out, **kwargs) 107 | 108 | 109 | @assure_overridable 110 | def write_yaml(obj: Dict, fp: str, indent_blocks: bool = True, **kwargs) -> None: 111 | """ 112 | Writes a Python dictionary to the given path in YAML format. 
113 | 114 | Args: 115 | obj: (any) Serializable Python object. 116 | fp: (str) Path of the output file. 117 | indent_blocks: (bool) Whether dump with indents. 118 | """ 119 | with open(fp, "w") as fd_out: 120 | if indent_blocks: 121 | d = yaml.dump(obj, Dumper=IndentfulDumper, **kwargs) 122 | fd_out.write(d) 123 | else: 124 | yaml.safe_dump(obj, fd_out, **kwargs) 125 | -------------------------------------------------------------------------------- /tests/pybboxes/boxes/test_coco_bounding_box.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pybboxes import BoundingBox, CocoBoundingBox 5 | from tests.utils import assert_almost_equal 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def coco_bounding_box(coco_bbox, image_size): 10 | return BoundingBox.from_coco(*coco_bbox, image_size=image_size) 11 | 12 | 13 | @pytest.fixture(scope="module") 14 | def coco_oob_bounding_box(): 15 | return [100, 105, 460, 425] 16 | 17 | 18 | @pytest.fixture(scope="module") 19 | def coco_bounding_box2(coco_bbox, image_size): 20 | np.random.seed(42) 21 | coco_bbox2 = coco_bbox + np.random.randint(-5, 5, size=4) 22 | return BoundingBox.from_coco(*coco_bbox2, image_size=image_size) 23 | 24 | 25 | @pytest.fixture 26 | def coco_multi_array_zeroth(): 27 | return 102, 435, 20, 18 28 | 29 | 30 | @pytest.fixture() 31 | def scaled_coco_box(): 32 | return 145, 362, 228, 83 33 | 34 | 35 | @pytest.fixture(scope="function") 36 | def coco_area_computations_expected_output(): 37 | return { 38 | "total_area": 75258, 39 | "union": 38664, 40 | "intersection": 36594, 41 | "iou": 0.9464618249534451, 42 | "ratio": 1.0023946360153257, 43 | "difference": 1080, 44 | } 45 | 46 | 47 | def test_area_computations(coco_bounding_box, coco_bounding_box2, coco_area_computations_expected_output): 48 | actual_output = { 49 | "total_area": coco_bounding_box.area + coco_bounding_box2.area, 50 | "union": coco_bounding_box + coco_bounding_box2, 51 | "intersection": coco_bounding_box * coco_bounding_box2, 52 | "iou": coco_bounding_box.iou(coco_bounding_box2), 53 | "ratio": coco_bounding_box / coco_bounding_box2, 54 | "difference": coco_bounding_box - coco_bounding_box2, 55 | } 56 | assert_almost_equal(actual=actual_output, desired=coco_area_computations_expected_output) 57 | 58 | 59 | def test_from_array(multiple_coco_bboxes, image_size, expected_multiple_bbox_shape, coco_multi_array_zeroth): 60 | coco_boxes = CocoBoundingBox.from_array(multiple_coco_bboxes, image_size=image_size) 61 | assert_almost_equal(actual=coco_boxes.shape, desired=expected_multiple_bbox_shape) 62 | assert_almost_equal( 63 | actual=coco_boxes.flatten()[0].values, desired=coco_multi_array_zeroth, ignore_numeric_type_changes=True 64 | ) 65 | 66 | 67 | @pytest.mark.parametrize( 68 | "box_values,expected_out", 69 | [ 70 | ((270, 350, 400, 450), (270, 350, 370, 130)), 71 | ((-50, -50, 342, 190), (0, 0, 292, 140)), 72 | ((153, 150, 490, 580), (153, 150, 487, 330)), 73 | ], 74 | ) 75 | def test_clamp(box_values, expected_out, image_size): 76 | coco_box = CocoBoundingBox(*box_values, image_size=image_size) 77 | coco_box.clamp() 78 | 79 | assert_almost_equal(actual=coco_box.values, desired=expected_out, ignore_numeric_type_changes=True) 80 | 81 | 82 | def test_scale(coco_bounding_box, scaled_coco_box, scale_factor): 83 | _, _, w, h = coco_bounding_box.values 84 | 85 | coco_bounding_box.scale(scale_factor) 86 | 87 | assert_almost_equal(actual=coco_bounding_box.values, desired=scaled_coco_box, 
ignore_numeric_type_changes=True) 88 | 89 | actual_area = coco_bounding_box.area 90 | desired_area = w * h * scale_factor 91 | assert actual_area - desired_area < 10**2 92 | 93 | 94 | def test_shift(coco_bounding_box, unnormalized_bbox_shift_amount): 95 | x_tl, y_tl, w, h = coco_bounding_box.values 96 | desired = (x_tl + unnormalized_bbox_shift_amount[0], y_tl + unnormalized_bbox_shift_amount[1], w, h) 97 | actual_output = coco_bounding_box.shift(unnormalized_bbox_shift_amount) 98 | 99 | assert_almost_equal(actual=list(actual_output.values), desired=list(desired)) 100 | 101 | 102 | def test_oob(coco_oob_bounding_box, image_size): 103 | with pytest.raises(ValueError): 104 | BoundingBox.from_coco(*coco_oob_bounding_box, image_size=image_size, strict=True) 105 | 106 | coco_box = BoundingBox.from_coco(*coco_oob_bounding_box, image_size=image_size) 107 | assert coco_box.is_oob is True 108 | 109 | 110 | # Conversions 111 | 112 | 113 | def test_to_albumentations(coco_bounding_box, albumentations_bbox): 114 | coco2albumentations_bbox = coco_bounding_box.to_albumentations() 115 | assert_almost_equal(actual=list(coco2albumentations_bbox.values), desired=albumentations_bbox) 116 | 117 | 118 | def test_to_fiftyone(coco_bounding_box, fiftyone_bbox): 119 | coco2fiftyone_bbox = coco_bounding_box.to_fiftyone() 120 | assert_almost_equal(actual=list(coco2fiftyone_bbox.values), desired=fiftyone_bbox) 121 | 122 | 123 | def test_to_voc(coco_bounding_box, voc_bbox): 124 | coco2voc_bbox = coco_bounding_box.to_voc() 125 | assert_almost_equal(actual=list(coco2voc_bbox.values), desired=voc_bbox) 126 | 127 | 128 | def test_to_yolo(coco_bounding_box, yolo_bbox): 129 | coco2yolo_bbox = coco_bounding_box.to_yolo() 130 | assert_almost_equal(actual=list(coco2yolo_bbox.values), desired=yolo_bbox) 131 | -------------------------------------------------------------------------------- /tests/pybboxes/boxes/test_voc_bounding_box.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pybboxes import BoundingBox, VocBoundingBox 5 | from tests.utils import assert_almost_equal 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def voc_bounding_box(voc_bbox, image_size): 10 | return BoundingBox.from_voc(*voc_bbox, image_size=image_size) 11 | 12 | 13 | @pytest.fixture(scope="module") 14 | def voc_oob_bounding_box(): 15 | return [100, 105, 560, 530] 16 | 17 | 18 | @pytest.fixture() 19 | def voc_multi_array_zeroth(): 20 | return 102, 179, 433, 457 21 | 22 | 23 | @pytest.fixture(scope="module") 24 | def voc_bounding_box2(voc_bbox, image_size): 25 | np.random.seed(42) 26 | voc_bbox2 = voc_bbox + np.random.randint(-5, 5, size=4) 27 | return BoundingBox.from_voc(*voc_bbox2, image_size=image_size) 28 | 29 | 30 | @pytest.fixture() 31 | def scaled_voc_box(): 32 | return 145, 362, 373, 445 33 | 34 | 35 | @pytest.fixture(scope="function") 36 | def voc_area_computations_expected_output(): 37 | return { 38 | "total_area": 75788, 39 | "union": 38552, 40 | "intersection": 37236, 41 | "iou": 0.9658642871965138, 42 | "ratio": 0.9884556855748544, 43 | "difference": 438, 44 | } 45 | 46 | 47 | def test_area_computations(voc_bounding_box, voc_bounding_box2, voc_area_computations_expected_output): 48 | actual_output = { 49 | "total_area": voc_bounding_box.area + voc_bounding_box2.area, 50 | "union": voc_bounding_box + voc_bounding_box2, 51 | "intersection": voc_bounding_box * voc_bounding_box2, 52 | "iou": voc_bounding_box.iou(voc_bounding_box2), 53 | "ratio": 
voc_bounding_box / voc_bounding_box2, 54 | "difference": voc_bounding_box - voc_bounding_box2, 55 | } 56 | assert_almost_equal(actual=actual_output, desired=voc_area_computations_expected_output) 57 | 58 | 59 | def test_from_array(multiple_voc_bboxes, image_size, expected_multiple_bbox_shape, voc_multi_array_zeroth): 60 | voc_boxes = VocBoundingBox.from_array(multiple_voc_bboxes, image_size=image_size) 61 | assert_almost_equal(actual=voc_boxes.shape, desired=expected_multiple_bbox_shape) 62 | assert_almost_equal( 63 | actual=voc_boxes.flatten()[0].values, desired=voc_multi_array_zeroth, ignore_numeric_type_changes=True 64 | ) 65 | 66 | 67 | @pytest.mark.parametrize( 68 | "box_values,expected_out", 69 | [ 70 | ((270, 350, 400, 450), (270, 350, 400, 450)), 71 | ((-50, -50, 342, 190), (0, 0, 342, 190)), 72 | ((153, 150, 490, 580), (153, 150, 490, 480)), 73 | ], 74 | ) 75 | def test_clamp(box_values, expected_out, image_size): 76 | voc_box = VocBoundingBox(*box_values, image_size=image_size) 77 | voc_box.clamp() 78 | 79 | assert_almost_equal(actual=voc_box.values, desired=expected_out, ignore_numeric_type_changes=True) 80 | 81 | 82 | def test_scale(voc_bounding_box, scaled_voc_box, scale_factor): 83 | x_tl, y_tl, x_br, y_br = voc_bounding_box.values 84 | w, h = (x_br - x_tl), (y_br - y_tl) 85 | 86 | voc_bounding_box.scale(scale_factor) 87 | 88 | assert_almost_equal(actual=voc_bounding_box.values, desired=scaled_voc_box, ignore_numeric_type_changes=True) 89 | 90 | actual_area = voc_bounding_box.area 91 | desired_area = w * h * scale_factor 92 | assert actual_area - desired_area < 10**2 93 | 94 | 95 | def test_shift(voc_bounding_box, unnormalized_bbox_shift_amount): 96 | x_tl, y_tl, x_br, y_br = voc_bounding_box.values 97 | desired = ( 98 | x_tl + unnormalized_bbox_shift_amount[0], 99 | y_tl + unnormalized_bbox_shift_amount[1], 100 | x_br + unnormalized_bbox_shift_amount[0], 101 | y_br + unnormalized_bbox_shift_amount[1], 102 | ) 103 | actual_output = voc_bounding_box.shift(unnormalized_bbox_shift_amount) 104 | 105 | assert_almost_equal(actual=actual_output.values, desired=desired) 106 | 107 | 108 | def test_oob(voc_oob_bounding_box, image_size): 109 | with pytest.raises(ValueError): 110 | BoundingBox.from_albumentations(*voc_oob_bounding_box, image_size=image_size, strict=True) 111 | 112 | voc_box = BoundingBox.from_albumentations(*voc_oob_bounding_box, image_size=image_size) 113 | assert voc_box.is_oob is True 114 | 115 | 116 | # Conversions 117 | 118 | 119 | def test_to_albumentations(voc_bounding_box, albumentations_bbox): 120 | voc2albumentations_bbox = voc_bounding_box.to_albumentations() 121 | assert_almost_equal(actual=list(voc2albumentations_bbox.values), desired=albumentations_bbox) 122 | 123 | 124 | def test_to_fiftyone(voc_bounding_box, fiftyone_bbox): 125 | voc2fiftyone_bbox = voc_bounding_box.to_fiftyone() 126 | assert_almost_equal(actual=list(voc2fiftyone_bbox.values), desired=fiftyone_bbox) 127 | 128 | 129 | def test_to_coco(voc_bounding_box, coco_bbox): 130 | voc2coco_bbox = voc_bounding_box.to_coco() 131 | assert_almost_equal(actual=list(voc2coco_bbox.values), desired=coco_bbox) 132 | 133 | 134 | def test_to_yolo(voc_bounding_box, yolo_bbox): 135 | voc2yolo_bbox = voc_bounding_box.to_yolo() 136 | assert_almost_equal(actual=list(voc2yolo_bbox.values), desired=yolo_bbox) 137 | -------------------------------------------------------------------------------- /tests/pybboxes/boxes/test_yolo_bounding_box.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pybboxes import BoundingBox, YoloBoundingBox 5 | from tests.utils import assert_almost_equal 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def yolo_bounding_box(yolo_bbox, image_size): 10 | return BoundingBox.from_yolo(*yolo_bbox, image_size=image_size) 11 | 12 | 13 | @pytest.fixture(scope="module") 14 | def yolo_oob_bounding_box(): 15 | return [0.515625, 0.6614583333333334, 0.71875, 0.8854166666666666] 16 | 17 | 18 | @pytest.fixture(scope="module") 19 | def yolo_bounding_box2(yolo_bbox, image_size): 20 | np.random.seed(42) 21 | yolo_bbox2 = yolo_bbox + np.random.uniform(-0.05, 0.01, size=4) 22 | return BoundingBox.from_yolo(*yolo_bbox2, image_size=image_size) 23 | 24 | 25 | @pytest.fixture() 26 | def scaled_yolo_box(): 27 | return 0.4046875, 0.840625, 0.3557630992844818, 0.17235727791422098 28 | 29 | 30 | @pytest.fixture() 31 | def yolo_multi_array_zeroth(): 32 | return 0.22472407130841748, 0.5704285838459496, 0.024769205341163492, 0.014107127518591313 33 | 34 | 35 | @pytest.fixture(scope="function") 36 | def yolo_area_computations_expected_output(): 37 | return { 38 | "total_area": 72654, 39 | "union": 39434, 40 | "intersection": 33220, 41 | "iou": 0.8424202464878024, 42 | "ratio": 1.0770154373927958, 43 | "difference": 4454, 44 | } 45 | 46 | 47 | def test_area_computations(yolo_bounding_box, yolo_bounding_box2, yolo_area_computations_expected_output): 48 | actual_output = { 49 | "total_area": yolo_bounding_box.area + yolo_bounding_box2.area, 50 | "union": yolo_bounding_box + yolo_bounding_box2, 51 | "intersection": yolo_bounding_box * yolo_bounding_box2, 52 | "iou": yolo_bounding_box.iou(yolo_bounding_box2), 53 | "ratio": yolo_bounding_box / yolo_bounding_box2, 54 | "difference": yolo_bounding_box - yolo_bounding_box2, 55 | } 56 | assert_almost_equal(actual=actual_output, desired=yolo_area_computations_expected_output) 57 | 58 | 59 | def test_from_array(multiple_yolo_bboxes, image_size, expected_multiple_bbox_shape, yolo_multi_array_zeroth): 60 | yolo_boxes = YoloBoundingBox.from_array(multiple_yolo_bboxes, image_size=image_size) 61 | assert_almost_equal(actual=yolo_boxes.shape, desired=expected_multiple_bbox_shape) 62 | assert_almost_equal( 63 | actual=yolo_boxes.flatten()[0].values, desired=yolo_multi_array_zeroth, ignore_numeric_type_changes=True 64 | ) 65 | 66 | 67 | @pytest.mark.parametrize( 68 | "box_values,expected_out", 69 | [ 70 | ((0.15625, 0.21875, 0.875, 0.9), (0.296875, 0.334375, 0.59375, 0.66875)), 71 | ((-0.05, -0.05, 0.5, 0.5), (0.1, 0.1, 0.2, 0.2)), 72 | ((0.3, 0.4, 0.7, 0.7), (0.325, 0.4, 0.65, 0.7)), 73 | ], 74 | ) 75 | def test_clamp(box_values, expected_out, image_size): 76 | yolo_box = YoloBoundingBox(*box_values, image_size=image_size) 77 | yolo_box.clamp() 78 | 79 | assert_almost_equal(actual=yolo_box.values, desired=expected_out, ignore_numeric_type_changes=True) 80 | 81 | 82 | def test_shift(yolo_bounding_box, normalized_bbox_shift_amount): 83 | x_c, y_c, w, h = yolo_bounding_box.values 84 | desired = (x_c + normalized_bbox_shift_amount[0], y_c + normalized_bbox_shift_amount[1], w, h) 85 | actual_output = yolo_bounding_box.shift(normalized_bbox_shift_amount) 86 | 87 | assert_almost_equal(actual=actual_output.values, desired=desired) 88 | 89 | 90 | def test_scale(yolo_bounding_box, scaled_yolo_box, scale_factor): 91 | _, _, w, h = yolo_bounding_box.values 92 | image_width, image_height = yolo_bounding_box.image_size 93 | w, h 
= w * image_width, h * image_height 94 | 95 | yolo_bounding_box.scale(scale_factor) 96 | 97 | assert_almost_equal(actual=yolo_bounding_box.values, desired=scaled_yolo_box, ignore_numeric_type_changes=True) 98 | 99 | actual_area = yolo_bounding_box.area 100 | desired_area = w * h * scale_factor 101 | assert actual_area - desired_area < 10**2 102 | 103 | 104 | def test_oob(yolo_oob_bounding_box, image_size): 105 | with pytest.raises(ValueError): 106 | BoundingBox.from_yolo(*yolo_oob_bounding_box, image_size=image_size, strict=True) 107 | 108 | yolo_box = BoundingBox.from_yolo(*yolo_oob_bounding_box, image_size=image_size, strict=False) 109 | assert yolo_box.is_oob is True 110 | 111 | 112 | # Conversions 113 | 114 | 115 | def test_to_albumentations(yolo_bounding_box, albumentations_bbox): 116 | yolo2albumentations_bbox = yolo_bounding_box.to_albumentations() 117 | assert_almost_equal(actual=list(yolo2albumentations_bbox.values), desired=albumentations_bbox) 118 | 119 | 120 | def test_to_coco(yolo_bounding_box, coco_bbox): 121 | yolo2coco_bbox = yolo_bounding_box.to_coco() 122 | assert_almost_equal(actual=list(yolo2coco_bbox.values), desired=coco_bbox) 123 | 124 | 125 | def test_to_fiftyone(yolo_bounding_box, fiftyone_bbox): 126 | yolo2fiftyone_bbox = yolo_bounding_box.to_fiftyone() 127 | assert_almost_equal(actual=list(yolo2fiftyone_bbox.values), desired=fiftyone_bbox) 128 | 129 | 130 | def test_to_voc(yolo_bounding_box, voc_bbox): 131 | yolo2voc_bbox = yolo_bounding_box.to_voc() 132 | assert_almost_equal(actual=list(yolo2voc_bbox.values), desired=voc_bbox) 133 | -------------------------------------------------------------------------------- /tests/pybboxes/boxes/test_fiftyone_bounding_box.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pybboxes import BoundingBox, FiftyoneBoundingBox 5 | from tests.utils import assert_almost_equal 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def fiftyone_bounding_box(fiftyone_bbox, image_size): 10 | return BoundingBox.from_fiftyone(*fiftyone_bbox, image_size=image_size) 11 | 12 | 13 | @pytest.fixture(scope="module") 14 | def fiftyone_oob_bounding_box(): 15 | return [0.15625, 0.21875, 0.71875, 0.8854166666666666] 16 | 17 | 18 | @pytest.fixture 19 | def fiftyone_multi_array_zeroth(): 20 | return 0.29963209507789, 0.760571445127933, 0.024769205341163492, 0.014107127518591313 21 | 22 | 23 | @pytest.fixture(scope="module") 24 | def fiftyone_bounding_box2(fiftyone_bbox, image_size): 25 | np.random.seed(42) 26 | fiftyone_bbox2 = fiftyone_bbox + np.random.uniform(-0.05, 0.01, size=4) 27 | return BoundingBox.from_fiftyone(*fiftyone_bbox2, image_size=image_size) 28 | 29 | 30 | @pytest.fixture() 31 | def scaled_fiftyone_box(): 32 | return 0.22680595035775913, 0.7544463610428895, 0.35576309928448174, 0.17235727791422092 33 | 34 | 35 | @pytest.fixture(scope="function") 36 | def fiftyone_area_computations_expected_output(): 37 | return { 38 | "total_area": 72972, 39 | "union": 39672, 40 | "intersection": 33300, 41 | "iou": 0.8393829401088929, 42 | "ratio": 1.0673125956144824, 43 | "difference": 4374, 44 | } 45 | 46 | 47 | def test_area_computations(fiftyone_bounding_box, fiftyone_bounding_box2, fiftyone_area_computations_expected_output): 48 | actual_output = { 49 | "total_area": fiftyone_bounding_box.area + fiftyone_bounding_box2.area, 50 | "union": fiftyone_bounding_box + fiftyone_bounding_box2, 51 | "intersection": fiftyone_bounding_box * fiftyone_bounding_box2, 52 | "iou": 
fiftyone_bounding_box.iou(fiftyone_bounding_box2), 53 | "ratio": fiftyone_bounding_box / fiftyone_bounding_box2, 54 | "difference": fiftyone_bounding_box - fiftyone_bounding_box2, 55 | } 56 | assert_almost_equal(actual=actual_output, desired=fiftyone_area_computations_expected_output) 57 | 58 | 59 | def test_from_array(multiple_fiftyone_bboxes, image_size, expected_multiple_bbox_shape, fiftyone_multi_array_zeroth): 60 | fo_boxes = FiftyoneBoundingBox.from_array(multiple_fiftyone_bboxes, image_size=image_size) 61 | assert_almost_equal(actual=fo_boxes.shape, desired=expected_multiple_bbox_shape) 62 | assert_almost_equal( 63 | actual=fo_boxes.flatten()[0].values, desired=fiftyone_multi_array_zeroth, ignore_numeric_type_changes=True 64 | ) 65 | 66 | 67 | @pytest.mark.parametrize( 68 | "box_values,expected_out", 69 | [ 70 | ((0.15625, 0.21875, 0.875, 0.9), (0.15625, 0.21875, 0.84375, 0.78125)), 71 | ((-0.05, -0.05, 0.5, 0.5), (0.0, 0.0, 0.45, 0.45)), 72 | ((0.3, 0.4, 1, 0.7), (0.3, 0.4, 0.7, 0.6)), 73 | ], 74 | ) 75 | def test_clamp(box_values, expected_out, image_size): 76 | fo_box = FiftyoneBoundingBox(*box_values, image_size=image_size) 77 | fo_box.clamp() 78 | 79 | assert_almost_equal(actual=fo_box.values, desired=expected_out, ignore_numeric_type_changes=True) 80 | 81 | 82 | def test_scale(fiftyone_bounding_box, scaled_fiftyone_box, scale_factor): 83 | _, _, w, h = fiftyone_bounding_box.values 84 | image_width, image_height = fiftyone_bounding_box.image_size 85 | w, h = w * image_width, h * image_height 86 | 87 | fiftyone_bounding_box.scale(scale_factor) 88 | 89 | assert_almost_equal( 90 | actual=fiftyone_bounding_box.values, desired=scaled_fiftyone_box, ignore_numeric_type_changes=True 91 | ) 92 | 93 | actual_area = fiftyone_bounding_box.area 94 | desired_area = w * h * scale_factor 95 | assert actual_area - desired_area < 10**2 96 | 97 | 98 | def test_shift(fiftyone_bounding_box, normalized_bbox_shift_amount): 99 | x_tl, y_tl, w, h = fiftyone_bounding_box.values 100 | desired = (x_tl + normalized_bbox_shift_amount[0], y_tl + normalized_bbox_shift_amount[1], w, h) 101 | actual_output = fiftyone_bounding_box.shift(normalized_bbox_shift_amount) 102 | 103 | assert_almost_equal(actual=actual_output.values, desired=desired) 104 | 105 | 106 | def test_oob(fiftyone_oob_bounding_box, image_size): 107 | with pytest.raises(ValueError): 108 | BoundingBox.from_fiftyone(*fiftyone_oob_bounding_box, image_size=image_size, strict=True) 109 | 110 | fo_box = BoundingBox.from_fiftyone(*fiftyone_oob_bounding_box, image_size=image_size) 111 | assert fo_box.is_oob is True 112 | 113 | 114 | # Conversions 115 | 116 | 117 | def test_to_albumentations(fiftyone_bounding_box, albumentations_bbox): 118 | fiftyone2albumentations_bbox = fiftyone_bounding_box.to_albumentations() 119 | assert_almost_equal(actual=list(fiftyone2albumentations_bbox.values), desired=albumentations_bbox) 120 | 121 | 122 | def test_to_coco(fiftyone_bounding_box, coco_bbox): 123 | fiftyone2coco_bbox = fiftyone_bounding_box.to_coco() 124 | assert_almost_equal(actual=list(fiftyone2coco_bbox.values), desired=coco_bbox) 125 | 126 | 127 | def test_to_voc(fiftyone_bounding_box, voc_bbox): 128 | fiftyone2voc_bbox = fiftyone_bounding_box.to_voc() 129 | assert_almost_equal(actual=list(fiftyone2voc_bbox.values), desired=voc_bbox) 130 | 131 | 132 | def test_to_yolo(fiftyone_bounding_box, yolo_bbox): 133 | fiftyone2yolo_bbox = fiftyone_bounding_box.to_yolo() 134 | assert_almost_equal(actual=list(fiftyone2yolo_bbox.values), desired=yolo_bbox) 135 | 
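The conversion tests in these modules all follow the same pattern: construct a box from one format's values together with an image size, then assert that each to_*() conversion matches the fixture for the target format. A minimal sketch of that round trip is shown below; the COCO values and image size used here are illustrative assumptions, not the conftest fixtures.

from pybboxes import BoundingBox

# Assumed example values (not the conftest fixtures): a COCO box given as
# (x-tl, y-tl, width, height) on an assumed 640x480 image.
coco_values = (98, 345, 322, 117)
image_size = (640, 480)

box = BoundingBox.from_coco(*coco_values, image_size=image_size)
print(box.to_voc().values)   # corner format: (x-tl, y-tl, x-br, y-br) in pixels
print(box.to_yolo().values)  # center format: (x-c, y-c, w, h) normalized by the image size
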
-------------------------------------------------------------------------------- /tests/pybboxes/boxes/test_albumentations_bounding_box.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | 4 | from pybboxes import AlbumentationsBoundingBox, BoundingBox 5 | from tests.utils import assert_almost_equal 6 | 7 | 8 | @pytest.fixture(scope="function") 9 | def albumentations_bounding_box(albumentations_bbox, image_size): 10 | return BoundingBox.from_albumentations(*albumentations_bbox, image_size=image_size) 11 | 12 | 13 | @pytest.fixture(scope="module") 14 | def albumentations_oob_bounding_box(): 15 | return [0.15625, 0.21875, 0.875, 1.1041666666666667] 16 | 17 | 18 | @pytest.fixture 19 | def albumentations_multi_array_zeroth(): 20 | return 0.18727005942368125, 0.4753571532049581, 0.5619230133529087, 0.5352678187964783 21 | 22 | 23 | @pytest.fixture(scope="module") 24 | def albumentations_bounding_box2(albumentations_bbox, image_size): 25 | np.random.seed(42) 26 | albumentations_bbox2 = albumentations_bbox + np.random.uniform(-0.05, 0.05, size=4) 27 | return BoundingBox.from_albumentations(*albumentations_bbox2, image_size=image_size) 28 | 29 | 30 | @pytest.fixture() 31 | def scaled_albumentations_box(): 32 | return 0.22680595035775913, 0.7544463610428895, 0.5825690496422409, 0.9268036389571105 33 | 34 | 35 | @pytest.fixture() 36 | def clamped_albumentations_box(): 37 | return 0.22680595035775913, 0.7544463610428895, 0.5825690496422409, 0.9268036389571105 38 | 39 | 40 | @pytest.fixture(scope="function") 41 | def albumentations_area_computations_expected_output(): 42 | return { 43 | "total_area": 72174, 44 | "union": 41584, 45 | "intersection": 30590, 46 | "iou": 0.7356194690265486, 47 | "ratio": 1.092, 48 | "difference": 7084, 49 | } 50 | 51 | 52 | def test_area_computations( 53 | albumentations_bounding_box, albumentations_bounding_box2, albumentations_area_computations_expected_output 54 | ): 55 | actual_output = { 56 | "total_area": albumentations_bounding_box.area + albumentations_bounding_box2.area, 57 | "union": albumentations_bounding_box + albumentations_bounding_box2, 58 | "intersection": albumentations_bounding_box * albumentations_bounding_box2, 59 | "iou": albumentations_bounding_box.iou(albumentations_bounding_box2), 60 | "ratio": albumentations_bounding_box / albumentations_bounding_box2, 61 | "difference": albumentations_bounding_box - albumentations_bounding_box2, 62 | } 63 | assert_almost_equal(actual=actual_output, desired=albumentations_area_computations_expected_output) 64 | 65 | 66 | def test_from_array( 67 | multiple_albumentations_bboxes, image_size, expected_multiple_bbox_shape, albumentations_multi_array_zeroth 68 | ): 69 | alb_boxes = AlbumentationsBoundingBox.from_array(multiple_albumentations_bboxes, image_size=image_size) 70 | assert_almost_equal(actual=alb_boxes.shape, desired=expected_multiple_bbox_shape) 71 | assert_almost_equal( 72 | alb_boxes.flatten()[0].values, albumentations_multi_array_zeroth, ignore_numeric_type_changes=True 73 | ) 74 | 75 | 76 | @pytest.mark.parametrize( 77 | "box_values,expected_out", 78 | [ 79 | ((0.15625, 0.21875, 0.875, 1.1041666666666667), (0.15625, 0.21875, 0.875, 1)), 80 | ((-0.05, -0.05, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5)), 81 | ((0.3, 0.4, 1.04, 0.7), (0.3, 0.4, 1.0, 0.7)), 82 | ], 83 | ) 84 | def test_clamp(box_values, expected_out, image_size): 85 | alb_box = AlbumentationsBoundingBox(*box_values, image_size=image_size) 86 | alb_box.clamp() 87 | 88 | 
assert_almost_equal(actual=alb_box.values, desired=expected_out, ignore_numeric_type_changes=True) 89 | 90 | 91 | def test_scale(albumentations_bounding_box, scaled_albumentations_box, scale_factor): 92 | x_tl, y_tl, x_br, y_br = albumentations_bounding_box.values 93 | image_width, image_height = albumentations_bounding_box.image_size 94 | w, h = (x_br - x_tl) * image_width, (y_br - y_tl) * image_height 95 | 96 | albumentations_bounding_box.scale(scale_factor) 97 | 98 | assert_almost_equal( 99 | actual=albumentations_bounding_box.values, desired=scaled_albumentations_box, ignore_numeric_type_changes=True 100 | ) 101 | 102 | actual_area = albumentations_bounding_box.area 103 | desired_area = w * h * scale_factor 104 | assert actual_area - desired_area < 10**2 105 | 106 | 107 | def test_shift(albumentations_bounding_box, normalized_bbox_shift_amount): 108 | x_tl, y_tl, x_br, y_br = albumentations_bounding_box.values 109 | desired = ( 110 | x_tl + normalized_bbox_shift_amount[0], 111 | y_tl + normalized_bbox_shift_amount[1], 112 | x_br + normalized_bbox_shift_amount[0], 113 | y_br + normalized_bbox_shift_amount[1], 114 | ) 115 | actual_output = albumentations_bounding_box.shift(normalized_bbox_shift_amount) 116 | 117 | assert_almost_equal(actual=actual_output.values, desired=desired, decimal=2) 118 | 119 | 120 | def test_oob(albumentations_oob_bounding_box, image_size): 121 | with pytest.raises(ValueError): 122 | BoundingBox.from_albumentations(*albumentations_oob_bounding_box, image_size=image_size, strict=True) 123 | 124 | alb_box = BoundingBox.from_albumentations(*albumentations_oob_bounding_box, image_size=image_size) 125 | assert alb_box.is_oob is True 126 | 127 | 128 | # Conversions 129 | 130 | 131 | def test_to_coco(albumentations_bounding_box, coco_bbox): 132 | alb2coco_bbox = albumentations_bounding_box.to_coco() 133 | assert_almost_equal(actual=list(alb2coco_bbox.values), desired=coco_bbox) 134 | 135 | 136 | def test_to_fiftyone(albumentations_bounding_box, fiftyone_bbox): 137 | alb2fiftyone_bbox = albumentations_bounding_box.to_fiftyone() 138 | assert_almost_equal(actual=list(alb2fiftyone_bbox.values), desired=fiftyone_bbox) 139 | 140 | 141 | def test_to_voc(albumentations_bounding_box, voc_bbox): 142 | alb2voc_bbox = albumentations_bounding_box.to_voc() 143 | assert_almost_equal(actual=list(alb2voc_bbox.values), desired=voc_bbox) 144 | 145 | 146 | def test_to_yolo(albumentations_bounding_box, yolo_bbox): 147 | alb2yolo_bbox = albumentations_bounding_box.to_yolo() 148 | assert_almost_equal(actual=list(alb2yolo_bbox.values), desired=yolo_bbox) 149 | -------------------------------------------------------------------------------- /pybboxes/boxes/bbox.py: -------------------------------------------------------------------------------- 1 | from importlib import import_module 2 | from typing import Tuple, Union 3 | 4 | from numpy import sqrt 5 | 6 | from pybboxes.boxes.base import BaseBoundingBox 7 | 8 | 9 | def load_bbox( 10 | name: str, values, image_size: Tuple[int, int] = None, return_values: bool = False, from_voc: bool = False, **kwargs 11 | ) -> BaseBoundingBox: 12 | def pascalize(snake_string: str) -> str: 13 | return snake_string.title().replace("_", "") 14 | 15 | module_name = f"{name}_bounding_box" 16 | module_path = f"pybboxes.boxes.{module_name}" 17 | klass_name = pascalize(module_name) 18 | module = import_module(module_path) 19 | klass = getattr(module, klass_name) 20 | if from_voc: 21 | # Used to convert from Generic (VOC) style 22 | bbox = klass.from_voc(*values, 
image_size=image_size, **kwargs) 23 | else: 24 | bbox = klass(*values, image_size=image_size, **kwargs) 25 | if return_values: 26 | return bbox.values 27 | return bbox 28 | 29 | 30 | class BoundingBox(BaseBoundingBox): 31 | def __init__( 32 | self, 33 | x_tl: int, 34 | y_tl: int, 35 | x_br: int, 36 | y_br: int, 37 | image_size: Tuple[int, int] = None, 38 | strict: bool = False, 39 | ): 40 | super(BoundingBox, self).__init__(x_tl, y_tl, x_br, y_br, image_size=image_size, strict=strict) 41 | 42 | def _correct_value_types(self, x_tl: int, y_tl: int, x_br: int, y_br: int) -> Tuple: 43 | return round(x_tl), round(y_tl), round(x_br), round(y_br) 44 | 45 | def _validate_values(self, x_tl: int, y_tl: int, x_br: int, y_br: int): 46 | image_width, image_height = self.image_size 47 | if x_tl > x_br or y_tl > y_br: 48 | raise ValueError("Incorrect BoundingBox format. Must be in type [x-tl, y-tl, x-br, y-br].") 49 | elif (x_tl, y_tl) == (x_br, y_br): 50 | raise ValueError("Given top-left and bottom-right points must be distinct.") 51 | elif ( 52 | not 0 <= x_tl < x_br 53 | or not 0 <= y_tl < y_br 54 | or (image_width is not None and x_br > image_width) 55 | or (image_height is not None and y_br > image_height) 56 | ): 57 | if self.strict: 58 | raise ValueError( 59 | "Given bounding box values is out of bounds. " 60 | "To silently skip out of bounds cases pass 'strict=False'." 61 | ) 62 | self._is_oob = True 63 | elif not self.is_image_size_null(): 64 | self._is_oob = False 65 | 66 | def clamp(self) -> "BoundingBox": 67 | if self.is_image_size_null() or not self.is_oob: 68 | return self 69 | x_tl, y_tl, x_br, y_br = self.raw_values 70 | width, height = self.image_size 71 | x_tl = max(x_tl, 0) 72 | y_tl = max(y_tl, 0) 73 | x_br = min(x_br, width) 74 | y_br = min(y_br, height) 75 | new_values = (x_tl, y_tl, x_br, y_br) 76 | self._validate_and_set_values(*new_values) 77 | return self 78 | 79 | def scale(self, factor: float) -> "BoundingBox": 80 | if factor <= 0: 81 | raise ValueError("Scaling 'factor' must be a positive value.") 82 | x_tl, y_tl, x_br, y_br = self.raw_values 83 | w, h = x_br - x_tl, y_br - y_tl 84 | x_c, y_c = x_tl + w / 2, y_tl + h / 2 85 | 86 | # Apply sqrt for both w and h to scale w.r.t area. 
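# Multiplying both w and h by sqrt(factor) keeps the box center fixed and scales the area (w * h) by exactly factor; the test_scale cases in the test modules compare against w * h * scale_factor.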
87 | w *= sqrt(factor) 88 | h *= sqrt(factor) 89 | new_values = (x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2) 90 | self._validate_and_set_values(*new_values) 91 | return self 92 | 93 | def shift(self, amount: Tuple[int, int]) -> "BoundingBox": 94 | x_tl, y_tl, x_br, y_br = self.raw_values 95 | horizontal_shift, vertical_shift = amount 96 | 97 | new_values = (x_tl + horizontal_shift, y_tl + vertical_shift, x_br + horizontal_shift, y_br + vertical_shift) 98 | self._validate_and_set_values(*new_values) 99 | return self 100 | 101 | def _to_bbox_type(self, name: str, return_values: bool) -> BaseBoundingBox: 102 | return load_bbox( 103 | name, 104 | values=self.raw_values, 105 | image_size=self.image_size, 106 | return_values=return_values, 107 | from_voc=True, 108 | strict=self.strict, 109 | ) 110 | 111 | def to_albumentations( 112 | self, return_values: bool = False, **kwargs 113 | ) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 114 | return self._to_bbox_type("albumentations", return_values, **kwargs) 115 | 116 | def to_coco(self, return_values: bool = False, **kwargs) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 117 | return self._to_bbox_type("coco", return_values, **kwargs) 118 | 119 | def to_fiftyone(self, return_values: bool = False, **kwargs) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 120 | return self._to_bbox_type("fiftyone", return_values, **kwargs) 121 | 122 | def to_voc(self, return_values: bool = False, **kwargs) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 123 | return self._to_bbox_type("voc", return_values, **kwargs) 124 | 125 | def to_yolo(self, return_values: bool = False, **kwargs) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 126 | return self._to_bbox_type("yolo", return_values, **kwargs) 127 | 128 | @classmethod 129 | def from_voc( 130 | cls, 131 | x_tl: int, 132 | y_tl: int, 133 | x_br: int, 134 | y_br: int, 135 | image_size: Tuple[int, int] = None, 136 | strict: bool = True, 137 | ) -> "BaseBoundingBox": 138 | return load_bbox("voc", values=(x_tl, y_tl, x_br, y_br), image_size=image_size, strict=strict) 139 | 140 | @classmethod 141 | def from_albumentations( 142 | cls, 143 | x_tl: float, 144 | y_tl: float, 145 | x_br: float, 146 | y_br: float, 147 | image_size: Tuple[int, int] = None, 148 | strict: bool = False, 149 | ): 150 | return load_bbox("albumentations", values=(x_tl, y_tl, x_br, y_br), image_size=image_size, strict=strict) 151 | 152 | @classmethod 153 | def from_coco(cls, x_tl: int, y_tl: int, w: int, h: int, image_size: Tuple[int, int] = None, strict: bool = False): 154 | return load_bbox("coco", values=(x_tl, y_tl, w, h), image_size=image_size, strict=strict) 155 | 156 | @classmethod 157 | def from_fiftyone( 158 | cls, x_tl: float, y_tl: float, w: float, h: float, image_size: Tuple[int, int] = None, strict: bool = False 159 | ): 160 | return load_bbox("fiftyone", values=(x_tl, y_tl, w, h), image_size=image_size, strict=strict) 161 | 162 | @classmethod 163 | def from_yolo( 164 | cls, x_c: float, y_c: float, w: float, h: float, image_size: Tuple[int, int] = None, strict: bool = False 165 | ): 166 | return load_bbox("yolo", values=(x_c, y_c, w, h), image_size=image_size, strict=strict) 167 | -------------------------------------------------------------------------------- /tests/pybboxes/annotations/test_annotations_conversion.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import shutil 4 | from collections import Counter 5 | from 
concurrent.futures import ThreadPoolExecutor, as_completed 6 | from functools import partial 7 | 8 | import pytest 9 | from huggingface_hub import HfApi, hf_hub_download 10 | from pycocotools.coco import COCO 11 | from tqdm import tqdm 12 | 13 | from pybboxes.annotations import Annotations 14 | 15 | # hugging face repo from where we will be downloading our fixture for unit testing 16 | repo_id = "gauravparajuli/coco_test_set_pybboxes" 17 | 18 | sample_yolo_dataset_path = str(os.path.join("tests", "pybboxes", "annotations", "testing_data_yolo")) 19 | sample_voc_dataset_path = str(os.path.join("tests", "pybboxes", "annotations", "testing_data_voc")) 20 | sample_coco_dataset_path = str( 21 | os.path.join("tests", "pybboxes", "annotations", "testing_data_coco", "annotations_coco.json") 22 | ) # source 23 | persist_coco_test_path = str( 24 | os.path.join("tests", "pybboxes", "annotations", "persist_as_coco_test.json") 25 | ) # file generated during test_persist_as_coco 26 | 27 | sample_images = str(os.path.join("tests", "pybboxes", "annotations", "testing_data_images")) 28 | 29 | 30 | def downloadfile(filename, local_dir): 31 | hf_hub_download( 32 | repo_id=repo_id, 33 | repo_type="dataset", 34 | filename=filename, 35 | local_dir=local_dir, 36 | ) 37 | 38 | 39 | def count_files(directory, extensions): 40 | all_files = [] 41 | for ext in extensions: 42 | all_files.extend(glob.glob(f"{directory}/*{ext}")) 43 | return Counter(file.split(".")[-1] for file in all_files) 44 | 45 | 46 | sample_coco_dataset = Annotations(annotation_type="coco") 47 | 48 | 49 | def test_import_from_fiftyone(): 50 | anns = Annotations(annotation_type="fiftyone") 51 | with pytest.raises(NotImplementedError): 52 | anns.load_from_fiftyone() 53 | 54 | 55 | def test_import_from_albumentations(): 56 | anns = Annotations(annotation_type="albumentations") 57 | with pytest.raises(NotImplementedError): 58 | anns.load_from_albumentations() 59 | 60 | 61 | def test_save_as_fiftyone(): 62 | anns = Annotations(annotation_type="albumentations") 63 | with pytest.raises(NotImplementedError): 64 | anns.save_as_fiftyone() 65 | 66 | 67 | def test_save_as_albumentations(): 68 | anns = Annotations(annotation_type="fiftyone") 69 | with pytest.raises(NotImplementedError): 70 | anns.save_as_albumentations() 71 | 72 | 73 | def test_annotations_initialization(): 74 | # annotation_type should be either: yolo, coco, voc, albumentations or fiftyone 75 | with pytest.raises(ValueError): 76 | anns = Annotations(annotation_type="not_this_type") 77 | 78 | 79 | def test_annotations_only_appropriate_loading_method_allowed(): 80 | # tests if unappropriate method is used to load annotations 81 | anns = Annotations("yolo") 82 | with pytest.raises(TypeError): 83 | anns.load_from_voc(labels_dir="./labels") 84 | with pytest.raises(TypeError): 85 | anns.load_from_coco(json_path="./sample.json") 86 | 87 | anns = Annotations("coco") 88 | with pytest.raises(TypeError): 89 | anns.load_from_yolo(labels_dir="./labels", images_dir="./images", classes_file="./classes.txt") 90 | 91 | 92 | def test_import_from_coco(): 93 | anns = sample_coco_dataset 94 | anns.load_from_coco(sample_coco_dataset_path) 95 | 96 | assert (type(anns.names_mapping)) == dict 97 | assert anns.names_mapping == dict(raccoons=0, raccoon=1) 98 | 99 | # randomly test the accuracy of annotations here 100 | 101 | 102 | @pytest.mark.depends(on=["test_save_as_yolo"]) 103 | def test_import_from_yolo(): 104 | anns = Annotations(annotation_type="yolo") 105 | anns.load_from_yolo( 106 | 
labels_dir=sample_yolo_dataset_path, 107 | images_dir=sample_images, 108 | classes_file=str(os.path.join(sample_yolo_dataset_path, "classes.txt")), 109 | ) 110 | 111 | assert (type(anns.names_mapping)) == dict 112 | assert anns.names_mapping == dict(raccoons=0, raccoon=1) 113 | 114 | 115 | @pytest.mark.depends(on=["test_save_as_voc"]) 116 | def test_import_from_voc(): 117 | anns = Annotations(annotation_type="voc") 118 | anns.load_from_voc(labels_dir=sample_voc_dataset_path) 119 | 120 | assert (type(anns.names_mapping)) == dict 121 | assert anns.names_mapping == dict(raccoon=0) # as raccoons label was not used in any bounding boxes, 122 | # plus there is not a file that lists all the available class in voc format 123 | # there was a loss of information 124 | # when converting from coco format to voc format 125 | 126 | 127 | @pytest.mark.depends(on=["test_import_from_coco"]) 128 | def test_save_as_coco(): 129 | persist_coco_path = str(os.path.join("tests", "pybboxes", "annotations", "persist_as_coco_test.json")) 130 | sample_coco_dataset.save_as_coco(export_file=persist_coco_path) 131 | 132 | coco = COCO(persist_coco_path) 133 | 134 | assert len(coco.getImgIds()) == 196 135 | assert len(coco.getCatIds()) == 2 136 | 137 | 138 | @pytest.mark.depends(on=["test_import_from_coco"]) 139 | def test_save_as_yolo(): 140 | sample_coco_dataset.save_as_yolo(sample_yolo_dataset_path) 141 | 142 | assert ( 143 | count_files(sample_yolo_dataset_path, extensions=[".txt"])["txt"] == 197 144 | ) # 196 annotation files, 1 classes.txt file 145 | 146 | 147 | @pytest.mark.depends(on=["test_import_from_coco"]) 148 | def test_save_as_voc(): 149 | sample_coco_dataset.save_as_voc(sample_voc_dataset_path) 150 | 151 | assert count_files(sample_voc_dataset_path, extensions=[".xml"])["xml"] == 196 # 196 annotation files 152 | 153 | 154 | @pytest.fixture(scope="session", autouse=True) 155 | def cleanup(): 156 | # setup code here 157 | api = HfApi() 158 | files = api.list_repo_files(repo_id=repo_id, repo_type="dataset") 159 | files = [file for file in files if (".json" in file or ".jpg" in file)] # filter .gitattributes and README.md 160 | 161 | annotationfilename = files.pop(0) # annotations_coco.json 162 | downloadfile( 163 | annotationfilename, local_dir=os.path.dirname(sample_coco_dataset_path) 164 | ) # download annotation file in a separate folder 165 | 166 | # now download test dataset images 167 | with ThreadPoolExecutor() as executor: 168 | partial_downloadfile = partial(downloadfile, local_dir=sample_images) 169 | futures = [executor.submit(partial_downloadfile, filename) for filename in files] 170 | with tqdm(total=len(futures), desc="downloading test set for unit testing", unit="file") as pbar: 171 | for future in as_completed(futures): 172 | pbar.set_description_str = future.result() 173 | pbar.update(1) # update the progress bar for each completed download 174 | 175 | yield 176 | 177 | # clean up the folders that we created after all the tests have ran 178 | shutil.rmtree(sample_voc_dataset_path) 179 | shutil.rmtree(sample_yolo_dataset_path) 180 | os.remove(persist_coco_test_path) # remove the test file 181 | -------------------------------------------------------------------------------- /pybboxes/boxes/base.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from typing import List, Tuple, Union 3 | 4 | import numpy as np 5 | 6 | from pybboxes.types.box_2d import Box 7 | 8 | NORMALIZED_BOXES = ["albumentations", "fiftyone", "yolo"] 
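# Box formats whose values are fractions of the image size; shift() below uses this list to rescale normalized shift amounts into pixel units before delegating to the VOC-space implementation.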
9 | 10 | 11 | class BaseBoundingBox(Box, ABC): 12 | def __init__( 13 | self, 14 | v1: Union[int, float], 15 | v2: Union[int, float], 16 | v3: Union[int, float], 17 | v4: Union[int, float], 18 | image_size: Tuple[int, int] = None, 19 | strict: bool = False, 20 | ): 21 | self._image_size = image_size 22 | self.strict = strict 23 | self._is_oob = None 24 | self._validate_and_set_values(v1, v2, v3, v4) 25 | voc_values = self.to_voc(return_values=True) 26 | super(BaseBoundingBox, self).__init__(*voc_values) 27 | 28 | def __repr__(self): 29 | image_width, image_height = self.image_size 30 | str_vals = " ".join([f"{v:.4f}" if isinstance(v, float) else str(v) for v in self.values]) 31 | return f"<[{str_vals}] ({self.width}x{self.height}) | Image: " f"({image_width or '?'}x{image_height or '?'})>" 32 | 33 | @property 34 | def is_oob(self) -> Union[bool, None]: 35 | """ 36 | Whether the box is OOB (Out-of-bounds). 37 | 38 | Returns: 39 | None -> unknown. False -> Not OOB. True -> OOB. 40 | """ 41 | return self._is_oob 42 | 43 | @property 44 | def image_size(self): 45 | if self._image_size is not None: 46 | return self._image_size 47 | else: 48 | return None, None 49 | 50 | @image_size.setter 51 | def image_size(self, image_size: Tuple[int, int]): 52 | self._image_size = image_size 53 | 54 | def is_image_size_null(self): 55 | if self.image_size == (None, None): 56 | return True 57 | return False 58 | 59 | @property 60 | def values(self) -> Tuple: 61 | return self._values 62 | 63 | def _correct_value_types(self, *values) -> Tuple: 64 | return values 65 | 66 | @abstractmethod 67 | def _validate_values(self, *values): 68 | pass 69 | 70 | def _set_values(self, *values): 71 | """ 72 | This method is intended to be "final", and should not be overridden in child classes. 73 | """ 74 | self._values = values 75 | 76 | def _validate_and_set_values(self, *values) -> None: 77 | """ 78 | Validate and sets given values if validation is successful. 79 | """ 80 | self.raw_values = values 81 | values = self._correct_value_types(*values) 82 | self._validate_values(*values) 83 | self._set_values(*values) 84 | 85 | def to_albumentations(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 86 | return self.to_voc().to_albumentations(return_values) 87 | 88 | def to_coco(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 89 | return self.to_voc().to_coco(return_values) 90 | 91 | def to_fiftyone(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 92 | return self.to_voc().to_fiftyone(return_values) 93 | 94 | @abstractmethod 95 | def to_voc(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 96 | pass 97 | 98 | def to_yolo(self, return_values: bool = False) -> Union[Tuple[int, int, int, int], "BaseBoundingBox"]: 99 | return self.to_voc().to_yolo(return_values) 100 | 101 | @property 102 | def name(self): 103 | return self.__class__.__name__.lower().replace("boundingbox", "") 104 | 105 | def _generic_operation(self, op: str, *args, **kwargs) -> None: 106 | refined_box = self.to_voc() 107 | box_op = getattr(refined_box, op) 108 | refined_box = box_op(*args, **kwargs) 109 | box_conversion = getattr(refined_box, f"to_{self.name}") 110 | refined_box = box_conversion() 111 | 112 | self.__init__(*refined_box.values, image_size=self.image_size, strict=self.strict) 113 | 114 | def clamp(self) -> "BaseBoundingBox": 115 | """ 116 | Clamps the box with respect to the image borders. 
If the box is not OOB, does nothing. 117 | """ 118 | self._generic_operation("clamp") 119 | return self 120 | 121 | def scale(self, factor: float) -> "BaseBoundingBox": 122 | self._generic_operation("scale", factor) 123 | return self 124 | 125 | def shift(self, amount: Tuple) -> "BaseBoundingBox": 126 | """ 127 | Perform a shift operation on the bounding box inplace. 128 | 129 | Args: 130 | amount: The amount to shift the bounding box. The first value is the 131 | amount to shift the x-coordinate, and the second value is the 132 | amount to shift the y-coordinate. 133 | """ 134 | if self.name in NORMALIZED_BOXES: 135 | width, height = self.image_size 136 | amount = (amount[0] * width, amount[1] * height) 137 | self._generic_operation("shift", amount) 138 | return self 139 | 140 | @classmethod 141 | @abstractmethod 142 | def from_voc( 143 | cls, 144 | x_tl: int, 145 | y_tl: int, 146 | x_br: int, 147 | y_br: int, 148 | image_size: Tuple[int, int] = None, 149 | strict: bool = True, 150 | ) -> "BaseBoundingBox": 151 | pass 152 | 153 | @classmethod 154 | def from_array_vectorize(cls, ar: np.ndarray): 155 | constructor = cls.from_array 156 | vconstructor = np.vectorize(constructor) 157 | return vconstructor(ar) 158 | 159 | @classmethod 160 | def from_array(cls, ar: Union[Tuple, List, np.ndarray], **kwargs) -> Union[np.ndarray, "BaseBoundingBox"]: 161 | """ 162 | Takes input values containing at least a single bbox values. Input can be multidimensional 163 | array as long as the last dimension (-1) has length of 4, i.e for any array as input, the shape 164 | should look like (x,y,z,4) and the output is of shape (x,y,z). 165 | 166 | Args: 167 | ar: Input values as a tuple or array. If the input is an array, the dimension is preserved as is 168 | and each bounding box values is converted to the `BoundingBox` object. 169 | **kwargs: Additional keyword arguments for construction, see :py:meth:`BoundingBox.__init__` 170 | 171 | Notes: 172 | This method is intended to be "final", and should not be overridden in child classes. 173 | 174 | Returns: 175 | Either a `BoundingBox` object constructed from input values or list of `BoundingBox` objects 176 | as an array. 177 | """ 178 | if not isinstance(ar, np.ndarray): 179 | ar = np.array(ar) 180 | if ar.shape[-1] != 4: 181 | raise ValueError(f"Given input array must have bounding box values at dim -1 as 4, got shape {ar.shape}.") 182 | if ar.ndim == 1: 183 | return cls(*ar, **kwargs) 184 | vf = np.vectorize(cls.from_array, signature="(n) -> ()", excluded={"image_size", "strict"}) 185 | return vf(ar, **kwargs) 186 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |