├── detr
├── tox.ini
├── util
│   ├── __init__.py
│   ├── box_ops.py
│   └── plot_utils.py
├── d2
│   ├── detr
│   │   ├── __init__.py
│   │   ├── config.py
│   │   ├── dataset_mapper.py
│   │   └── detr.py
│   ├── configs
│   │   ├── detr_256_6_6_torchvision.yaml
│   │   └── detr_segm_256_6_6_torchvision.yaml
│   ├── README.md
│   ├── converter.py
│   └── train_net.py
├── requirements.txt
├── README.md
├── models
│   ├── __init__.py
│   ├── position_encoding.py
│   ├── matcher.py
│   ├── backbone.py
│   ├── transformer.py
│   └── segmentation.py
├── Dockerfile
├── .circleci
│   └── config.yml
├── datasets
│   ├── __init__.py
│   ├── panoptic_eval.py
│   ├── coco_panoptic.py
│   ├── coco.py
│   ├── coco_eval.py
│   └── transforms.py
├── run_with_submitit.py
├── hubconf.py
├── engine.py
├── test_all.py
├── engine_multi.py
├── LICENSE
└── main.py
├── example_table.jpg
├── visualization.jpg
├── resources
├── table1.jpg
├── table2.jpg
├── table3.jpg
├── visualization1.jpg
├── visualization2.jpg
└── visualization3.jpg
├── CODE_OF_CONDUCT.md
├── environment.yml
├── src
├── structure_config.json
├── detection_config.json
└── transforms.py
├── LICENSE
├── SUPPORT.md
├── .gitignore
├── .github
└── workflows
│   └── codeql-analysis.yml
├── SECURITY.md
├── README.md
└── main.py

/detr/tox.ini:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | ignore = F401,E402,F403,W503,W504
4 |
--------------------------------------------------------------------------------
/detr/util/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 |
--------------------------------------------------------------------------------
/example_table.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/example_table.jpg
--------------------------------------------------------------------------------
/visualization.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/visualization.jpg
--------------------------------------------------------------------------------
/resources/table1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/table1.jpg
--------------------------------------------------------------------------------
/resources/table2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/table2.jpg
--------------------------------------------------------------------------------
/resources/table3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/table3.jpg
--------------------------------------------------------------------------------
/resources/visualization1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/visualization1.jpg
--------------------------------------------------------------------------------
/resources/visualization2.jpg:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/visualization2.jpg -------------------------------------------------------------------------------- /resources/visualization3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baulbo/table-transformer-simple-inference/HEAD/resources/visualization3.jpg -------------------------------------------------------------------------------- /detr/d2/detr/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .config import add_detr_config 3 | from .detr import Detr 4 | from .dataset_mapper import DetrDatasetMapper 5 | -------------------------------------------------------------------------------- /detr/requirements.txt: -------------------------------------------------------------------------------- 1 | cython 2 | git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools 3 | submitit 4 | torch>=1.5.0 5 | torchvision>=0.6.0 6 | git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi 7 | scipy 8 | onnx 9 | onnxruntime 10 | -------------------------------------------------------------------------------- /detr/README.md: -------------------------------------------------------------------------------- 1 | **DE⫶TR**: End-to-End Object Detection with Transformers 2 | ======== 3 | PyTorch training code and pretrained models for **DETR** (**DE**tection **TR**ansformer). 4 | 5 | # License 6 | DETR is released under the Apache 2.0 license. Please see the [LICENSE](LICENSE) file for more information. 7 | -------------------------------------------------------------------------------- /detr/models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | from .detr import build 3 | from .detr_multi import build as build_multi 4 | 5 | 6 | def build_model(args): 7 | return build(args) 8 | 9 | def build_model_multi(args): 10 | return build_multi(args) -------------------------------------------------------------------------------- /detr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update -qq && \ 6 | apt-get install -y git vim libgtk2.0-dev && \ 7 | rm -rf /var/cache/apk/* 8 | 9 | RUN pip --no-cache-dir install Cython 10 | 11 | COPY requirements.txt /workspace 12 | 13 | RUN pip --no-cache-dir install -r /workspace/requirements.txt 14 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Microsoft Open Source Code of Conduct 2 | 3 | This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
4 | 5 | Resources: 6 | 7 | - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) 8 | - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) 9 | - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns 10 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: tables-detr 2 | channels: 3 | - conda-forge 4 | - pytorch 5 | - defaults 6 | dependencies: 7 | - python=3.6.10 8 | - pip 9 | - pytorch==1.5.0 10 | - cudatoolkit==10.1.243 11 | - jupyter 12 | - ipykernel 13 | - torchvision==0.6.0 14 | - pandas==0.25.3 15 | - opencv==4.1.2 16 | - scikit-learn==0.21.3 17 | - tqdm==4.39.0 18 | - itkwidgets==0.23.1 19 | - cython 20 | - numpy==1.17 21 | - pip: 22 | - scikit-image 23 | - git+https://github.com/philferriere/cocoapi.git#egg=pycocotools&subdirectory=PythonAPI 24 | - tensorboard==2.0.1 25 | - tensorboardx==1.9 26 | - scipy==1.1.0 27 | - jupytext==1.3.0 28 | - PyMuPDF==1.16.14 29 | -------------------------------------------------------------------------------- /detr/.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | jobs: 4 | python_lint: 5 | docker: 6 | - image: circleci/python:3.7 7 | steps: 8 | - checkout 9 | - run: 10 | command: | 11 | pip install --user --progress-bar off flake8 typing 12 | flake8 . 13 | 14 | test: 15 | docker: 16 | - image: circleci/python:3.7 17 | steps: 18 | - checkout 19 | - run: 20 | command: | 21 | pip install --user --progress-bar off scipy pytest 22 | pip install --user --progress-bar off --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html 23 | pip install --user --progress-bar off onnx onnxruntime 24 | pytest . 25 | 26 | workflows: 27 | build: 28 | jobs: 29 | - python_lint 30 | - test 31 | -------------------------------------------------------------------------------- /detr/datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | import torch.utils.data 3 | import torchvision 4 | 5 | from .coco import build as build_coco 6 | 7 | 8 | def get_coco_api_from_dataset(dataset): 9 | for _ in range(10): 10 | # if isinstance(dataset, torchvision.datasets.CocoDetection): 11 | # break 12 | if isinstance(dataset, torch.utils.data.Subset): 13 | dataset = dataset.dataset 14 | if isinstance(dataset, torchvision.datasets.CocoDetection): 15 | return dataset.coco 16 | 17 | 18 | def build_dataset(image_set, args): 19 | if args.dataset_file == 'coco': 20 | return build_coco(image_set, args) 21 | if args.dataset_file == 'coco_panoptic': 22 | # to avoid making panopticapi required for coco 23 | from .coco_panoptic import build as build_coco_panoptic 24 | return build_coco_panoptic(image_set, args) 25 | raise ValueError(f'dataset {args.dataset_file} not supported') 26 | -------------------------------------------------------------------------------- /src/structure_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lr":5e-5, 3 | "lr_backbone":1e-5, 4 | "batch_size":2, 5 | "weight_decay":1e-4, 6 | "epochs":20, 7 | "lr_drop":1, 8 | "lr_gamma":0.9, 9 | "clip_max_norm":0.1, 10 | 11 | "backbone":"resnet18", 12 | "num_classes":6, 13 | "dilation":false, 14 | "position_embedding":"sine", 15 | "emphasized_weights":{}, 16 | 17 | "enc_layers":6, 18 | "dec_layers":6, 19 | "dim_feedforward":2048, 20 | "hidden_dim":256, 21 | "dropout":0.1, 22 | "nheads":8, 23 | "num_queries":125, 24 | "pre_norm":true, 25 | 26 | "masks":false, 27 | 28 | "aux_loss":false, 29 | 30 | "mask_loss_coef":1, 31 | "dice_loss_coef":1, 32 | "ce_loss_coef":1, 33 | "bbox_loss_coef":5, 34 | "giou_loss_coef":2, 35 | "eos_coef":0.4, 36 | 37 | "set_cost_class":1, 38 | "set_cost_bbox":5, 39 | "set_cost_giou":2, 40 | 41 | "device":"cuda", 42 | "seed":42, 43 | "start_epoch":0, 44 | "num_workers":1 45 | } 46 | -------------------------------------------------------------------------------- /src/detection_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lr":5e-5, 3 | "lr_backbone":1e-5, 4 | "batch_size":2, 5 | "weight_decay":1e-4, 6 | "epochs":20, 7 | "lr_drop":1, 8 | "lr_gamma":0.9, 9 | "clip_max_norm":0.1, 10 | 11 | "backbone":"resnet18", 12 | "num_classes":2, 13 | "dilation":false, 14 | "position_embedding":"sine", 15 | "emphasized_weights":{}, 16 | 17 | "enc_layers":6, 18 | "dec_layers":6, 19 | "dim_feedforward":2048, 20 | "hidden_dim":256, 21 | "dropout":0.1, 22 | "nheads":8, 23 | "num_queries":15, 24 | "pre_norm":true, 25 | 26 | "masks":false, 27 | 28 | "aux_loss":false, 29 | 30 | "mask_loss_coef":1, 31 | "dice_loss_coef":1, 32 | "ce_loss_coef":1, 33 | "bbox_loss_coef":5, 34 | "giou_loss_coef":2, 35 | "eos_coef":0.4, 36 | 37 | "set_cost_class":1, 38 | "set_cost_bbox":5, 39 | "set_cost_giou":2, 40 | 41 | "device":"cuda", 42 | "seed":42, 43 | "start_epoch":0, 44 | "num_workers":1 45 | } 46 | 47 | -------------------------------------------------------------------------------- /detr/d2/detr/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 3 | from detectron2.config import CfgNode as CN 4 | 5 | 6 | def add_detr_config(cfg): 7 | """ 8 | Add config for DETR. 
9 | """ 10 | cfg.MODEL.DETR = CN() 11 | cfg.MODEL.DETR.NUM_CLASSES = 80 12 | 13 | # For Segmentation 14 | cfg.MODEL.DETR.FROZEN_WEIGHTS = '' 15 | 16 | # LOSS 17 | cfg.MODEL.DETR.GIOU_WEIGHT = 2.0 18 | cfg.MODEL.DETR.L1_WEIGHT = 5.0 19 | cfg.MODEL.DETR.DEEP_SUPERVISION = True 20 | cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1 21 | 22 | # TRANSFORMER 23 | cfg.MODEL.DETR.NHEADS = 8 24 | cfg.MODEL.DETR.DROPOUT = 0.1 25 | cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048 26 | cfg.MODEL.DETR.ENC_LAYERS = 6 27 | cfg.MODEL.DETR.DEC_LAYERS = 6 28 | cfg.MODEL.DETR.PRE_NORM = False 29 | 30 | cfg.MODEL.DETR.HIDDEN_DIM = 256 31 | cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100 32 | 33 | cfg.SOLVER.OPTIMIZER = "ADAMW" 34 | cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /detr/d2/configs/detr_256_6_6_torchvision.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "Detr" 3 | WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: False 7 | RESNETS: 8 | DEPTH: 50 9 | STRIDE_IN_1X1: False 10 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 11 | DETR: 12 | GIOU_WEIGHT: 2.0 13 | L1_WEIGHT: 5.0 14 | NUM_OBJECT_QUERIES: 100 15 | DATASETS: 16 | TRAIN: ("coco_2017_train",) 17 | TEST: ("coco_2017_val",) 18 | SOLVER: 19 | IMS_PER_BATCH: 64 20 | BASE_LR: 0.0001 21 | STEPS: (369600,) 22 | MAX_ITER: 554400 23 | WARMUP_FACTOR: 1.0 24 | WARMUP_ITERS: 10 25 | WEIGHT_DECAY: 0.0001 26 | OPTIMIZER: "ADAMW" 27 | BACKBONE_MULTIPLIER: 0.1 28 | CLIP_GRADIENTS: 29 | ENABLED: True 30 | CLIP_TYPE: "full_model" 31 | CLIP_VALUE: 0.01 32 | NORM_TYPE: 2.0 33 | INPUT: 34 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 35 | CROP: 36 | ENABLED: True 37 | TYPE: "absolute_range" 38 | SIZE: (384, 600) 39 | FORMAT: "RGB" 40 | TEST: 41 | EVAL_PERIOD: 4000 42 | DATALOADER: 43 | FILTER_EMPTY_ANNOTATIONS: False 44 | NUM_WORKERS: 4 45 | VERSION: 2 46 | -------------------------------------------------------------------------------- /detr/d2/configs/detr_segm_256_6_6_torchvision.yaml: -------------------------------------------------------------------------------- 1 | MODEL: 2 | META_ARCHITECTURE: "Detr" 3 | # WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" 4 | PIXEL_MEAN: [123.675, 116.280, 103.530] 5 | PIXEL_STD: [58.395, 57.120, 57.375] 6 | MASK_ON: True 7 | RESNETS: 8 | DEPTH: 50 9 | STRIDE_IN_1X1: False 10 | OUT_FEATURES: ["res2", "res3", "res4", "res5"] 11 | DETR: 12 | GIOU_WEIGHT: 2.0 13 | L1_WEIGHT: 5.0 14 | NUM_OBJECT_QUERIES: 100 15 | FROZEN_WEIGHTS: '' 16 | DATASETS: 17 | TRAIN: ("coco_2017_train",) 18 | TEST: ("coco_2017_val",) 19 | SOLVER: 20 | IMS_PER_BATCH: 64 21 | BASE_LR: 0.0001 22 | STEPS: (55440,) 23 | MAX_ITER: 92400 24 | WARMUP_FACTOR: 1.0 25 | WARMUP_ITERS: 10 26 | WEIGHT_DECAY: 0.0001 27 | OPTIMIZER: "ADAMW" 28 | BACKBONE_MULTIPLIER: 0.1 29 | CLIP_GRADIENTS: 30 | ENABLED: True 31 | CLIP_TYPE: "full_model" 32 | CLIP_VALUE: 0.01 33 | NORM_TYPE: 2.0 34 | INPUT: 35 | MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) 36 | CROP: 37 | ENABLED: True 38 | TYPE: "absolute_range" 39 | SIZE: (384, 600) 40 | FORMAT: "RGB" 41 | TEST: 42 | EVAL_PERIOD: 4000 43 | DATALOADER: 44 | FILTER_EMPTY_ANNOTATIONS: False 45 | NUM_WORKERS: 4 46 | VERSION: 2 47 | -------------------------------------------------------------------------------- /SUPPORT.md: -------------------------------------------------------------------------------- 1 | # TODO: The maintainer of this repo has not yet edited this file 2 | 3 | **REPO OWNER**: Do you want Customer Service & Support (CSS) support for this product/project? 4 | 5 | - **No CSS support:** Fill out this template with information about how to file issues and get help. 6 | - **Yes CSS support:** Fill out an intake form at [aka.ms/spot](https://aka.ms/spot). 
CSS will work with/help you to determine next steps. More details also available at [aka.ms/onboardsupport](https://aka.ms/onboardsupport). 7 | - **Not sure?** Fill out a SPOT intake as though the answer were "Yes". CSS will help you decide. 8 | 9 | *Then remove this first heading from this SUPPORT.MD file before publishing your repo.* 10 | 11 | # Support 12 | 13 | ## How to file issues and get help 14 | 15 | This project uses GitHub Issues to track bugs and feature requests. Please search the existing 16 | issues before filing new issues to avoid duplicates. For new issues, file your bug or 17 | feature request as a new Issue. 18 | 19 | For help and questions about using this project, please **REPO MAINTAINER: INSERT INSTRUCTIONS HERE 20 | FOR HOW TO ENGAGE REPO OWNERS OR COMMUNITY FOR HELP. COULD BE A STACK OVERFLOW TAG OR OTHER 21 | CHANNEL. WHERE WILL YOU HELP PEOPLE?**. 22 | 23 | ## Microsoft Support Policy 24 | 25 | Support for this **PROJECT or PRODUCT** is limited to the resources listed above. 26 | -------------------------------------------------------------------------------- /detr/datasets/panoptic_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import json 3 | import os 4 | 5 | import util.misc as utils 6 | 7 | try: 8 | from panopticapi.evaluation import pq_compute 9 | except ImportError: 10 | pass 11 | 12 | 13 | class PanopticEvaluator(object): 14 | def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): 15 | self.gt_json = ann_file 16 | self.gt_folder = ann_folder 17 | if utils.is_main_process(): 18 | if not os.path.exists(output_dir): 19 | os.mkdir(output_dir) 20 | self.output_dir = output_dir 21 | self.predictions = [] 22 | 23 | def update(self, predictions): 24 | for p in predictions: 25 | with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: 26 | f.write(p.pop("png_string")) 27 | 28 | self.predictions += predictions 29 | 30 | def synchronize_between_processes(self): 31 | all_predictions = utils.all_gather(self.predictions) 32 | merged_predictions = [] 33 | for p in all_predictions: 34 | merged_predictions += p 35 | self.predictions = merged_predictions 36 | 37 | def summarize(self): 38 | if utils.is_main_process(): 39 | json_data = {"annotations": self.predictions} 40 | predictions_json = os.path.join(self.output_dir, "predictions.json") 41 | with open(predictions_json, "w") as f: 42 | f.write(json.dumps(json_data)) 43 | return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) 44 | return None 45 | -------------------------------------------------------------------------------- /detr/d2/README.md: -------------------------------------------------------------------------------- 1 | Detectron2 wrapper for DETR 2 | ======= 3 | 4 | We provide a Detectron2 wrapper for DETR, thus providing a way to better integrate it in the existing detection ecosystem. It can be used for example to easily leverage datasets or backbones provided in Detectron2. 5 | 6 | This wrapper currently supports only box detection, and is intended to be as close as possible to the original implementation, and we checked that it indeed match the results. Some notable facts and caveats: 7 | - The data augmentation matches DETR's original data augmentation. This required patching the RandomCrop augmentation from Detectron2, so you'll need a version from the master branch from June 24th 2020 or more recent. 
8 | - To match DETR's original backbone initialization, we use the weights of a ResNet50 trained on imagenet using torchvision. This network uses a different pixel mean and std than most of the backbones available in Detectron2 by default, so extra care must be taken when switching to another one. Note that no other torchvision models are available in Detectron2 as of now, though it may change in the future. 9 | - The gradient clipping mode is "full_model", which is not the default in Detectron2. 10 | 11 | # Usage 12 | 13 | To install Detectron2, please follow the [official installation instructions](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). 14 | 15 | ## Evaluating a model 16 | 17 | For convenience, we provide a conversion script to convert models trained by the main DETR training loop into the format of this wrapper. To download and convert the main Resnet50 model, simply do: 18 | 19 | ``` 20 | python converter.py --source_model https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --output_model converted_model.pth 21 | ``` 22 | 23 | You can then evaluate it using: 24 | ``` 25 | python train_net.py --eval-only --config configs/detr_256_6_6_torchvision.yaml MODEL.WEIGHTS "converted_model.pth" 26 | ``` 27 | 28 | 29 | ## Training 30 | 31 | To train DETR on a single node with 8 gpus, simply use: 32 | ``` 33 | python train_net.py --config configs/detr_256_6_6_torchvision.yaml --num-gpus 8 34 | ``` 35 | 36 | To fine-tune DETR for instance segmentation on a single node with 8 gpus, simply use: 37 | ``` 38 | python train_net.py --config configs/detr_segm_256_6_6_torchvision.yaml --num-gpus 8 MODEL.DETR.FROZEN_WEIGHTS 39 | ``` 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # models 132 | *.pth 133 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main, rohithpv ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '42 15 * * 6' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v1 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v1 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v1 72 | -------------------------------------------------------------------------------- /detr/d2/converter.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version. 4 | """ 5 | import json 6 | import argparse 7 | 8 | import numpy as np 9 | import torch 10 | 11 | 12 | def parse_args(): 13 | parser = argparse.ArgumentParser("D2 model converter") 14 | 15 | parser.add_argument("--source_model", default="", type=str, help="Path or url to the DETR model to convert") 16 | parser.add_argument("--output_model", default="", type=str, help="Path where to save the converted model") 17 | return parser.parse_args() 18 | 19 | 20 | def main(): 21 | args = parse_args() 22 | 23 | # D2 expects contiguous classes, so we need to remap the 92 classes from DETR 24 | # fmt: off 25 | coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 | 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 27 | 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 28 | 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91] 29 | # fmt: on 30 | 31 | coco_idx = np.array(coco_idx) 32 | 33 | if args.source_model.startswith("https"): 34 | checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True) 35 | else: 36 | checkpoint = torch.load(args.source_model, map_location="cpu") 37 | model_to_convert = checkpoint["model"] 38 | 39 | model_converted = {} 40 | for k in model_to_convert.keys(): 41 | old_k = k 42 | if "backbone" in k: 43 | k = k.replace("backbone.0.body.", "") 44 | if "layer" not in k: 45 | k = "stem." + k 46 | for t in [1, 2, 3, 4]: 47 | k = k.replace(f"layer{t}", f"res{t + 1}") 48 | for t in [1, 2, 3]: 49 | k = k.replace(f"bn{t}", f"conv{t}.norm") 50 | k = k.replace("downsample.0", "shortcut") 51 | k = k.replace("downsample.1", "shortcut.norm") 52 | k = "backbone.0.backbone." + k 53 | k = "detr." 
+ k 54 | print(old_k, "->", k) 55 | if "class_embed" in old_k: 56 | v = model_to_convert[old_k].detach() 57 | if v.shape[0] == 92: 58 | shape_old = v.shape 59 | model_converted[k] = v[coco_idx] 60 | print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape)) 61 | continue 62 | model_converted[k] = model_to_convert[old_k].detach() 63 | 64 | model_to_save = {"model": model_converted} 65 | torch.save(model_to_save, args.output_model) 66 | 67 | 68 | if __name__ == "__main__": 69 | main() 70 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://docs.microsoft.com/en-us/previous-versions/tn-archive/cc751383(v=technet.10)), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://msrc.microsoft.com/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://www.microsoft.com/en-us/msrc/pgp-key-msrc). 16 | 17 | You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://microsoft.com/msrc/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://www.microsoft.com/en-us/msrc/cvd). 
40 | 41 | -------------------------------------------------------------------------------- /detr/util/box_ops.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Utilities for bounding box manipulation and GIoU. 4 | """ 5 | import torch 6 | from torchvision.ops.boxes import box_area 7 | 8 | 9 | def box_cxcywh_to_xyxy(x): 10 | x_c, y_c, w, h = x.unbind(-1) 11 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h), 12 | (x_c + 0.5 * w), (y_c + 0.5 * h)] 13 | return torch.stack(b, dim=-1) 14 | 15 | 16 | def box_xyxy_to_cxcywh(x): 17 | x0, y0, x1, y1 = x.unbind(-1) 18 | b = [(x0 + x1) / 2, (y0 + y1) / 2, 19 | (x1 - x0), (y1 - y0)] 20 | return torch.stack(b, dim=-1) 21 | 22 | 23 | # modified from torchvision to also return the union 24 | def box_iou(boxes1, boxes2): 25 | area1 = box_area(boxes1) 26 | area2 = box_area(boxes2) 27 | 28 | lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] 29 | rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] 30 | 31 | wh = (rb - lt).clamp(min=0) # [N,M,2] 32 | inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] 33 | 34 | union = area1[:, None] + area2 - inter 35 | 36 | iou = inter / union 37 | return iou, union 38 | 39 | 40 | def generalized_box_iou(boxes1, boxes2): 41 | """ 42 | Generalized IoU from https://giou.stanford.edu/ 43 | 44 | The boxes should be in [x0, y0, x1, y1] format 45 | 46 | Returns a [N, M] pairwise matrix, where N = len(boxes1) 47 | and M = len(boxes2) 48 | """ 49 | # degenerate boxes gives inf / nan results 50 | # so do an early check 51 | assert (boxes1[:, 2:] >= boxes1[:, :2]).all() 52 | assert (boxes2[:, 2:] >= boxes2[:, :2]).all() 53 | iou, union = box_iou(boxes1, boxes2) 54 | 55 | lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) 56 | rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) 57 | 58 | wh = (rb - lt).clamp(min=0) # [N,M,2] 59 | area = wh[:, :, 0] * wh[:, :, 1] 60 | 61 | return iou - (area - union) / area 62 | 63 | 64 | def masks_to_boxes(masks): 65 | """Compute the bounding boxes around the provided masks 66 | 67 | The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 68 | 69 | Returns a [N, 4] tensors, with the boxes in xyxy format 70 | """ 71 | if masks.numel() == 0: 72 | return torch.zeros((0, 4), device=masks.device) 73 | 74 | h, w = masks.shape[-2:] 75 | 76 | y = torch.arange(0, h, dtype=torch.float) 77 | x = torch.arange(0, w, dtype=torch.float) 78 | y, x = torch.meshgrid(y, x) 79 | 80 | x_mask = (masks * x.unsqueeze(0)) 81 | x_max = x_mask.flatten(1).max(-1)[0] 82 | x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] 83 | 84 | y_mask = (masks * y.unsqueeze(0)) 85 | y_max = y_mask.flatten(1).max(-1)[0] 86 | y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] 87 | 88 | return torch.stack([x_min, y_min, x_max, y_max], 1) 89 | -------------------------------------------------------------------------------- /detr/models/position_encoding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Various positional encodings for the transformer. 
4 | """ 5 | import math 6 | import torch 7 | from torch import nn 8 | 9 | from util.misc import NestedTensor 10 | 11 | 12 | class PositionEmbeddingSine(nn.Module): 13 | """ 14 | This is a more standard version of the position embedding, very similar to the one 15 | used by the Attention is all you need paper, generalized to work on images. 16 | """ 17 | def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): 18 | super().__init__() 19 | self.num_pos_feats = num_pos_feats 20 | self.temperature = temperature 21 | self.normalize = normalize 22 | if scale is not None and normalize is False: 23 | raise ValueError("normalize should be True if scale is passed") 24 | if scale is None: 25 | scale = 2 * math.pi 26 | self.scale = scale 27 | 28 | def forward(self, tensor_list: NestedTensor): 29 | x = tensor_list.tensors 30 | mask = tensor_list.mask 31 | assert mask is not None 32 | not_mask = ~mask 33 | y_embed = not_mask.cumsum(1, dtype=torch.float32) 34 | x_embed = not_mask.cumsum(2, dtype=torch.float32) 35 | if self.normalize: 36 | eps = 1e-6 37 | y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale 38 | x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale 39 | 40 | dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) 41 | dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) 42 | 43 | pos_x = x_embed[:, :, :, None] / dim_t 44 | pos_y = y_embed[:, :, :, None] / dim_t 45 | pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) 46 | pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) 47 | pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) 48 | return pos 49 | 50 | 51 | class PositionEmbeddingLearned(nn.Module): 52 | """ 53 | Absolute pos embedding, learned. 54 | """ 55 | def __init__(self, num_pos_feats=256): 56 | super().__init__() 57 | self.row_embed = nn.Embedding(50, num_pos_feats) 58 | self.col_embed = nn.Embedding(50, num_pos_feats) 59 | self.reset_parameters() 60 | 61 | def reset_parameters(self): 62 | nn.init.uniform_(self.row_embed.weight) 63 | nn.init.uniform_(self.col_embed.weight) 64 | 65 | def forward(self, tensor_list: NestedTensor): 66 | x = tensor_list.tensors 67 | h, w = x.shape[-2:] 68 | i = torch.arange(w, device=x.device) 69 | j = torch.arange(h, device=x.device) 70 | x_emb = self.col_embed(i) 71 | y_emb = self.row_embed(j) 72 | pos = torch.cat([ 73 | x_emb.unsqueeze(0).repeat(h, 1, 1), 74 | y_emb.unsqueeze(1).repeat(1, w, 1), 75 | ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) 76 | return pos 77 | 78 | 79 | def build_position_encoding(args): 80 | N_steps = args.hidden_dim // 2 81 | if args.position_embedding in ('v2', 'sine'): 82 | # TODO find a better way of exposing other arguments 83 | position_embedding = PositionEmbeddingSine(N_steps, normalize=True) 84 | elif args.position_embedding in ('v3', 'learned'): 85 | position_embedding = PositionEmbeddingLearned(N_steps) 86 | else: 87 | raise ValueError(f"not supported {args.position_embedding}") 88 | 89 | return position_embedding 90 | -------------------------------------------------------------------------------- /detr/run_with_submitit.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | A script to run multinode training with submitit. 
4 | """ 5 | import argparse 6 | import os 7 | import uuid 8 | from pathlib import Path 9 | 10 | import main as detection 11 | import submitit 12 | 13 | 14 | def parse_args(): 15 | detection_parser = detection.get_args_parser() 16 | parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser]) 17 | parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") 18 | parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request") 19 | parser.add_argument("--timeout", default=60, type=int, help="Duration of the job") 20 | parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") 21 | return parser.parse_args() 22 | 23 | 24 | def get_shared_folder() -> Path: 25 | user = os.getenv("USER") 26 | if Path("/checkpoint/").is_dir(): 27 | p = Path(f"/checkpoint/{user}/experiments") 28 | p.mkdir(exist_ok=True) 29 | return p 30 | raise RuntimeError("No shared folder available") 31 | 32 | 33 | def get_init_file(): 34 | # Init file must not exist, but it's parent dir must exist. 35 | os.makedirs(str(get_shared_folder()), exist_ok=True) 36 | init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" 37 | if init_file.exists(): 38 | os.remove(str(init_file)) 39 | return init_file 40 | 41 | 42 | class Trainer(object): 43 | def __init__(self, args): 44 | self.args = args 45 | 46 | def __call__(self): 47 | import main as detection 48 | 49 | self._setup_gpu_args() 50 | detection.main(self.args) 51 | 52 | def checkpoint(self): 53 | import os 54 | import submitit 55 | from pathlib import Path 56 | 57 | self.args.dist_url = get_init_file().as_uri() 58 | checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") 59 | if os.path.exists(checkpoint_file): 60 | self.args.resume = checkpoint_file 61 | print("Requeuing ", self.args) 62 | empty_trainer = type(self)(self.args) 63 | return submitit.helpers.DelayedSubmission(empty_trainer) 64 | 65 | def _setup_gpu_args(self): 66 | import submitit 67 | from pathlib import Path 68 | 69 | job_env = submitit.JobEnvironment() 70 | self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) 71 | self.args.gpu = job_env.local_rank 72 | self.args.rank = job_env.global_rank 73 | self.args.world_size = job_env.num_tasks 74 | print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") 75 | 76 | 77 | def main(): 78 | args = parse_args() 79 | if args.job_dir == "": 80 | args.job_dir = get_shared_folder() / "%j" 81 | 82 | # Note that the folder will depend on the job_id, to easily track experiments 83 | executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) 84 | 85 | # cluster setup is defined by environment variables 86 | num_gpus_per_node = args.ngpus 87 | nodes = args.nodes 88 | timeout_min = args.timeout 89 | 90 | executor.update_parameters( 91 | mem_gb=40 * num_gpus_per_node, 92 | gpus_per_node=num_gpus_per_node, 93 | tasks_per_node=num_gpus_per_node, # one task per GPU 94 | cpus_per_task=10, 95 | nodes=nodes, 96 | timeout_min=timeout_min, # max is 60 * 72 97 | ) 98 | 99 | executor.update_parameters(name="detr") 100 | 101 | args.dist_url = get_init_file().as_uri() 102 | args.output_dir = args.job_dir 103 | 104 | trainer = Trainer(args) 105 | job = executor.submit(trainer) 106 | 107 | print("Submitted job_id:", job.job_id) 108 | 109 | 110 | if __name__ == "__main__": 111 | main() 112 | -------------------------------------------------------------------------------- 
/detr/datasets/coco_panoptic.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import json 3 | from pathlib import Path 4 | 5 | import numpy as np 6 | import torch 7 | from PIL import Image 8 | 9 | from panopticapi.utils import rgb2id 10 | from util.box_ops import masks_to_boxes 11 | 12 | from .coco import make_coco_transforms 13 | 14 | 15 | class CocoPanoptic: 16 | def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): 17 | with open(ann_file, 'r') as f: 18 | self.coco = json.load(f) 19 | 20 | # sort 'images' field so that they are aligned with 'annotations' 21 | # i.e., in alphabetical order 22 | self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) 23 | # sanity check 24 | if "annotations" in self.coco: 25 | for img, ann in zip(self.coco['images'], self.coco['annotations']): 26 | assert img['file_name'][:-4] == ann['file_name'][:-4] 27 | 28 | self.img_folder = img_folder 29 | self.ann_folder = ann_folder 30 | self.ann_file = ann_file 31 | self.transforms = transforms 32 | self.return_masks = return_masks 33 | 34 | def __getitem__(self, idx): 35 | ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] 36 | img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') 37 | ann_path = Path(self.ann_folder) / ann_info['file_name'] 38 | 39 | img = Image.open(img_path).convert('RGB') 40 | w, h = img.size 41 | if "segments_info" in ann_info: 42 | masks = np.asarray(Image.open(ann_path), dtype=np.uint32) 43 | masks = rgb2id(masks) 44 | 45 | ids = np.array([ann['id'] for ann in ann_info['segments_info']]) 46 | masks = masks == ids[:, None, None] 47 | 48 | masks = torch.as_tensor(masks, dtype=torch.uint8) 49 | labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) 50 | 51 | target = {} 52 | target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) 53 | if self.return_masks: 54 | target['masks'] = masks 55 | target['labels'] = labels 56 | 57 | target["boxes"] = masks_to_boxes(masks) 58 | 59 | target['size'] = torch.as_tensor([int(h), int(w)]) 60 | target['orig_size'] = torch.as_tensor([int(h), int(w)]) 61 | if "segments_info" in ann_info: 62 | for name in ['iscrowd', 'area']: 63 | target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) 64 | 65 | if self.transforms is not None: 66 | img, target = self.transforms(img, target) 67 | 68 | return img, target 69 | 70 | def __len__(self): 71 | return len(self.coco['images']) 72 | 73 | def get_height_and_width(self, idx): 74 | img_info = self.coco['images'][idx] 75 | height = img_info['height'] 76 | width = img_info['width'] 77 | return height, width 78 | 79 | 80 | def build(image_set, args): 81 | img_folder_root = Path(args.coco_path) 82 | ann_folder_root = Path(args.coco_panoptic_path) 83 | assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' 84 | assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' 85 | mode = 'panoptic' 86 | PATHS = { 87 | "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), 88 | "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'), 89 | } 90 | 91 | img_folder, ann_file = PATHS[image_set] 92 | img_folder_path = img_folder_root / img_folder 93 | ann_folder = ann_folder_root / f'{mode}_{img_folder}' 
94 | ann_file = ann_folder_root / ann_file 95 | 96 | dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, 97 | transforms=make_coco_transforms(image_set), return_masks=args.masks) 98 | 99 | return dataset 100 | -------------------------------------------------------------------------------- /detr/models/matcher.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Modules to compute the matching cost and solve the corresponding LSAP. 4 | """ 5 | import torch 6 | from scipy.optimize import linear_sum_assignment 7 | from torch import nn 8 | 9 | from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou 10 | 11 | 12 | class HungarianMatcher(nn.Module): 13 | """This class computes an assignment between the targets and the predictions of the network 14 | 15 | For efficiency reasons, the targets don't include the no_object. Because of this, in general, 16 | there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, 17 | while the others are un-matched (and thus treated as non-objects). 18 | """ 19 | 20 | def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): 21 | """Creates the matcher 22 | 23 | Params: 24 | cost_class: This is the relative weight of the classification error in the matching cost 25 | cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost 26 | cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost 27 | """ 28 | super().__init__() 29 | self.cost_class = cost_class 30 | self.cost_bbox = cost_bbox 31 | self.cost_giou = cost_giou 32 | assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" 33 | 34 | @torch.no_grad() 35 | def forward(self, outputs, targets): 36 | """ Performs the matching 37 | 38 | Params: 39 | outputs: This is a dict that contains at least these entries: 40 | "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits 41 | "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates 42 | 43 | targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: 44 | "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth 45 | objects in the target) containing the class labels 46 | "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates 47 | 48 | Returns: 49 | A list of size batch_size, containing tuples of (index_i, index_j) where: 50 | - index_i is the indices of the selected predictions (in order) 51 | - index_j is the indices of the corresponding selected targets (in order) 52 | For each batch element, it holds: 53 | len(index_i) = len(index_j) = min(num_queries, num_target_boxes) 54 | """ 55 | bs, num_queries = outputs["pred_logits"].shape[:2] 56 | 57 | # We flatten to compute the cost matrices in a batch 58 | out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] 59 | out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] 60 | 61 | # Also concat the target labels and boxes 62 | tgt_ids = torch.cat([v["labels"] for v in targets]) 63 | tgt_bbox = torch.cat([v["boxes"] for v in targets]) 64 | 65 | # Compute the classification cost. 
Contrary to the loss, we don't use the Negative Log Likelihood, 66 | # but approximate it in 1 - proba[target class]. 67 | # The 1 is a constant that doesn't change the matching, it can be ommitted. 68 | cost_class = -out_prob[:, tgt_ids] 69 | 70 | # Compute the L1 cost between boxes 71 | cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) 72 | 73 | # Compute the giou cost betwen boxes 74 | cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) 75 | 76 | # Final cost matrix 77 | C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou 78 | C = C.view(bs, num_queries, -1).cpu() 79 | 80 | sizes = [len(v["boxes"]) for v in targets] 81 | indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] 82 | return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] 83 | 84 | 85 | def build_matcher(args): 86 | return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) 87 | -------------------------------------------------------------------------------- /detr/models/backbone.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Backbone modules. 4 | """ 5 | from collections import OrderedDict 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | import torchvision 10 | from torch import nn 11 | from torchvision.models._utils import IntermediateLayerGetter 12 | from typing import Dict, List 13 | 14 | from util.misc import NestedTensor, is_main_process 15 | 16 | from .position_encoding import build_position_encoding 17 | 18 | 19 | class FrozenBatchNorm2d(torch.nn.Module): 20 | """ 21 | BatchNorm2d where the batch statistics and the affine parameters are fixed. 22 | 23 | Copy-paste from torchvision.misc.ops with added eps before rqsrt, 24 | without which any other models than torchvision.models.resnet[18,34,50,101] 25 | produce nans. 
26 | """ 27 | 28 | def __init__(self, n): 29 | super(FrozenBatchNorm2d, self).__init__() 30 | self.register_buffer("weight", torch.ones(n)) 31 | self.register_buffer("bias", torch.zeros(n)) 32 | self.register_buffer("running_mean", torch.zeros(n)) 33 | self.register_buffer("running_var", torch.ones(n)) 34 | 35 | def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, 36 | missing_keys, unexpected_keys, error_msgs): 37 | num_batches_tracked_key = prefix + 'num_batches_tracked' 38 | if num_batches_tracked_key in state_dict: 39 | del state_dict[num_batches_tracked_key] 40 | 41 | super(FrozenBatchNorm2d, self)._load_from_state_dict( 42 | state_dict, prefix, local_metadata, strict, 43 | missing_keys, unexpected_keys, error_msgs) 44 | 45 | def forward(self, x): 46 | # move reshapes to the beginning 47 | # to make it fuser-friendly 48 | w = self.weight.reshape(1, -1, 1, 1) 49 | b = self.bias.reshape(1, -1, 1, 1) 50 | rv = self.running_var.reshape(1, -1, 1, 1) 51 | rm = self.running_mean.reshape(1, -1, 1, 1) 52 | eps = 1e-5 53 | scale = w * (rv + eps).rsqrt() 54 | bias = b - rm * scale 55 | return x * scale + bias 56 | 57 | 58 | class BackboneBase(nn.Module): 59 | 60 | def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): 61 | super().__init__() 62 | for name, parameter in backbone.named_parameters(): 63 | if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: 64 | parameter.requires_grad_(False) 65 | if return_interm_layers: 66 | return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} 67 | else: 68 | return_layers = {'layer4': "0"} 69 | self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) 70 | self.num_channels = num_channels 71 | 72 | def forward(self, tensor_list: NestedTensor): 73 | xs = self.body(tensor_list.tensors) 74 | out: Dict[str, NestedTensor] = {} 75 | for name, x in xs.items(): 76 | m = tensor_list.mask 77 | assert m is not None 78 | mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] 79 | out[name] = NestedTensor(x, mask) 80 | return out 81 | 82 | 83 | class Backbone(BackboneBase): 84 | """ResNet backbone with frozen BatchNorm.""" 85 | def __init__(self, name: str, 86 | train_backbone: bool, 87 | return_interm_layers: bool, 88 | dilation: bool): 89 | backbone = getattr(torchvision.models, name)( 90 | replace_stride_with_dilation=[False, False, dilation], 91 | pretrained=True, norm_layer=FrozenBatchNorm2d) 92 | num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 93 | super().__init__(backbone, train_backbone, num_channels, return_interm_layers) 94 | 95 | 96 | class Joiner(nn.Sequential): 97 | def __init__(self, backbone, position_embedding): 98 | super().__init__(backbone, position_embedding) 99 | 100 | def forward(self, tensor_list: NestedTensor): 101 | xs = self[0](tensor_list) 102 | out: List[NestedTensor] = [] 103 | pos = [] 104 | for name, x in xs.items(): 105 | out.append(x) 106 | # position encoding 107 | pos.append(self[1](x).to(x.tensors.dtype)) 108 | 109 | return out, pos 110 | 111 | 112 | def build_backbone(args): 113 | position_embedding = build_position_encoding(args) 114 | train_backbone = args.lr_backbone > 0 115 | return_interm_layers = args.masks 116 | backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) 117 | model = Joiner(backbone, position_embedding) 118 | model.num_channels = backbone.num_channels 119 | return model 120 | 
-------------------------------------------------------------------------------- /detr/util/plot_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plotting utilities to visualize training logs. 3 | """ 4 | import torch 5 | import pandas as pd 6 | import numpy as np 7 | import seaborn as sns 8 | import matplotlib.pyplot as plt 9 | 10 | from pathlib import Path, PurePath 11 | 12 | 13 | def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): 14 | ''' 15 | Function to plot specific fields from training log(s). Plots both training and test results. 16 | 17 | :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file 18 | - fields = which results to plot from each log file - plots both training and test for each field. 19 | - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots 20 | - log_name = optional, name of log file if different than default 'log.txt'. 21 | 22 | :: Outputs - matplotlib plots of results in fields, color coded for each log file. 23 | - solid lines are training results, dashed lines are test results. 24 | 25 | ''' 26 | func_name = "plot_utils.py::plot_logs" 27 | 28 | # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, 29 | # convert single Path to list to avoid 'not iterable' error 30 | 31 | if not isinstance(logs, list): 32 | if isinstance(logs, PurePath): 33 | logs = [logs] 34 | print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") 35 | else: 36 | raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ 37 | Expect list[Path] or single Path obj, received {type(logs)}") 38 | 39 | # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir 40 | for i, dir in enumerate(logs): 41 | if not isinstance(dir, PurePath): 42 | raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") 43 | if not dir.exists(): 44 | raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") 45 | # verify log_name exists 46 | fn = Path(dir / log_name) 47 | if not fn.exists(): 48 | print(f"-> missing {log_name}. 
Have you gotten to Epoch 1 in training?") 49 | print(f"--> full path of missing log file: {fn}") 50 | return 51 | 52 | # load log file(s) and plot 53 | dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] 54 | 55 | fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) 56 | 57 | for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): 58 | for j, field in enumerate(fields): 59 | if field == 'mAP': 60 | coco_eval = pd.DataFrame( 61 | np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] 62 | ).ewm(com=ewm_col).mean() 63 | axs[j].plot(coco_eval, c=color) 64 | else: 65 | df.interpolate().ewm(com=ewm_col).mean().plot( 66 | y=[f'train_{field}', f'test_{field}'], 67 | ax=axs[j], 68 | color=[color] * 2, 69 | style=['-', '--'] 70 | ) 71 | for ax, field in zip(axs, fields): 72 | ax.legend([Path(p).name for p in logs]) 73 | ax.set_title(field) 74 | 75 | 76 | def plot_precision_recall(files, naming_scheme='iter'): 77 | if naming_scheme == 'exp_id': 78 | # name becomes exp_id 79 | names = [f.parts[-3] for f in files] 80 | elif naming_scheme == 'iter': 81 | names = [f.stem for f in files] 82 | else: 83 | raise ValueError(f'not supported {naming_scheme}') 84 | fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) 85 | for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): 86 | data = torch.load(f) 87 | # precision is n_iou, n_points, n_cat, n_area, max_det 88 | precision = data['precision'] 89 | recall = data['params'].recThrs 90 | scores = data['scores'] 91 | # take precision for all classes, all areas and 100 detections 92 | precision = precision[0, :, :, 0, -1].mean(1) 93 | scores = scores[0, :, :, 0, -1].mean(1) 94 | prec = precision.mean() 95 | rec = data['recall'][0, :, 0, -1].mean() 96 | print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + 97 | f'score={scores.mean():0.3f}, ' + 98 | f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' 99 | ) 100 | axs[0].plot(recall, precision, c=color) 101 | axs[1].plot(recall, scores, c=color) 102 | 103 | axs[0].set_title('Precision / Recall') 104 | axs[0].legend(names) 105 | axs[1].set_title('Scores / Recall') 106 | axs[1].legend(names) 107 | return fig, axs 108 | -------------------------------------------------------------------------------- /detr/d2/detr/dataset_mapper.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import copy 3 | import logging 4 | 5 | import numpy as np 6 | import torch 7 | 8 | from detectron2.data import detection_utils as utils 9 | from detectron2.data import transforms as T 10 | from detectron2.data.transforms import TransformGen 11 | 12 | __all__ = ["DetrDatasetMapper"] 13 | 14 | 15 | def build_transform_gen(cfg, is_train): 16 | """ 17 | Create a list of :class:`TransformGen` from config. 
18 | Returns: 19 | list[TransformGen] 20 | """ 21 | if is_train: 22 | min_size = cfg.INPUT.MIN_SIZE_TRAIN 23 | max_size = cfg.INPUT.MAX_SIZE_TRAIN 24 | sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING 25 | else: 26 | min_size = cfg.INPUT.MIN_SIZE_TEST 27 | max_size = cfg.INPUT.MAX_SIZE_TEST 28 | sample_style = "choice" 29 | if sample_style == "range": 30 | assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) 31 | 32 | logger = logging.getLogger(__name__) 33 | tfm_gens = [] 34 | if is_train: 35 | tfm_gens.append(T.RandomFlip()) 36 | tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) 37 | if is_train: 38 | logger.info("TransformGens used in training: " + str(tfm_gens)) 39 | return tfm_gens 40 | 41 | 42 | class DetrDatasetMapper: 43 | """ 44 | A callable which takes a dataset dict in Detectron2 Dataset format, 45 | and map it into a format used by DETR. 46 | 47 | The callable currently does the following: 48 | 49 | 1. Read the image from "file_name" 50 | 2. Applies geometric transforms to the image and annotation 51 | 3. Find and applies suitable cropping to the image and annotation 52 | 4. Prepare image and annotation to Tensors 53 | """ 54 | 55 | def __init__(self, cfg, is_train=True): 56 | if cfg.INPUT.CROP.ENABLED and is_train: 57 | self.crop_gen = [ 58 | T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), 59 | T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), 60 | ] 61 | else: 62 | self.crop_gen = None 63 | 64 | self.mask_on = cfg.MODEL.MASK_ON 65 | self.tfm_gens = build_transform_gen(cfg, is_train) 66 | logging.getLogger(__name__).info( 67 | "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) 68 | ) 69 | 70 | self.img_format = cfg.INPUT.FORMAT 71 | self.is_train = is_train 72 | 73 | def __call__(self, dataset_dict): 74 | """ 75 | Args: 76 | dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 77 | 78 | Returns: 79 | dict: a format that builtin models in detectron2 accept 80 | """ 81 | dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below 82 | image = utils.read_image(dataset_dict["file_name"], format=self.img_format) 83 | utils.check_image_size(dataset_dict, image) 84 | 85 | if self.crop_gen is None: 86 | image, transforms = T.apply_transform_gens(self.tfm_gens, image) 87 | else: 88 | if np.random.rand() > 0.5: 89 | image, transforms = T.apply_transform_gens(self.tfm_gens, image) 90 | else: 91 | image, transforms = T.apply_transform_gens( 92 | self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image 93 | ) 94 | 95 | image_shape = image.shape[:2] # h, w 96 | 97 | # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, 98 | # but not efficient on large generic data structures due to the use of pickle & mp.Queue. 99 | # Therefore it's important to use torch.Tensor. 100 | dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) 101 | 102 | if not self.is_train: 103 | # USER: Modify this if you want to keep them for some reason. 104 | dataset_dict.pop("annotations", None) 105 | return dataset_dict 106 | 107 | if "annotations" in dataset_dict: 108 | # USER: Modify this if you want to keep them for some reason. 
109 | for anno in dataset_dict["annotations"]: 110 | if not self.mask_on: 111 | anno.pop("segmentation", None) 112 | anno.pop("keypoints", None) 113 | 114 | # USER: Implement additional transformations if you have other types of data 115 | annos = [ 116 | utils.transform_instance_annotations(obj, transforms, image_shape) 117 | for obj in dataset_dict.pop("annotations") 118 | if obj.get("iscrowd", 0) == 0 119 | ] 120 | instances = utils.annotations_to_instances(annos, image_shape) 121 | dataset_dict["instances"] = utils.filter_empty_instances(instances) 122 | return dataset_dict 123 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > :warning: **25/05/2022 Read before using**: this repo will not be updated in the future. Do not use this code if you need all the features that the official repo has to offer. It can serve as a good inference example, but you really shouldn't use our post-processing, since the official repo does a much better job at this. 2 | 3 | # Table Transformer Simple Inference 4 | This repository contains code to run simple inference and export the cells of a table (incl. the text in each cell) as a pandas DataFrame. Note that not all the features that the official repository offers are included. The resulting DataFrame is constructed based on the column and row predictions, and will probably not work on complex tables. The repo is built on top of [this](https://github.com/phamquiluan/table-transformer) fork of the official repo. 5 | 6 | We worked with tables that were already cropped out, but you could also first apply table detection using the [pre-trained weights](https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth) from the official repo. 7 | 8 |

9 | <!-- example table images and the corresponding prediction visualizations -->
22 | 23 | ## Setup Guide 24 | You can refer to the environment.yml file to set up your environment with conda. We used a virtual environment instead, which means you can either copy the requirements from the environment.yml file into a requirements.txt file or just install them manually. 25 | 26 | ### Create Virtual Environment 27 | 28 | ```bash 29 | python -m virtualenv venv 30 | source venv/bin/activate 31 | ``` 32 | 33 | ### PyTorch 34 | To install PyTorch you might be able to use the command below if your CUDA and Python versions (we used Python 3.8.10) match. If not, you can use the [get started guide](https://pytorch.org/get-started/locally/) to compose your install command. 35 | 36 | ```bash 37 | pip install torch==1.10.2+cu113 torchvision==0.11.3+cu113 torchaudio==0.10.2+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html 38 | ``` 39 | 40 | ### Detectron2 41 | Use the command below, and if it doesn't work you can follow the official [installation guide](https://detectron2.readthedocs.io/en/latest/tutorials/install.html). 42 | ```bash 43 | pip install "detectron2@git+https://github.com/facebookresearch/detectron2.git@v0.5#egg=detectron2" 44 | ``` 45 | 46 | ### PyTesseract 47 | The Tesseract OCR Python wrapper is used for text recognition on each cell in the tables. For more information about the installation you can refer to the [pytesseract](https://github.com/madmaze/pytesseract) GitHub repo. 48 | 49 | ```bash 50 | pip install pytesseract 51 | ``` 52 | 53 | Next, you need to download additional language packs, although English may be supported out of the box. You can download the language packs from either the [tessdata](https://github.com/tesseract-ocr/tessdata) or [tessdata_fast](https://github.com/tesseract-ocr/tessdata_fast) repository. Keep in mind that you trade some accuracy for speed when using the fast packs. 54 | 55 | You can either clone the whole repository or download a single pack. During development the English (eng.traineddata), French (fra.traineddata), and German (deu.traineddata) language packs were used. 56 | 57 | Put the language packs in a directory called tessdata and set the TESSDATA_PREFIX environment variable like we do below. 58 | 59 | ```bash 60 | export TESSDATA_PREFIX=/home/user/tessdata 61 | ``` 62 | 63 | ### Pre-trained Model Weights 64 | Assuming that you have already cropped out the table after table detection, you can use the pre-trained model below for table structure recognition. Put the model in the root of this repository after the download has finished or change the path to the model in the code. 65 | 66 | Table Structure Recognition: 67 |
68 | | Model | Schedule | AP50 | AP75 | AP | AR | GriTS Top | GriTS Con | GriTS Loc | Acc Con | File | Size |
69 | |-------|----------|------|------|----|----|-----------|-----------|-----------|---------|------|------|
70 | | DETR R18 | 20 Epochs | 0.970 | 0.941 | 0.902 | 0.935 | 0.9849 | 0.9850 | 0.9786 | 0.8243 | Weights | 110 MB |
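As a rough illustration of the per-cell OCR step described in the PyTesseract section above, the sketch below crops a single cell out of a table image and runs Tesseract on it. The `ocr_cell` helper, the cell coordinates, and the language string are hypothetical examples, not the repo's actual pipeline (that lives in main.py).

```python
# Hedged sketch: OCR one table cell with pytesseract; box and languages are made-up examples.
from PIL import Image
import pytesseract

def ocr_cell(table_image: Image.Image, cell_box) -> str:
    """Crop one cell (x0, y0, x1, y1 in pixels) and return its recognized text."""
    cell = table_image.crop(cell_box)
    # lang must match the .traineddata packs placed under TESSDATA_PREFIX
    return pytesseract.image_to_string(cell, lang="eng+fra+deu").strip()

img = Image.open("example_table.jpg")
print(ocr_cell(img, (10, 10, 120, 40)))  # hypothetical cell coordinates
```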
101 | 102 | 103 | ## Running Things Locally 104 | You can run the main.py script which will use the 'example_table.jpg' as input to the model and output 'visualization.jpg' containing the visualization of the predictions on the original image. 105 | 106 | ```bash 107 | python main.py 108 | ``` 109 | 110 | ## Official Repository 111 | The official repository can be found [here](https://github.com/microsoft/table-transformer). If you want to extract more complex tables you will have to add the appropriate post-processing yourself, but you can use the official source code as a reference. :) 112 | -------------------------------------------------------------------------------- /detr/d2/train_net.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | DETR Training Script. 4 | 5 | This script is a simplified version of the training script in detectron2/tools. 6 | """ 7 | import os 8 | import sys 9 | 10 | # fmt: off 11 | sys.path.insert(1, os.path.join(sys.path[0], '..')) 12 | # fmt: on 13 | 14 | import time 15 | from typing import Any, Dict, List, Set 16 | 17 | import torch 18 | 19 | import detectron2.utils.comm as comm 20 | from d2.detr import DetrDatasetMapper, add_detr_config 21 | from detectron2.checkpoint import DetectionCheckpointer 22 | from detectron2.config import get_cfg 23 | from detectron2.data import MetadataCatalog, build_detection_train_loader 24 | from detectron2.engine import AutogradProfiler, DefaultTrainer, default_argument_parser, default_setup, launch 25 | from detectron2.evaluation import COCOEvaluator, verify_results 26 | 27 | from detectron2.solver.build import maybe_add_gradient_clipping 28 | 29 | 30 | class Trainer(DefaultTrainer): 31 | """ 32 | Extension of the Trainer class adapted to DETR. 33 | """ 34 | 35 | def __init__(self, cfg): 36 | """ 37 | Args: 38 | cfg (CfgNode): 39 | """ 40 | self.clip_norm_val = 0.0 41 | if cfg.SOLVER.CLIP_GRADIENTS.ENABLED: 42 | if cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": 43 | self.clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE 44 | super().__init__(cfg) 45 | 46 | def run_step(self): 47 | assert self.model.training, "[Trainer] model was changed to eval mode!" 48 | start = time.perf_counter() 49 | data = next(self._data_loader_iter) 50 | data_time = time.perf_counter() - start 51 | 52 | loss_dict = self.model(data) 53 | losses = sum(loss_dict.values()) 54 | self._detect_anomaly(losses, loss_dict) 55 | 56 | metrics_dict = loss_dict 57 | metrics_dict["data_time"] = data_time 58 | self._write_metrics(metrics_dict) 59 | 60 | self.optimizer.zero_grad() 61 | losses.backward() 62 | if self.clip_norm_val > 0.0: 63 | torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip_norm_val) 64 | self.optimizer.step() 65 | 66 | @classmethod 67 | def build_evaluator(cls, cfg, dataset_name, output_folder=None): 68 | """ 69 | Create evaluator(s) for a given dataset. 70 | This uses the special metadata "evaluator_type" associated with each builtin dataset. 71 | For your own dataset, you can simply create an evaluator manually in your 72 | script and do not have to worry about the hacky if-else logic here. 
73 | """ 74 | if output_folder is None: 75 | output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") 76 | return COCOEvaluator(dataset_name, cfg, True, output_folder) 77 | 78 | @classmethod 79 | def build_train_loader(cls, cfg): 80 | if "Detr" == cfg.MODEL.META_ARCHITECTURE: 81 | mapper = DetrDatasetMapper(cfg, True) 82 | else: 83 | mapper = None 84 | return build_detection_train_loader(cfg, mapper=mapper) 85 | 86 | @classmethod 87 | def build_optimizer(cls, cfg, model): 88 | params: List[Dict[str, Any]] = [] 89 | memo: Set[torch.nn.parameter.Parameter] = set() 90 | for key, value in model.named_parameters(recurse=True): 91 | if not value.requires_grad: 92 | continue 93 | # Avoid duplicating parameters 94 | if value in memo: 95 | continue 96 | memo.add(value) 97 | lr = cfg.SOLVER.BASE_LR 98 | weight_decay = cfg.SOLVER.WEIGHT_DECAY 99 | if "backbone" in key: 100 | lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER 101 | params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] 102 | 103 | optimizer_type = cfg.SOLVER.OPTIMIZER 104 | if optimizer_type == "SGD": 105 | optimizer = torch.optim.SGD(params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM) 106 | elif optimizer_type == "ADAMW": 107 | optimizer = torch.optim.AdamW(params, cfg.SOLVER.BASE_LR) 108 | else: 109 | raise NotImplementedError(f"no optimizer type {optimizer_type}") 110 | if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": 111 | optimizer = maybe_add_gradient_clipping(cfg, optimizer) 112 | return optimizer 113 | 114 | 115 | def setup(args): 116 | """ 117 | Create configs and perform basic setups. 118 | """ 119 | cfg = get_cfg() 120 | add_detr_config(cfg) 121 | cfg.merge_from_file(args.config_file) 122 | cfg.merge_from_list(args.opts) 123 | cfg.freeze() 124 | default_setup(cfg, args) 125 | return cfg 126 | 127 | 128 | def main(args): 129 | cfg = setup(args) 130 | 131 | if args.eval_only: 132 | model = Trainer.build_model(cfg) 133 | DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume) 134 | res = Trainer.test(cfg, model) 135 | if comm.is_main_process(): 136 | verify_results(cfg, res) 137 | return res 138 | 139 | trainer = Trainer(cfg) 140 | trainer.resume_or_load(resume=args.resume) 141 | return trainer.train() 142 | 143 | 144 | if __name__ == "__main__": 145 | args = default_argument_parser().parse_args() 146 | print("Command Line Args:", args) 147 | launch( 148 | main, 149 | args.num_gpus, 150 | num_machines=args.num_machines, 151 | machine_rank=args.machine_rank, 152 | dist_url=args.dist_url, 153 | args=(args,), 154 | ) 155 | -------------------------------------------------------------------------------- /detr/datasets/coco.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | COCO dataset which returns image_id for evaluation. 
4 | 5 | Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py 6 | """ 7 | from pathlib import Path 8 | 9 | import torch 10 | import torch.utils.data 11 | import torchvision 12 | from pycocotools import mask as coco_mask 13 | 14 | import datasets.transforms as T 15 | 16 | 17 | class CocoDetection(torchvision.datasets.CocoDetection): 18 | def __init__(self, img_folder, ann_file, transforms, return_masks): 19 | super(CocoDetection, self).__init__(img_folder, ann_file) 20 | self._transforms = transforms 21 | self.prepare = ConvertCocoPolysToMask(return_masks) 22 | 23 | def __getitem__(self, idx): 24 | img, target = super(CocoDetection, self).__getitem__(idx) 25 | image_id = self.ids[idx] 26 | target = {'image_id': image_id, 'annotations': target} 27 | img, target = self.prepare(img, target) 28 | if self._transforms is not None: 29 | img, target = self._transforms(img, target) 30 | return img, target 31 | 32 | 33 | def convert_coco_poly_to_mask(segmentations, height, width): 34 | masks = [] 35 | for polygons in segmentations: 36 | rles = coco_mask.frPyObjects(polygons, height, width) 37 | mask = coco_mask.decode(rles) 38 | if len(mask.shape) < 3: 39 | mask = mask[..., None] 40 | mask = torch.as_tensor(mask, dtype=torch.uint8) 41 | mask = mask.any(dim=2) 42 | masks.append(mask) 43 | if masks: 44 | masks = torch.stack(masks, dim=0) 45 | else: 46 | masks = torch.zeros((0, height, width), dtype=torch.uint8) 47 | return masks 48 | 49 | 50 | class ConvertCocoPolysToMask(object): 51 | def __init__(self, return_masks=False): 52 | self.return_masks = return_masks 53 | 54 | def __call__(self, image, target): 55 | w, h = image.size 56 | 57 | image_id = target["image_id"] 58 | image_id = torch.tensor([image_id]) 59 | 60 | anno = target["annotations"] 61 | 62 | anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] 63 | 64 | boxes = [obj["bbox"] for obj in anno] 65 | # guard against no boxes via resizing 66 | boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) 67 | boxes[:, 2:] += boxes[:, :2] 68 | boxes[:, 0::2].clamp_(min=0, max=w) 69 | boxes[:, 1::2].clamp_(min=0, max=h) 70 | 71 | classes = [obj["category_id"] for obj in anno] 72 | classes = torch.tensor(classes, dtype=torch.int64) 73 | 74 | if self.return_masks: 75 | segmentations = [obj["segmentation"] for obj in anno] 76 | masks = convert_coco_poly_to_mask(segmentations, h, w) 77 | 78 | keypoints = None 79 | if anno and "keypoints" in anno[0]: 80 | keypoints = [obj["keypoints"] for obj in anno] 81 | keypoints = torch.as_tensor(keypoints, dtype=torch.float32) 82 | num_keypoints = keypoints.shape[0] 83 | if num_keypoints: 84 | keypoints = keypoints.view(num_keypoints, -1, 3) 85 | 86 | keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) 87 | boxes = boxes[keep] 88 | classes = classes[keep] 89 | if self.return_masks: 90 | masks = masks[keep] 91 | if keypoints is not None: 92 | keypoints = keypoints[keep] 93 | 94 | target = {} 95 | target["boxes"] = boxes 96 | target["labels"] = classes 97 | if self.return_masks: 98 | target["masks"] = masks 99 | target["image_id"] = image_id 100 | if keypoints is not None: 101 | target["keypoints"] = keypoints 102 | 103 | # for conversion to coco api 104 | area = torch.tensor([obj["area"] for obj in anno]) 105 | iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]) 106 | target["area"] = area[keep] 107 | target["iscrowd"] = iscrowd[keep] 108 | 109 | target["orig_size"] = 
torch.as_tensor([int(h), int(w)]) 110 | target["size"] = torch.as_tensor([int(h), int(w)]) 111 | 112 | return image, target 113 | 114 | 115 | def make_coco_transforms(image_set): 116 | 117 | normalize = T.Compose([ 118 | T.ToTensor(), 119 | T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 120 | ]) 121 | 122 | scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800] 123 | 124 | if image_set == 'train': 125 | return T.Compose([ 126 | T.RandomHorizontalFlip(), 127 | T.RandomSelect( 128 | T.RandomResize(scales, max_size=1333), 129 | T.Compose([ 130 | T.RandomResize([400, 500, 600]), 131 | T.RandomSizeCrop(384, 600), 132 | T.RandomResize(scales, max_size=1333), 133 | ]) 134 | ), 135 | normalize, 136 | ]) 137 | 138 | if image_set == 'val': 139 | return T.Compose([ 140 | T.RandomResize([800], max_size=1333), 141 | normalize, 142 | ]) 143 | 144 | raise ValueError(f'unknown {image_set}') 145 | 146 | 147 | def build(image_set, args): 148 | root = Path(args.coco_path) 149 | assert root.exists(), f'provided COCO path {root} does not exist' 150 | mode = 'instances' 151 | PATHS = { 152 | "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), 153 | "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), 154 | } 155 | 156 | img_folder, ann_file = PATHS[image_set] 157 | dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks) 158 | return dataset 159 | -------------------------------------------------------------------------------- /src/transforms.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (C) 2021 Microsoft Corporation 3 | """ 4 | import random 5 | import math 6 | 7 | import torch 8 | import PIL 9 | from PIL import ImageFilter 10 | from torchvision.transforms import functional as F 11 | 12 | 13 | def _flip_coco_person_keypoints(kps, width): 14 | flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] 15 | flipped_data = kps[:, flip_inds] 16 | flipped_data[..., 0] = width - flipped_data[..., 0] 17 | # Maintain COCO convention that if visibility == 0, then x, y = 0 18 | inds = flipped_data[..., 2] == 0 19 | flipped_data[inds] = 0 20 | return flipped_data 21 | 22 | 23 | def box_cxcywh_to_xyxy(x): 24 | x_c, y_c, w, h = x.unbind(-1) 25 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h), 26 | (x_c + 0.5 * w), (y_c + 0.5 * h)] 27 | return torch.stack(b, dim=-1) 28 | 29 | 30 | def box_xyxy_to_cxcywh(x): 31 | x0, y0, x1, y1 = x.unbind(-1) 32 | b = [(x0 + x1) / 2, (y0 + y1) / 2, 33 | (x1 - x0), (y1 - y0)] 34 | return torch.stack(b, dim=-1) 35 | 36 | 37 | class Compose(object): 38 | def __init__(self, transforms): 39 | self.transforms = transforms 40 | 41 | def __call__(self, image, target): 42 | for t in self.transforms: 43 | image, target = t(image, target) 44 | return image, target 45 | 46 | 47 | class RandomHorizontalFlip(object): 48 | def __init__(self, prob): 49 | self.prob = prob 50 | 51 | def __call__(self, image, target): 52 | if random.random() < self.prob: 53 | height, width = image.shape[-2:] 54 | image = image.flip(-1) 55 | bbox = target["boxes"] 56 | bbox[:, [0, 2]] = width - bbox[:, [2, 0]] 57 | target["boxes"] = bbox 58 | if "masks" in target: 59 | target["masks"] = target["masks"].flip(-1) 60 | if "keypoints" in target: 61 | keypoints = target["keypoints"] 62 | keypoints = _flip_coco_person_keypoints(keypoints, width) 63 | target["keypoints"] = keypoints 64 | return image, target 65 | 66 | 67 | class 
RandomCrop(object): 68 | def __init__(self, prob, left_scale, top_scale, right_scale, bottom_scale): 69 | self.prob = prob 70 | self.left_scale = left_scale 71 | self.top_scale = top_scale 72 | self.right_scale = right_scale 73 | self.bottom_scale = bottom_scale 74 | 75 | def __call__(self, image, target): 76 | if random.random() < self.prob: 77 | width, height = image.size 78 | left = int(math.floor(width * 0.5 * self.left_scale * random.random())) 79 | top = int(math.floor(height * 0.5 * self.top_scale * random.random())) 80 | right = width - int(math.floor(width * 0.5 * self.right_scale * random.random())) 81 | bottom = height - int(math.floor(height * 0.5 * self.bottom_scale * random.random())) 82 | cropped_image = image.crop((left, top, right, bottom)) 83 | cropped_bboxes = [] 84 | cropped_labels = [] 85 | for bbox, label in zip(target["boxes"], target["labels"]): 86 | bbox = [max(bbox[0], left) - left, 87 | max(bbox[1], top) - top, 88 | min(bbox[2], right) - left, 89 | min(bbox[3], bottom) - top] 90 | if bbox[0] < bbox[2] and bbox[1] < bbox[3]: 91 | cropped_bboxes.append(bbox) 92 | cropped_labels.append(label) 93 | 94 | if len(cropped_bboxes) > 0: 95 | target["boxes"] = torch.as_tensor(cropped_bboxes, dtype=torch.float32) 96 | target["labels"] = torch.as_tensor(cropped_labels, dtype=torch.int64) 97 | return cropped_image, target 98 | 99 | return image, target 100 | 101 | 102 | class RandomBlur(object): 103 | def __init__(self, prob, max_radius): 104 | self.prob = prob 105 | self.max_radius = max_radius 106 | 107 | def __call__(self, image, target): 108 | if random.random() < self.prob: 109 | radius = random.random() * self.max_radius 110 | image = image.filter(filter=ImageFilter.GaussianBlur(radius=radius)) 111 | 112 | return image, target 113 | 114 | 115 | class RandomResize(object): 116 | def __init__(self, prob, min_scale_factor, max_scale_factor): 117 | self.prob = prob 118 | self.min_scale_factor = min_scale_factor 119 | self.max_scale_factor = max_scale_factor 120 | 121 | def __call__(self, image, target): 122 | if random.random() < self.prob: 123 | prob = random.random() 124 | scale_factor = prob*self.max_scale_factor + (1-prob)*self.min_scale_factor 125 | new_width = int(round(scale_factor * image.width)) 126 | new_height = int(round(scale_factor * image.height)) 127 | resized_image = image.resize((new_width, new_height), resample=PIL.Image.LANCZOS) 128 | resized_bboxes = [] 129 | resized_labels = [] 130 | for bbox, label in zip(target["boxes"], target["labels"]): 131 | bbox = [elem*scale_factor for elem in bbox] 132 | if bbox[0] < bbox[2] - 1 and bbox[1] < bbox[3] - 1: 133 | resized_bboxes.append(bbox) 134 | resized_labels.append(label) 135 | 136 | if len(resized_bboxes) > 0: 137 | target["boxes"] = torch.as_tensor(resized_bboxes, dtype=torch.float32) 138 | target["labels"] = torch.as_tensor(resized_labels, dtype=torch.int64) 139 | return resized_image, target 140 | 141 | return image, target 142 | 143 | 144 | class Normalize(object): 145 | def __init__(self, mean, std): 146 | self.mean = mean 147 | self.std = std 148 | 149 | def __call__(self, image, target=None): 150 | image = F.normalize(image, mean=self.mean, std=self.std) 151 | if target is None: 152 | return image, None 153 | target = target.copy() 154 | h, w = image.shape[-2:] 155 | if "boxes" in target: 156 | boxes = target["boxes"] 157 | boxes = box_xyxy_to_cxcywh(boxes) 158 | boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) 159 | target["boxes"] = boxes 160 | return image, target 161 | 162 | 163 | 
class ToTensor(object): 164 | def __call__(self, image, target): 165 | image = F.to_tensor(image) 166 | return image, target 167 | -------------------------------------------------------------------------------- /detr/hubconf.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import torch 3 | 4 | from models.backbone import Backbone, Joiner 5 | from models.detr import DETR, PostProcess 6 | from models.position_encoding import PositionEmbeddingSine 7 | from models.segmentation import DETRsegm, PostProcessPanoptic 8 | from models.transformer import Transformer 9 | 10 | dependencies = ["torch", "torchvision"] 11 | 12 | 13 | def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False): 14 | hidden_dim = 256 15 | backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation) 16 | pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True) 17 | backbone_with_pos_enc = Joiner(backbone, pos_enc) 18 | backbone_with_pos_enc.num_channels = backbone.num_channels 19 | transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True) 20 | detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100) 21 | if mask: 22 | return DETRsegm(detr) 23 | return detr 24 | 25 | 26 | def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False): 27 | """ 28 | DETR R50 with 6 encoder and 6 decoder layers. 29 | 30 | Achieves 42/62.4 AP/AP50 on COCO val5k. 31 | """ 32 | model = _make_detr("resnet50", dilation=False, num_classes=num_classes) 33 | if pretrained: 34 | checkpoint = torch.hub.load_state_dict_from_url( 35 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True 36 | ) 37 | model.load_state_dict(checkpoint["model"]) 38 | if return_postprocessor: 39 | return model, PostProcess() 40 | return model 41 | 42 | 43 | def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False): 44 | """ 45 | DETR-DC5 R50 with 6 encoder and 6 decoder layers. 46 | 47 | The last block of ResNet-50 has dilation to increase 48 | output resolution. 49 | Achieves 43.3/63.1 AP/AP50 on COCO val5k. 50 | """ 51 | model = _make_detr("resnet50", dilation=True, num_classes=num_classes) 52 | if pretrained: 53 | checkpoint = torch.hub.load_state_dict_from_url( 54 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True 55 | ) 56 | model.load_state_dict(checkpoint["model"]) 57 | if return_postprocessor: 58 | return model, PostProcess() 59 | return model 60 | 61 | 62 | def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False): 63 | """ 64 | DETR-DC5 R101 with 6 encoder and 6 decoder layers. 65 | 66 | Achieves 43.5/63.8 AP/AP50 on COCO val5k. 67 | """ 68 | model = _make_detr("resnet101", dilation=False, num_classes=num_classes) 69 | if pretrained: 70 | checkpoint = torch.hub.load_state_dict_from_url( 71 | url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True 72 | ) 73 | model.load_state_dict(checkpoint["model"]) 74 | if return_postprocessor: 75 | return model, PostProcess() 76 | return model 77 | 78 | 79 | def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False): 80 | """ 81 | DETR-DC5 R101 with 6 encoder and 6 decoder layers. 
82 | 83 | The last block of ResNet-101 has dilation to increase 84 | output resolution. 85 | Achieves 44.9/64.7 AP/AP50 on COCO val5k. 86 | """ 87 | model = _make_detr("resnet101", dilation=True, num_classes=num_classes) 88 | if pretrained: 89 | checkpoint = torch.hub.load_state_dict_from_url( 90 | url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True 91 | ) 92 | model.load_state_dict(checkpoint["model"]) 93 | if return_postprocessor: 94 | return model, PostProcess() 95 | return model 96 | 97 | 98 | def detr_resnet50_panoptic( 99 | pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False 100 | ): 101 | """ 102 | DETR R50 with 6 encoder and 6 decoder layers. 103 | Achieves 43.4 PQ on COCO val5k. 104 | 105 | threshold is the minimum confidence required for keeping segments in the prediction 106 | """ 107 | model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) 108 | is_thing_map = {i: i <= 90 for i in range(250)} 109 | if pretrained: 110 | checkpoint = torch.hub.load_state_dict_from_url( 111 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", 112 | map_location="cpu", 113 | check_hash=True, 114 | ) 115 | model.load_state_dict(checkpoint["model"]) 116 | if return_postprocessor: 117 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 118 | return model 119 | 120 | 121 | def detr_resnet50_dc5_panoptic( 122 | pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False 123 | ): 124 | """ 125 | DETR-DC5 R50 with 6 encoder and 6 decoder layers. 126 | 127 | The last block of ResNet-50 has dilation to increase 128 | output resolution. 129 | Achieves 44.6 on COCO val5k. 130 | 131 | threshold is the minimum confidence required for keeping segments in the prediction 132 | """ 133 | model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) 134 | is_thing_map = {i: i <= 90 for i in range(250)} 135 | if pretrained: 136 | checkpoint = torch.hub.load_state_dict_from_url( 137 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", 138 | map_location="cpu", 139 | check_hash=True, 140 | ) 141 | model.load_state_dict(checkpoint["model"]) 142 | if return_postprocessor: 143 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 144 | return model 145 | 146 | 147 | def detr_resnet101_panoptic( 148 | pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False 149 | ): 150 | """ 151 | DETR-DC5 R101 with 6 encoder and 6 decoder layers. 152 | 153 | Achieves 45.1 PQ on COCO val5k. 154 | 155 | threshold is the minimum confidence required for keeping segments in the prediction 156 | """ 157 | model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) 158 | is_thing_map = {i: i <= 90 for i in range(250)} 159 | if pretrained: 160 | checkpoint = torch.hub.load_state_dict_from_url( 161 | url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", 162 | map_location="cpu", 163 | check_hash=True, 164 | ) 165 | model.load_state_dict(checkpoint["model"]) 166 | if return_postprocessor: 167 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 168 | return model 169 | -------------------------------------------------------------------------------- /detr/engine.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | """ 3 | Train and eval functions used in main.py 4 | """ 5 | import math 6 | import os 7 | import sys 8 | from typing import Iterable 9 | 10 | import torch 11 | 12 | import util.misc as utils 13 | from datasets.coco_eval import CocoEvaluator 14 | from datasets.panoptic_eval import PanopticEvaluator 15 | 16 | 17 | def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, 18 | data_loader: Iterable, optimizer: torch.optim.Optimizer, 19 | device: torch.device, epoch: int, max_norm: float = 0, 20 | max_batches_per_epoch: int = None, print_freq=100): 21 | model.train() 22 | criterion.train() 23 | metric_logger = utils.MetricLogger(delimiter=" ") 24 | metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) 25 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 26 | header = 'Epoch: [{}]'.format(epoch) 27 | 28 | batch_count = 0 29 | for samples, targets in metric_logger.log_every(data_loader, print_freq, header): 30 | batch_count += 1 31 | if not max_batches_per_epoch is None and batch_count > max_batches_per_epoch: 32 | break 33 | samples = samples.to(device) 34 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 35 | 36 | outputs = model(samples) 37 | loss_dict = criterion(outputs, targets) 38 | weight_dict = criterion.weight_dict 39 | losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) 40 | 41 | # reduce losses over all GPUs for logging purposes 42 | loss_dict_reduced = utils.reduce_dict(loss_dict) 43 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 44 | for k, v in loss_dict_reduced.items()} 45 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 46 | for k, v in loss_dict_reduced.items() if k in weight_dict} 47 | losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) 48 | 49 | loss_value = losses_reduced_scaled.item() 50 | 51 | if not math.isfinite(loss_value): 52 | print("Loss is {}, stopping training".format(loss_value)) 53 | print(loss_dict_reduced) 54 | sys.exit(1) 55 | 56 | optimizer.zero_grad() 57 | losses.backward() 58 | if max_norm > 0: 59 | torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) 60 | optimizer.step() 61 | 62 | metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) 63 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 64 | metric_logger.update(lr=optimizer.param_groups[0]["lr"]) 65 | # gather the stats from all processes 66 | metric_logger.synchronize_between_processes() 67 | print("Averaged stats:", metric_logger) 68 | return {k: meter.global_avg for k, meter in metric_logger.meters.items()} 69 | 70 | 71 | @torch.no_grad() 72 | def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir): 73 | model.eval() 74 | criterion.eval() 75 | 76 | metric_logger = utils.MetricLogger(delimiter=" ") 77 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 78 | header = 'Test:' 79 | 80 | iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) 81 | coco_evaluator = CocoEvaluator(base_ds, iou_types) 82 | # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] 83 | 84 | panoptic_evaluator = None 85 | if 'panoptic' in postprocessors.keys(): 86 | panoptic_evaluator = PanopticEvaluator( 87 | data_loader.dataset.ann_file, 88 | data_loader.dataset.ann_folder, 89 | output_dir=os.path.join(output_dir, "panoptic_eval"), 90 | ) 91 | 92 | for 
samples, targets in metric_logger.log_every(data_loader, 1000, header): 93 | samples = samples.to(device) 94 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 95 | 96 | outputs = model(samples) 97 | loss_dict = criterion(outputs, targets) 98 | weight_dict = criterion.weight_dict 99 | 100 | # reduce losses over all GPUs for logging purposes 101 | loss_dict_reduced = utils.reduce_dict(loss_dict) 102 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 103 | for k, v in loss_dict_reduced.items() if k in weight_dict} 104 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 105 | for k, v in loss_dict_reduced.items()} 106 | metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), 107 | **loss_dict_reduced_scaled, 108 | **loss_dict_reduced_unscaled) 109 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 110 | 111 | orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) 112 | results = postprocessors['bbox'](outputs, orig_target_sizes) 113 | if 'segm' in postprocessors.keys(): 114 | target_sizes = torch.stack([t["size"] for t in targets], dim=0) 115 | results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) 116 | res = {target['image_id'].item(): output for target, output in zip(targets, results)} 117 | if coco_evaluator is not None: 118 | coco_evaluator.update(res) 119 | 120 | if panoptic_evaluator is not None: 121 | res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) 122 | for i, target in enumerate(targets): 123 | image_id = target["image_id"].item() 124 | file_name = f"{image_id:012d}.png" 125 | res_pano[i]["image_id"] = image_id 126 | res_pano[i]["file_name"] = file_name 127 | 128 | panoptic_evaluator.update(res_pano) 129 | 130 | # gather the stats from all processes 131 | metric_logger.synchronize_between_processes() 132 | print("Averaged stats:", metric_logger) 133 | if coco_evaluator is not None: 134 | coco_evaluator.synchronize_between_processes() 135 | if panoptic_evaluator is not None: 136 | panoptic_evaluator.synchronize_between_processes() 137 | 138 | # accumulate predictions from all images 139 | if coco_evaluator is not None: 140 | coco_evaluator.accumulate() 141 | coco_evaluator.summarize() 142 | panoptic_res = None 143 | if panoptic_evaluator is not None: 144 | panoptic_res = panoptic_evaluator.summarize() 145 | stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} 146 | if coco_evaluator is not None: 147 | if 'bbox' in postprocessors.keys(): 148 | stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() 149 | if 'segm' in postprocessors.keys(): 150 | stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() 151 | if panoptic_res is not None: 152 | stats['PQ_all'] = panoptic_res["All"] 153 | stats['PQ_th'] = panoptic_res["Things"] 154 | stats['PQ_st'] = panoptic_res["Stuff"] 155 | return stats, coco_evaluator 156 | -------------------------------------------------------------------------------- /detr/test_all.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | import io 3 | import unittest 4 | 5 | import torch 6 | 7 | from models.matcher import HungarianMatcher 8 | from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned 9 | from models.backbone import Backbone, Joiner, BackboneBase 10 | from util import box_ops 11 | from util.misc import nested_tensor_from_tensor_list 12 | from hubconf import detr_resnet50, detr_resnet50_panoptic 13 | 14 | # onnxruntime requires python 3.5 or above 15 | try: 16 | import onnxruntime 17 | except ImportError: 18 | onnxruntime = None 19 | 20 | 21 | class Tester(unittest.TestCase): 22 | 23 | def test_box_cxcywh_to_xyxy(self): 24 | t = torch.rand(10, 4) 25 | r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t)) 26 | self.assertLess((t - r).abs().max(), 1e-5) 27 | 28 | @staticmethod 29 | def indices_torch2python(indices): 30 | return [(i.tolist(), j.tolist()) for i, j in indices] 31 | 32 | def test_hungarian(self): 33 | n_queries, n_targets, n_classes = 100, 15, 91 34 | logits = torch.rand(1, n_queries, n_classes + 1) 35 | boxes = torch.rand(1, n_queries, 4) 36 | tgt_labels = torch.randint(high=n_classes, size=(n_targets,)) 37 | tgt_boxes = torch.rand(n_targets, 4) 38 | matcher = HungarianMatcher() 39 | targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}] 40 | indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets) 41 | indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1), 42 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2) 43 | self.assertEqual(len(indices_single[0][0]), n_targets) 44 | self.assertEqual(len(indices_single[0][1]), n_targets) 45 | self.assertEqual(self.indices_torch2python(indices_single), 46 | self.indices_torch2python([indices_batched[0]])) 47 | self.assertEqual(self.indices_torch2python(indices_single), 48 | self.indices_torch2python([indices_batched[1]])) 49 | 50 | # test with empty targets 51 | tgt_labels_empty = torch.randint(high=n_classes, size=(0,)) 52 | tgt_boxes_empty = torch.rand(0, 4) 53 | targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}] 54 | indices = matcher({'pred_logits': logits.repeat(2, 1, 1), 55 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty) 56 | self.assertEqual(len(indices[1][0]), 0) 57 | indices = matcher({'pred_logits': logits.repeat(2, 1, 1), 58 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2) 59 | self.assertEqual(len(indices[0][0]), 0) 60 | 61 | def test_position_encoding_script(self): 62 | m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned() 63 | mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa 64 | 65 | def test_backbone_script(self): 66 | backbone = Backbone('resnet50', True, False, False) 67 | torch.jit.script(backbone) # noqa 68 | 69 | def test_model_script_detection(self): 70 | model = detr_resnet50(pretrained=False).eval() 71 | scripted_model = torch.jit.script(model) 72 | x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) 73 | out = model(x) 74 | out_script = scripted_model(x) 75 | self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) 76 | self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) 77 | 78 | def test_model_script_panoptic(self): 79 | model = detr_resnet50_panoptic(pretrained=False).eval() 80 | scripted_model = torch.jit.script(model) 81 | x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) 82 | out = model(x) 83 | out_script = scripted_model(x) 84 | 
self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) 85 | self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) 86 | self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"])) 87 | 88 | def test_model_detection_different_inputs(self): 89 | model = detr_resnet50(pretrained=False).eval() 90 | # support NestedTensor 91 | x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) 92 | out = model(x) 93 | self.assertIn('pred_logits', out) 94 | # and 4d Tensor 95 | x = torch.rand(1, 3, 200, 200) 96 | out = model(x) 97 | self.assertIn('pred_logits', out) 98 | # and List[Tensor[C, H, W]] 99 | x = torch.rand(3, 200, 200) 100 | out = model([x]) 101 | self.assertIn('pred_logits', out) 102 | 103 | 104 | @unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable') 105 | class ONNXExporterTester(unittest.TestCase): 106 | @classmethod 107 | def setUpClass(cls): 108 | torch.manual_seed(123) 109 | 110 | def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None, 111 | output_names=None, input_names=None): 112 | model.eval() 113 | 114 | onnx_io = io.BytesIO() 115 | # export to onnx with the first input 116 | torch.onnx.export(model, inputs_list[0], onnx_io, 117 | do_constant_folding=do_constant_folding, opset_version=12, 118 | dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names) 119 | # validate the exported model with onnx runtime 120 | for test_inputs in inputs_list: 121 | with torch.no_grad(): 122 | if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list): 123 | test_inputs = (nested_tensor_from_tensor_list(test_inputs),) 124 | test_ouputs = model(*test_inputs) 125 | if isinstance(test_ouputs, torch.Tensor): 126 | test_ouputs = (test_ouputs,) 127 | self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) 128 | 129 | def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): 130 | 131 | inputs, _ = torch.jit._flatten(inputs) 132 | outputs, _ = torch.jit._flatten(outputs) 133 | 134 | def to_numpy(tensor): 135 | if tensor.requires_grad: 136 | return tensor.detach().cpu().numpy() 137 | else: 138 | return tensor.cpu().numpy() 139 | 140 | inputs = list(map(to_numpy, inputs)) 141 | outputs = list(map(to_numpy, outputs)) 142 | 143 | ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) 144 | # compute onnxruntime output prediction 145 | ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) 146 | ort_outs = ort_session.run(None, ort_inputs) 147 | for i in range(0, len(outputs)): 148 | try: 149 | torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05) 150 | except AssertionError as error: 151 | if tolerate_small_mismatch: 152 | self.assertIn("(0.00%)", str(error), str(error)) 153 | else: 154 | raise 155 | 156 | def test_model_onnx_detection(self): 157 | model = detr_resnet50(pretrained=False).eval() 158 | dummy_image = torch.ones(1, 3, 800, 800) * 0.3 159 | model(dummy_image) 160 | 161 | # Test exported model on images of different size, or dummy input 162 | self.run_model( 163 | model, 164 | [(torch.rand(1, 3, 750, 800),)], 165 | input_names=["inputs"], 166 | output_names=["pred_logits", "pred_boxes"], 167 | tolerate_small_mismatch=True, 168 | ) 169 | 170 | 171 | if __name__ == '__main__': 172 | unittest.main() 173 | -------------------------------------------------------------------------------- /detr/engine_multi.py: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Train and eval functions used in main.py 4 | """ 5 | import math 6 | import os 7 | import sys 8 | from typing import Iterable 9 | 10 | import torch 11 | 12 | import util.misc as utils 13 | from datasets.coco_eval import CocoEvaluator 14 | from datasets.panoptic_eval import PanopticEvaluator 15 | 16 | 17 | def train_one_epoch(model_list, criterion_list, 18 | data_loader_list, optimizer: torch.optim.Optimizer, 19 | device: torch.device, epoch: int, max_norm: float = 0, 20 | max_batches_per_epoch: int = None, print_freq=100): 21 | for model in model_list: 22 | model.train() 23 | for criterion in criterion_list: 24 | criterion.train() 25 | 26 | #metric_loggers = [] 27 | #headers = [] 28 | #for model in model_list: 29 | metric_logger = utils.MetricLogger(delimiter=" ") 30 | metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) 31 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 32 | header = 'Epoch: [{}]'.format(epoch) 33 | 34 | # metric_loggers.append(metric_logger) 35 | # headers.append(header) 36 | 37 | batch_count = 0 38 | while not max_batches_per_epoch is None and batch_count <= max_batches_per_epoch: 39 | for model, criterion, data_loader in zip(model_list, criterion_list, data_loader_list): 40 | alternating_count = 0 41 | for samples, targets in metric_logger.log_every(data_loader, print_freq, header): 42 | alternating_count += 1 43 | if alternating_count > print_freq or (not max_batches_per_epoch is None and batch_count > max_batches_per_epoch): 44 | break 45 | batch_count += 1 46 | samples = samples.to(device) 47 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 48 | 49 | outputs = model(samples) 50 | loss_dict = criterion(outputs, targets) 51 | weight_dict = criterion.weight_dict 52 | losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) 53 | 54 | # reduce losses over all GPUs for logging purposes 55 | loss_dict_reduced = utils.reduce_dict(loss_dict) 56 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 57 | for k, v in loss_dict_reduced.items()} 58 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 59 | for k, v in loss_dict_reduced.items() if k in weight_dict} 60 | losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) 61 | 62 | loss_value = losses_reduced_scaled.item() 63 | 64 | if not math.isfinite(loss_value): 65 | print("Loss is {}, stopping training".format(loss_value)) 66 | print(loss_dict_reduced) 67 | sys.exit(1) 68 | 69 | optimizer.zero_grad() 70 | losses.backward() 71 | if max_norm > 0: 72 | torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) 73 | optimizer.step() 74 | 75 | metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) 76 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 77 | metric_logger.update(lr=optimizer.param_groups[0]["lr"]) 78 | # gather the stats from all processes 79 | metric_logger.synchronize_between_processes() 80 | #print("Averaged stats:", metric_logger) 81 | return {k: meter.global_avg for k, meter in metric_logger.meters.items()} 82 | 83 | 84 | @torch.no_grad() 85 | def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir): 86 | model.eval() 87 | criterion.eval() 88 | 89 | metric_logger = utils.MetricLogger(delimiter=" ") 90 | 
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 91 | header = 'Test:' 92 | 93 | iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) 94 | coco_evaluator = CocoEvaluator(base_ds, iou_types) 95 | # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] 96 | 97 | panoptic_evaluator = None 98 | if 'panoptic' in postprocessors.keys(): 99 | panoptic_evaluator = PanopticEvaluator( 100 | data_loader.dataset.ann_file, 101 | data_loader.dataset.ann_folder, 102 | output_dir=os.path.join(output_dir, "panoptic_eval"), 103 | ) 104 | 105 | for samples, targets in metric_logger.log_every(data_loader, 10, header): 106 | samples = samples.to(device) 107 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 108 | 109 | outputs = model(samples) 110 | loss_dict = criterion(outputs, targets) 111 | weight_dict = criterion.weight_dict 112 | 113 | # reduce losses over all GPUs for logging purposes 114 | loss_dict_reduced = utils.reduce_dict(loss_dict) 115 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 116 | for k, v in loss_dict_reduced.items() if k in weight_dict} 117 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 118 | for k, v in loss_dict_reduced.items()} 119 | metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), 120 | **loss_dict_reduced_scaled, 121 | **loss_dict_reduced_unscaled) 122 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 123 | 124 | orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) 125 | results = postprocessors['bbox'](outputs, orig_target_sizes) 126 | if 'segm' in postprocessors.keys(): 127 | target_sizes = torch.stack([t["size"] for t in targets], dim=0) 128 | results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) 129 | res = {target['image_id'].item(): output for target, output in zip(targets, results)} 130 | if coco_evaluator is not None: 131 | coco_evaluator.update(res) 132 | 133 | if panoptic_evaluator is not None: 134 | res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) 135 | for i, target in enumerate(targets): 136 | image_id = target["image_id"].item() 137 | file_name = f"{image_id:012d}.png" 138 | res_pano[i]["image_id"] = image_id 139 | res_pano[i]["file_name"] = file_name 140 | 141 | panoptic_evaluator.update(res_pano) 142 | 143 | # gather the stats from all processes 144 | metric_logger.synchronize_between_processes() 145 | print("Averaged stats:", metric_logger) 146 | if coco_evaluator is not None: 147 | coco_evaluator.synchronize_between_processes() 148 | if panoptic_evaluator is not None: 149 | panoptic_evaluator.synchronize_between_processes() 150 | 151 | # accumulate predictions from all images 152 | if coco_evaluator is not None: 153 | coco_evaluator.accumulate() 154 | coco_evaluator.summarize() 155 | panoptic_res = None 156 | if panoptic_evaluator is not None: 157 | panoptic_res = panoptic_evaluator.summarize() 158 | stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} 159 | if coco_evaluator is not None: 160 | if 'bbox' in postprocessors.keys(): 161 | stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() 162 | if 'segm' in postprocessors.keys(): 163 | stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() 164 | if panoptic_res is not None: 165 | stats['PQ_all'] = panoptic_res["All"] 166 | stats['PQ_th'] = panoptic_res["Things"] 167 | stats['PQ_st'] = panoptic_res["Stuff"] 168 | return 
stats, coco_evaluator 169 | -------------------------------------------------------------------------------- /detr/datasets/coco_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | COCO evaluator that works in distributed mode. 4 | 5 | Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py 6 | The difference is that there is less copy-pasting from pycocotools 7 | in the end of the file, as python3 can suppress prints with contextlib 8 | """ 9 | import os 10 | import contextlib 11 | import copy 12 | import numpy as np 13 | import torch 14 | 15 | from pycocotools.cocoeval import COCOeval 16 | from pycocotools.coco import COCO 17 | import pycocotools.mask as mask_util 18 | 19 | from util.misc import all_gather 20 | 21 | 22 | class CocoEvaluator(object): 23 | def __init__(self, coco_gt, iou_types): 24 | assert isinstance(iou_types, (list, tuple)) 25 | coco_gt = copy.deepcopy(coco_gt) 26 | self.coco_gt = coco_gt 27 | 28 | self.iou_types = iou_types 29 | self.coco_eval = {} 30 | for iou_type in iou_types: 31 | self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) 32 | 33 | self.img_ids = [] 34 | self.eval_imgs = {k: [] for k in iou_types} 35 | 36 | def update(self, predictions): 37 | img_ids = list(np.unique(list(predictions.keys()))) 38 | self.img_ids.extend(img_ids) 39 | 40 | for iou_type in self.iou_types: 41 | results = self.prepare(predictions, iou_type) 42 | 43 | # suppress pycocotools prints 44 | with open(os.devnull, 'w') as devnull: 45 | with contextlib.redirect_stdout(devnull): 46 | coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() 47 | coco_eval = self.coco_eval[iou_type] 48 | 49 | coco_eval.cocoDt = coco_dt 50 | coco_eval.params.imgIds = list(img_ids) 51 | img_ids, eval_imgs = evaluate(coco_eval) 52 | 53 | self.eval_imgs[iou_type].append(eval_imgs) 54 | 55 | def synchronize_between_processes(self): 56 | for iou_type in self.iou_types: 57 | self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) 58 | create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) 59 | 60 | def accumulate(self): 61 | for coco_eval in self.coco_eval.values(): 62 | coco_eval.accumulate() 63 | 64 | def summarize(self): 65 | for iou_type, coco_eval in self.coco_eval.items(): 66 | print("IoU metric: {}".format(iou_type)) 67 | coco_eval.summarize() 68 | 69 | def prepare(self, predictions, iou_type): 70 | if iou_type == "bbox": 71 | return self.prepare_for_coco_detection(predictions) 72 | elif iou_type == "segm": 73 | return self.prepare_for_coco_segmentation(predictions) 74 | elif iou_type == "keypoints": 75 | return self.prepare_for_coco_keypoint(predictions) 76 | else: 77 | raise ValueError("Unknown iou type {}".format(iou_type)) 78 | 79 | def prepare_for_coco_detection(self, predictions): 80 | coco_results = [] 81 | for original_id, prediction in predictions.items(): 82 | if len(prediction) == 0: 83 | continue 84 | 85 | boxes = prediction["boxes"] 86 | boxes = convert_to_xywh(boxes).tolist() 87 | scores = prediction["scores"].tolist() 88 | labels = prediction["labels"].tolist() 89 | 90 | coco_results.extend( 91 | [ 92 | { 93 | "image_id": original_id, 94 | "category_id": labels[k], 95 | "bbox": box, 96 | "score": scores[k], 97 | } 98 | for k, box in enumerate(boxes) 99 | ] 100 | ) 101 | return coco_results 102 | 103 | def prepare_for_coco_segmentation(self, 
predictions): 104 | coco_results = [] 105 | for original_id, prediction in predictions.items(): 106 | if len(prediction) == 0: 107 | continue 108 | 109 | scores = prediction["scores"] 110 | labels = prediction["labels"] 111 | masks = prediction["masks"] 112 | 113 | masks = masks > 0.5 114 | 115 | scores = prediction["scores"].tolist() 116 | labels = prediction["labels"].tolist() 117 | 118 | rles = [ 119 | mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] 120 | for mask in masks 121 | ] 122 | for rle in rles: 123 | rle["counts"] = rle["counts"].decode("utf-8") 124 | 125 | coco_results.extend( 126 | [ 127 | { 128 | "image_id": original_id, 129 | "category_id": labels[k], 130 | "segmentation": rle, 131 | "score": scores[k], 132 | } 133 | for k, rle in enumerate(rles) 134 | ] 135 | ) 136 | return coco_results 137 | 138 | def prepare_for_coco_keypoint(self, predictions): 139 | coco_results = [] 140 | for original_id, prediction in predictions.items(): 141 | if len(prediction) == 0: 142 | continue 143 | 144 | boxes = prediction["boxes"] 145 | boxes = convert_to_xywh(boxes).tolist() 146 | scores = prediction["scores"].tolist() 147 | labels = prediction["labels"].tolist() 148 | keypoints = prediction["keypoints"] 149 | keypoints = keypoints.flatten(start_dim=1).tolist() 150 | 151 | coco_results.extend( 152 | [ 153 | { 154 | "image_id": original_id, 155 | "category_id": labels[k], 156 | 'keypoints': keypoint, 157 | "score": scores[k], 158 | } 159 | for k, keypoint in enumerate(keypoints) 160 | ] 161 | ) 162 | return coco_results 163 | 164 | 165 | def convert_to_xywh(boxes): 166 | xmin, ymin, xmax, ymax = boxes.unbind(1) 167 | return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) 168 | 169 | 170 | def merge(img_ids, eval_imgs): 171 | all_img_ids = all_gather(img_ids) 172 | all_eval_imgs = all_gather(eval_imgs) 173 | 174 | merged_img_ids = [] 175 | for p in all_img_ids: 176 | merged_img_ids.extend(p) 177 | 178 | merged_eval_imgs = [] 179 | for p in all_eval_imgs: 180 | merged_eval_imgs.append(p) 181 | 182 | merged_img_ids = np.array(merged_img_ids) 183 | merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) 184 | 185 | # keep only unique (and in sorted order) images 186 | merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) 187 | merged_eval_imgs = merged_eval_imgs[..., idx] 188 | 189 | return merged_img_ids, merged_eval_imgs 190 | 191 | 192 | def create_common_coco_eval(coco_eval, img_ids, eval_imgs): 193 | img_ids, eval_imgs = merge(img_ids, eval_imgs) 194 | img_ids = list(img_ids) 195 | eval_imgs = list(eval_imgs.flatten()) 196 | 197 | coco_eval.evalImgs = eval_imgs 198 | coco_eval.params.imgIds = img_ids 199 | coco_eval._paramsEval = copy.deepcopy(coco_eval.params) 200 | 201 | 202 | ################################################################# 203 | # From pycocotools, just removed the prints and fixed 204 | # a Python3 bug about unicode not defined 205 | ################################################################# 206 | 207 | 208 | def evaluate(self): 209 | ''' 210 | Run per image evaluation on given images and store results (a list of dict) in self.evalImgs 211 | :return: None 212 | ''' 213 | # tic = time.time() 214 | # print('Running per image evaluation...') 215 | p = self.params 216 | # add backward compatibility if useSegm is specified in params 217 | if p.useSegm is not None: 218 | p.iouType = 'segm' if p.useSegm == 1 else 'bbox' 219 | print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType)) 220 | # print('Evaluate annotation type *{}*'.format(p.iouType)) 221 | p.imgIds = list(np.unique(p.imgIds)) 222 | if p.useCats: 223 | p.catIds = list(np.unique(p.catIds)) 224 | p.maxDets = sorted(p.maxDets) 225 | self.params = p 226 | 227 | self._prepare() 228 | # loop through images, area range, max detection number 229 | catIds = p.catIds if p.useCats else [-1] 230 | 231 | if p.iouType == 'segm' or p.iouType == 'bbox': 232 | computeIoU = self.computeIoU 233 | elif p.iouType == 'keypoints': 234 | computeIoU = self.computeOks 235 | self.ious = { 236 | (imgId, catId): computeIoU(imgId, catId) 237 | for imgId in p.imgIds 238 | for catId in catIds} 239 | 240 | evaluateImg = self.evaluateImg 241 | maxDet = p.maxDets[-1] 242 | evalImgs = [ 243 | evaluateImg(imgId, catId, areaRng, maxDet) 244 | for catId in catIds 245 | for areaRng in p.areaRng 246 | for imgId in p.imgIds 247 | ] 248 | # this is NOT in the pycocotools code, but could be done outside 249 | evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) 250 | self._paramsEval = copy.deepcopy(self.params) 251 | # toc = time.time() 252 | # print('DONE (t={:0.2f}s).'.format(toc-tic)) 253 | return p.imgIds, evalImgs 254 | 255 | ################################################################# 256 | # end of straight copy from pycocotools, just removing the prints 257 | ################################################################# 258 | -------------------------------------------------------------------------------- /detr/datasets/transforms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Transforms and data augmentation for both image + bbox. 4 | """ 5 | import random 6 | 7 | import PIL 8 | import torch 9 | import torchvision.transforms as T 10 | import torchvision.transforms.functional as F 11 | 12 | from util.box_ops import box_xyxy_to_cxcywh 13 | from util.misc import interpolate 14 | 15 | 16 | def crop(image, target, region): 17 | cropped_image = F.crop(image, *region) 18 | 19 | target = target.copy() 20 | i, j, h, w = region 21 | 22 | # should we do something wrt the original size? 23 | target["size"] = torch.tensor([h, w]) 24 | 25 | fields = ["labels", "area", "iscrowd"] 26 | 27 | if "boxes" in target and len(target["boxes"]) > 0: 28 | boxes = target["boxes"] 29 | max_size = torch.as_tensor([w, h], dtype=torch.float32) 30 | cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) 31 | cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) 32 | cropped_boxes = cropped_boxes.clamp(min=0) 33 | area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) 34 | target["boxes"] = cropped_boxes.reshape(-1, 4) 35 | target["area"] = area 36 | fields.append("boxes") 37 | 38 | if "masks" in target: 39 | # FIXME should we update the area here if there are no boxes? 
40 | target['masks'] = target['masks'][:, i:i + h, j:j + w] 41 | fields.append("masks") 42 | 43 | # remove elements for which the boxes or masks that have zero area 44 | if "boxes" in target or "masks" in target: 45 | # favor boxes selection when defining which elements to keep 46 | # this is compatible with previous implementation 47 | if "boxes" in target and len(target["boxes"]) > 0: 48 | cropped_boxes = target['boxes'].reshape(-1, 2, 2) 49 | keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) 50 | else: 51 | keep = target['masks'].flatten(1).any(1) 52 | 53 | for field in fields: 54 | target[field] = target[field][keep] 55 | 56 | return cropped_image, target 57 | 58 | 59 | def hflip(image, target): 60 | flipped_image = F.hflip(image) 61 | 62 | w, h = image.size 63 | 64 | target = target.copy() 65 | if "boxes" in target and len(target["boxes"]) > 0: 66 | boxes = target["boxes"] 67 | boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) 68 | target["boxes"] = boxes 69 | 70 | if "masks" in target: 71 | target['masks'] = target['masks'].flip(-1) 72 | 73 | return flipped_image, target 74 | 75 | 76 | def resize(image, target, size, max_size=None): 77 | # size can be min_size (scalar) or (w, h) tuple 78 | 79 | def get_size_with_aspect_ratio(image_size, size, max_size=None): 80 | w, h = image_size 81 | if max_size is not None: 82 | min_original_size = float(min((w, h))) 83 | max_original_size = float(max((w, h))) 84 | if max_original_size / min_original_size * size > max_size: 85 | size = int(round(max_size * min_original_size / max_original_size)) 86 | 87 | if (w <= h and w == size) or (h <= w and h == size): 88 | return (h, w) 89 | 90 | if w < h: 91 | ow = size 92 | oh = int(size * h / w) 93 | else: 94 | oh = size 95 | ow = int(size * w / h) 96 | 97 | return (oh, ow) 98 | 99 | def get_size(image_size, size, max_size=None): 100 | if isinstance(size, (list, tuple)): 101 | return size[::-1] 102 | else: 103 | return get_size_with_aspect_ratio(image_size, size, max_size) 104 | 105 | size = get_size(image.size, size, max_size) 106 | rescaled_image = F.resize(image, size) 107 | 108 | if target is None: 109 | return rescaled_image, None 110 | 111 | ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) 112 | ratio_width, ratio_height = ratios 113 | 114 | target = target.copy() 115 | if "boxes" in target and len(target["boxes"]) > 0: 116 | boxes = target["boxes"] 117 | scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) 118 | target["boxes"] = scaled_boxes 119 | 120 | if "area" in target: 121 | area = target["area"] 122 | scaled_area = area * (ratio_width * ratio_height) 123 | target["area"] = scaled_area 124 | 125 | h, w = size 126 | target["size"] = torch.tensor([h, w]) 127 | 128 | if "masks" in target: 129 | target['masks'] = interpolate( 130 | target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 131 | 132 | return rescaled_image, target 133 | 134 | 135 | def pad(image, target, padding): 136 | # assumes that we only pad on the bottom right corners 137 | padded_image = F.pad(image, (0, 0, padding[0], padding[1])) 138 | if target is None: 139 | return padded_image, None 140 | target = target.copy() 141 | # should we do something wrt the original size? 
142 | target["size"] = torch.tensor(padded_image.size[::-1]) 143 | if "masks" in target: 144 | target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) 145 | return padded_image, target 146 | 147 | 148 | class RandomCrop(object): 149 | def __init__(self, size): 150 | self.size = size 151 | 152 | def __call__(self, img, target): 153 | region = T.RandomCrop.get_params(img, self.size) 154 | return crop(img, target, region) 155 | 156 | 157 | class RandomSizeCrop(object): 158 | def __init__(self, min_size: int, max_size: int): 159 | self.min_size = min_size 160 | self.max_size = max_size 161 | 162 | def __call__(self, img: PIL.Image.Image, target: dict): 163 | w = random.randint(self.min_size, min(img.width, self.max_size)) 164 | h = random.randint(self.min_size, min(img.height, self.max_size)) 165 | region = T.RandomCrop.get_params(img, [h, w]) 166 | return crop(img, target, region) 167 | 168 | 169 | class CenterCrop(object): 170 | def __init__(self, size): 171 | self.size = size 172 | 173 | def __call__(self, img, target): 174 | image_width, image_height = img.size 175 | crop_height, crop_width = self.size 176 | crop_top = int(round((image_height - crop_height) / 2.)) 177 | crop_left = int(round((image_width - crop_width) / 2.)) 178 | return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) 179 | 180 | 181 | class RandomHorizontalFlip(object): 182 | def __init__(self, p=0.5): 183 | self.p = p 184 | 185 | def __call__(self, img, target): 186 | if random.random() < self.p: 187 | return hflip(img, target) 188 | return img, target 189 | 190 | 191 | class RandomResize(object): 192 | def __init__(self, sizes, max_size=None): 193 | assert isinstance(sizes, (list, tuple)) 194 | self.sizes = sizes 195 | self.max_size = max_size 196 | 197 | def __call__(self, img, target=None): 198 | size = random.choice(self.sizes) 199 | return resize(img, target, size, self.max_size) 200 | 201 | 202 | class RandomPad(object): 203 | def __init__(self, max_pad): 204 | self.max_pad = max_pad 205 | 206 | def __call__(self, img, target): 207 | pad_x = random.randint(0, self.max_pad) 208 | pad_y = random.randint(0, self.max_pad) 209 | return pad(img, target, (pad_x, pad_y)) 210 | 211 | 212 | class RandomSelect(object): 213 | """ 214 | Randomly selects between transforms1 and transforms2, 215 | with probability p for transforms1 and (1 - p) for transforms2 216 | """ 217 | def __init__(self, transforms1, transforms2, p=0.5): 218 | self.transforms1 = transforms1 219 | self.transforms2 = transforms2 220 | self.p = p 221 | 222 | def __call__(self, img, target): 223 | if random.random() < self.p: 224 | return self.transforms1(img, target) 225 | return self.transforms2(img, target) 226 | 227 | 228 | class ToTensor(object): 229 | def __call__(self, img, target): 230 | return F.to_tensor(img), target 231 | 232 | 233 | class RandomErasing(object): 234 | 235 | def __init__(self, *args, **kwargs): 236 | self.eraser = T.RandomErasing(*args, **kwargs) 237 | 238 | def __call__(self, img, target): 239 | return self.eraser(img), target 240 | 241 | 242 | class Normalize(object): 243 | def __init__(self, mean, std): 244 | self.mean = mean 245 | self.std = std 246 | 247 | def __call__(self, image, target=None): 248 | image = F.normalize(image, mean=self.mean, std=self.std) 249 | if target is None: 250 | return image, None 251 | target = target.copy() 252 | h, w = image.shape[-2:] 253 | if "boxes" in target and len(target["boxes"]) > 0: 254 | boxes = target["boxes"] 255 | boxes = 
box_xyxy_to_cxcywh(boxes) 256 | boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) 257 | target["boxes"] = boxes 258 | return image, target 259 | 260 | 261 | class Compose(object): 262 | def __init__(self, transforms): 263 | self.transforms = transforms 264 | 265 | def __call__(self, image, target): 266 | for t in self.transforms: 267 | image, target = t(image, target) 268 | return image, target 269 | 270 | def __repr__(self): 271 | format_string = self.__class__.__name__ + "(" 272 | for t in self.transforms: 273 | format_string += "\n" 274 | format_string += " {0}".format(t) 275 | format_string += "\n)" 276 | return format_string 277 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | """ 2 | "Inspired" by https://github.com/phamquiluan/table-transformer/blob/main/core.py 3 | which made simple inference easier 4 | """ 5 | import sys 6 | import logging 7 | import torch 8 | import json 9 | import os 10 | import random 11 | import numpy as np 12 | import torchvision.transforms.functional as F 13 | import cv2 14 | import pandas as pd 15 | 16 | from pytesseract import image_to_string 17 | 18 | from datetime import datetime 19 | from torch.utils.data import DataLoader 20 | from PIL import Image 21 | 22 | sys.path.append("detr") 23 | sys.path.append("src") 24 | from models import build_model 25 | import datasets.transforms as R 26 | 27 | logging.basicConfig( 28 | format="%(asctime)s | %(levelname)s: %(message)s", level=logging.NOTSET 29 | ) 30 | # TODO: document this code 31 | # TODO: remove everything that is not needed! 32 | def cells_to_dataframe(cells): 33 | d = {} 34 | for cell in cells: 35 | if cell['column'] not in d.keys(): 36 | d[cell['column']] = [] 37 | d[cell['column']].append(cell['content']) 38 | df = pd.DataFrame(data=d) 39 | return df 40 | 41 | 42 | def set_cell_text(cells, image, clean=False): 43 | for cell in cells: 44 | # crop & pad image 45 | xmin, ymin, xmax, ymax = cell['bbox'] 46 | roi = image.crop((xmin, ymin, xmax, ymax)) 47 | roi = add_padding(roi, 30) 48 | 49 | # OCR 50 | text = image_to_string(roi, lang='deu') # TODO: here you should use self.lang 51 | if clean: 52 | text = text.strip() 53 | text = text.replace('\n', '') 54 | cell['content'] = text 55 | 56 | def get_rows_and_columns(objs): 57 | cols = [obj for obj in objs if obj['label'] == 'table column'] 58 | rows = [obj for obj in objs if obj['label'] == 'table row'] 59 | # sort columns by their bottom-right x coordinate 60 | cols.sort(key = lambda col : col['bbox'][2]) 61 | # sort rows by their bottom-right y coordinate 62 | rows.sort(key = lambda row: row['bbox'][3]) 63 | 64 | return rows, cols 65 | 66 | def get_cells(objs): 67 | rows, cols = get_rows_and_columns(objs) 68 | 69 | cells = [] 70 | for i, col in enumerate(cols): 71 | c_xmin, c_ymin, c_xmax, c_ymax = col['bbox'] 72 | for j, row in enumerate(rows): 73 | r_xmin, r_ymin, r_xmax, r_ymax = row['bbox'] 74 | xmin, ymin = max(r_xmin, c_xmin), max(r_ymin, c_ymin) 75 | xmax, ymax = min(r_xmax, c_xmax), min(r_ymax, c_ymax) 76 | cell = { 77 | 'column' : i, 78 | 'row' : j, 79 | 'bbox' : [xmin, ymin, xmax, ymax] 80 | } 81 | cells.append(cell) 82 | 83 | return cells 84 | 85 | 86 | # Source file: postprocess.py 87 | def structure_table(objs, table_bbox): 88 | rows, cols = get_rows_and_columns(objs) 89 | 90 | # initial values are the table's top and left-most coordinates 91 | p_xmin, p_ymin, p_xmax, p_ymax = table_bbox 92 | p_ymax = p_ymin 93 | p_xmax = p_xmin 94 | """
95 | scenario 1: borders are on the same line []][] 96 | then: keep xmin 97 | scenario 2: border 1 is smaller than border 2, there is a gap [] [] 98 | scenario 3: border 1 is bigger than border 2, there is an overlap [[]] 99 | then: make the xmin of the current cell the xmax of the previous bbox 100 | """ 101 | for row in rows: 102 | xmin, ymin, xmax, ymax = row['bbox'] 103 | if p_ymax != ymin: 104 | ymin = p_ymax 105 | row['bbox'] = xmin, ymin, xmax, ymax 106 | p_ymax = ymax 107 | # column bottom borders have to overlap with the last row's bottom border 108 | bottom_y = rows[-1]['bbox'][3] 109 | for col in cols: 110 | xmin, ymin, xmax, ymax = col['bbox'] 111 | 112 | if p_xmax != xmin: 113 | xmin = p_xmax 114 | col['bbox'] = xmin, ymin, xmax, ymax 115 | p_xmax = xmax 116 | 117 | return objs 118 | 119 | def filter_cols_and_rows(objs): 120 | """Filters out anything other than columns and rows 121 | 122 | Returns: 123 | a list containing only column and row predictions 124 | """ 125 | objs = [obj for obj in objs if obj['label'] in ['table column', 'table row']] 126 | return objs 127 | 128 | def border_align(objs, table_bbox): 129 | """ 130 | Stretches every row bbox to the table's full width and every column bbox to its full height. 131 | """ 132 | for obj in objs: 133 | bbox = obj['bbox'] 134 | if obj['label'] == 'table row': 135 | bbox[0] = table_bbox[0] 136 | bbox[2] = table_bbox[2] 137 | elif obj['label'] == 'table column': 138 | bbox[1] = table_bbox[1] 139 | bbox[3] = table_bbox[3] 140 | obj['bbox'] = bbox 141 | 142 | return objs 143 | 144 | def predictions_to_objects(predictions, threshold, class_map): 145 | objs = [] 146 | labels = predictions["labels"].tolist() 147 | scores = enumerate(predictions["scores"].tolist()) 148 | for idx, score in scores: 149 | if score > threshold: 150 | label = labels[idx] 151 | bbox = predictions["boxes"][idx].tolist() 152 | obj = { 153 | "score" : score, 154 | "label" : class_map[label], 155 | "bbox" : list(map(int, bbox)) 156 | } 157 | objs.append(obj) 158 | return objs 159 | 160 | def visualize_structure(image, objs): 161 | image = np.array(image) 162 | for obj in objs: 163 | xmin, ymin, xmax, ymax = obj["bbox"] 164 | cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (245, 105, 66), 2) 165 | image = image[:, :, ::-1].copy() 166 | return image 167 | 168 | # Source file: postprocess.py (align_columns & align_rows) 169 | 170 | def add_padding(img, padding=50): 171 | """Adds padding to an image 172 | 173 | Args: 174 | img (PIL.Image): pillow image 175 | padding (int): number of pixels to use for padding 176 | 177 | Returns: 178 | image with padding 179 | """ 180 | 181 | w, h = img.size 182 | new_w = w + (padding * 2) 183 | new_h = h + (padding * 2) 184 | result = Image.new(img.mode, (new_w, new_h), (255, 255, 255)) 185 | result.paste(img, (padding, padding)) 186 | return result 187 | 188 | def load_args(json_path): 189 | """Loads arguments from JSON file 190 | 191 | Returns: 192 | data from the JSON file 193 | """ 194 | data = None 195 | with open(json_path) as f: 196 | data = json.load(f) 197 | return data 198 | 199 | # Source file: main.py 200 | def get_class_map(key="name"): 201 | assert key in ["index", "name"] 202 | if key == "name": 203 | return { 204 | 'table': 0, 205 | 'table column': 1, 206 | 'table row': 2, 207 | 'table column header': 3, 208 | 'table projected row header': 4, 209 | 'table spanning cell': 5, 210 | 'no object': 6 211 | } 212 | else: 213 | return { 214 | 0 : 'table', 215 | 1 : 'table column', 216 | 2 : 'table row', 217 | 3 : 'table column header', 218 | 4 : 'table projected row header', 219 | 5 : 'table spanning
cell', 220 | 6 : 'no object' 221 | } 222 | 223 | # Source file: main.py 224 | def get_model(args, device): 225 | """ 226 | Loads the DETR model onto the specified device. 227 | If a load path is specified, the state dict is updated accordingly. 228 | """ 229 | model, criterion, postprocessors = build_model(args) 230 | model.to(device) 231 | if args.model_load_path: 232 | print("loading model from checkpoint") 233 | loaded_state_dict = torch.load(args.model_load_path, 234 | map_location=device) 235 | model_state_dict = model.state_dict() 236 | pretrained_dict = { 237 | k: v 238 | for k, v in loaded_state_dict.items() 239 | if k in model_state_dict and model_state_dict[k].shape == v.shape 240 | } 241 | model_state_dict.update(pretrained_dict) 242 | model.load_state_dict(model_state_dict, strict=True) 243 | return model, criterion, postprocessors 244 | 245 | class TableInformer: 246 | def __init__(self, weight_path): 247 | # NOTE: the original example asserts here that the weight path exists 248 | args = load_args("./src/structure_config.json") 249 | args['model_load_path'] = weight_path 250 | args = type("Args", (object,), args) 251 | 252 | self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') 253 | self.model, criterion, self.postprocessors = get_model(args, self.device) 254 | self.model.eval() 255 | 256 | self.normalize = R.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 257 | 258 | def predict(self, image_path, debug=True, threshold=0.7): 259 | """ 260 | The pre-trained model was not trained on tightly cropped tables, 261 | so adding padding around the table results in better predictions. 262 | """ 263 | image = image_path 264 | padding = 100 265 | if isinstance(image_path, str): 266 | image = Image.open(image_path).convert("RGB") 267 | image = add_padding(image, padding=padding) 268 | 269 | w, h = image.size 270 | 271 | img_tensor = self.normalize(F.to_tensor(image))[0] 272 | img_tensor = torch.unsqueeze(img_tensor, 0).to(self.device) 273 | 274 | # inference 275 | outputs = None 276 | with torch.no_grad(): 277 | outputs = self.model(img_tensor) 278 | 279 | image_size = torch.unsqueeze(torch.as_tensor([int(h), int(w)]), 0).to( 280 | self.device 281 | ) 282 | results = self.postprocessors["bbox"](outputs, image_size)[0] 283 | 284 | # conversion to objects w/ score threshold 285 | objs = predictions_to_objects(results, threshold, get_class_map(key="index")) 286 | 287 | # align columns and rows to table border 288 | xmin, ymin, xmax, ymax = padding, padding, w - padding, h - padding 289 | table_bbox = [xmin, ymin, xmax, ymax] 290 | objs = border_align(objs, table_bbox) 291 | 292 | # fix overlapping and align objects 293 | objs = structure_table(objs, table_bbox) 294 | 295 | # keep only the columns and rows 296 | objs = filter_cols_and_rows(objs) 297 | 298 | # get cells based on cols and rows 299 | cells = get_cells(objs) 300 | set_cell_text(cells, image, clean=True) 301 | df = cells_to_dataframe(cells) 302 | 303 | if debug: 304 | visualization = visualize_structure(image, objs) 305 | out_path = "./visualization.jpg" 306 | cv2.imwrite(out_path, visualization) 307 | logging.info(f"Visualization can be found at '{out_path}'.") 308 | 309 | return results 310 | 311 | # MAIN 312 | weight_path = "pubtables1m_structure_detr_r18.pth" 313 | model = TableInformer(weight_path) 314 | image_path = "example_table.jpg" 315 | output = model.predict(image_path) 316 | -------------------------------------------------------------------------------- /detr/LICENSE:
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2020 - present, Facebook, Inc 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /detr/d2/detr/detr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import logging 3 | import math 4 | from typing import List 5 | 6 | import numpy as np 7 | import torch 8 | import torch.distributed as dist 9 | import torch.nn.functional as F 10 | from scipy.optimize import linear_sum_assignment 11 | from torch import nn 12 | 13 | from detectron2.layers import ShapeSpec 14 | from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess 15 | from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks 16 | from detectron2.utils.logger import log_first_n 17 | from fvcore.nn import giou_loss, smooth_l1_loss 18 | from models.backbone import Joiner 19 | from models.detr import DETR, SetCriterion 20 | from models.matcher import HungarianMatcher 21 | from models.position_encoding import PositionEmbeddingSine 22 | from models.transformer import Transformer 23 | from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm 24 | from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh 25 | from util.misc import NestedTensor 26 | from datasets.coco import convert_coco_poly_to_mask 27 | 28 | __all__ = ["Detr"] 29 | 30 | 31 | class MaskedBackbone(nn.Module): 32 | """ This is a thin wrapper around D2's backbone to provide padding masking""" 33 | 34 | def __init__(self, cfg): 35 | super().__init__() 36 | self.backbone = build_backbone(cfg) 37 | backbone_shape = self.backbone.output_shape() 38 | self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()] 39 | self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels 40 | 41 | def forward(self, images): 42 | features = self.backbone(images.tensor) 43 | masks = self.mask_out_padding( 44 | [features_per_level.shape for features_per_level in features.values()], 45 | images.image_sizes, 46 | images.tensor.device, 47 | ) 48 | assert len(features) == len(masks) 49 | for i, k in enumerate(features.keys()): 50 | features[k] = NestedTensor(features[k], masks[i]) 51 | return features 52 | 53 | def mask_out_padding(self, feature_shapes, image_sizes, device): 54 | masks = [] 55 | assert len(feature_shapes) 
== len(self.feature_strides) 56 | for idx, shape in enumerate(feature_shapes): 57 | N, _, H, W = shape 58 | masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device) 59 | for img_idx, (h, w) in enumerate(image_sizes): 60 | masks_per_feature_level[ 61 | img_idx, 62 | : int(np.ceil(float(h) / self.feature_strides[idx])), 63 | : int(np.ceil(float(w) / self.feature_strides[idx])), 64 | ] = 0 65 | masks.append(masks_per_feature_level) 66 | return masks 67 | 68 | 69 | @META_ARCH_REGISTRY.register() 70 | class Detr(nn.Module): 71 | """ 72 | Implement Detr 73 | """ 74 | 75 | def __init__(self, cfg): 76 | super().__init__() 77 | 78 | self.device = torch.device(cfg.MODEL.DEVICE) 79 | 80 | self.num_classes = cfg.MODEL.DETR.NUM_CLASSES 81 | self.mask_on = cfg.MODEL.MASK_ON 82 | hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM 83 | num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES 84 | # Transformer parameters: 85 | nheads = cfg.MODEL.DETR.NHEADS 86 | dropout = cfg.MODEL.DETR.DROPOUT 87 | dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD 88 | enc_layers = cfg.MODEL.DETR.ENC_LAYERS 89 | dec_layers = cfg.MODEL.DETR.DEC_LAYERS 90 | pre_norm = cfg.MODEL.DETR.PRE_NORM 91 | 92 | # Loss parameters: 93 | giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT 94 | l1_weight = cfg.MODEL.DETR.L1_WEIGHT 95 | deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION 96 | no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT 97 | 98 | N_steps = hidden_dim // 2 99 | d2_backbone = MaskedBackbone(cfg) 100 | backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True)) 101 | backbone.num_channels = d2_backbone.num_channels 102 | 103 | transformer = Transformer( 104 | d_model=hidden_dim, 105 | dropout=dropout, 106 | nhead=nheads, 107 | dim_feedforward=dim_feedforward, 108 | num_encoder_layers=enc_layers, 109 | num_decoder_layers=dec_layers, 110 | normalize_before=pre_norm, 111 | return_intermediate_dec=deep_supervision, 112 | ) 113 | 114 | self.detr = DETR( 115 | backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision 116 | ) 117 | if self.mask_on: 118 | frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS 119 | if frozen_weights != '': 120 | print("LOAD pre-trained weights") 121 | weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model'] 122 | new_weight = {} 123 | for k, v in weight.items(): 124 | if 'detr.' 
in k: 125 | new_weight[k.replace('detr.', '')] = v 126 | else: 127 | print(f"Skipping loading weight {k} from frozen model") 128 | del weight 129 | self.detr.load_state_dict(new_weight) 130 | del new_weight 131 | self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != '')) 132 | self.seg_postprocess = PostProcessSegm 133 | 134 | self.detr.to(self.device) 135 | 136 | # building criterion 137 | matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight) 138 | weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight} 139 | weight_dict["loss_giou"] = giou_weight 140 | if deep_supervision: 141 | aux_weight_dict = {} 142 | for i in range(dec_layers - 1): 143 | aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) 144 | weight_dict.update(aux_weight_dict) 145 | losses = ["labels", "boxes", "cardinality"] 146 | if self.mask_on: 147 | losses += ["masks"] 148 | self.criterion = SetCriterion( 149 | self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, 150 | ) 151 | self.criterion.to(self.device) 152 | 153 | pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) 154 | pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) 155 | self.normalizer = lambda x: (x - pixel_mean) / pixel_std 156 | self.to(self.device) 157 | 158 | def forward(self, batched_inputs): 159 | """ 160 | Args: 161 | batched_inputs: a list, batched outputs of :class:`DatasetMapper` . 162 | Each item in the list contains the inputs for one image. 163 | For now, each item in the list is a dict that contains: 164 | 165 | * image: Tensor, image in (C, H, W) format. 166 | * instances: Instances 167 | 168 | Other information that's included in the original dicts, such as: 169 | 170 | * "height", "width" (int): the output resolution of the model, used in inference. 171 | See :meth:`postprocess` for details. 172 | Returns: 173 | dict[str: Tensor]: 174 | mapping from a named loss to a tensor storing the loss. Used during training only. 
175 | """ 176 | images = self.preprocess_image(batched_inputs) 177 | output = self.detr(images) 178 | 179 | if self.training: 180 | gt_instances = [x["instances"].to(self.device) for x in batched_inputs] 181 | 182 | targets = self.prepare_targets(gt_instances) 183 | loss_dict = self.criterion(output, targets) 184 | weight_dict = self.criterion.weight_dict 185 | for k in loss_dict.keys(): 186 | if k in weight_dict: 187 | loss_dict[k] *= weight_dict[k] 188 | return loss_dict 189 | else: 190 | box_cls = output["pred_logits"] 191 | box_pred = output["pred_boxes"] 192 | mask_pred = output["pred_masks"] if self.mask_on else None 193 | results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes) 194 | processed_results = [] 195 | for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): 196 | height = input_per_image.get("height", image_size[0]) 197 | width = input_per_image.get("width", image_size[1]) 198 | r = detector_postprocess(results_per_image, height, width) 199 | processed_results.append({"instances": r}) 200 | return processed_results 201 | 202 | def prepare_targets(self, targets): 203 | new_targets = [] 204 | for targets_per_image in targets: 205 | h, w = targets_per_image.image_size 206 | image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device) 207 | gt_classes = targets_per_image.gt_classes 208 | gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy 209 | gt_boxes = box_xyxy_to_cxcywh(gt_boxes) 210 | new_targets.append({"labels": gt_classes, "boxes": gt_boxes}) 211 | if self.mask_on and hasattr(targets_per_image, 'gt_masks'): 212 | gt_masks = targets_per_image.gt_masks 213 | gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w) 214 | new_targets[-1].update({'masks': gt_masks}) 215 | return new_targets 216 | 217 | def inference(self, box_cls, box_pred, mask_pred, image_sizes): 218 | """ 219 | Arguments: 220 | box_cls (Tensor): tensor of shape (batch_size, num_queries, K). 221 | The tensor predicts the classification probability for each query. 222 | box_pred (Tensor): tensors of shape (batch_size, num_queries, 4). 223 | The tensor predicts 4-vector (x,y,w,h) box 224 | regression values for every queryx 225 | image_sizes (List[torch.Size]): the input image sizes 226 | 227 | Returns: 228 | results (List[Instances]): a list of #images elements. 229 | """ 230 | assert len(box_cls) == len(image_sizes) 231 | results = [] 232 | 233 | # For each box we assign the best class or the second best if the best on is `no_object`. 
234 | scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1) 235 | 236 | for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip( 237 | scores, labels, box_pred, image_sizes 238 | )): 239 | result = Instances(image_size) 240 | result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image)) 241 | 242 | result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0]) 243 | if self.mask_on: 244 | mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False) 245 | mask = mask[0].sigmoid() > 0.5 246 | B, N, H, W = mask_pred.shape 247 | mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32) 248 | result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device) 249 | 250 | result.scores = scores_per_image 251 | result.pred_classes = labels_per_image 252 | results.append(result) 253 | return results 254 | 255 | def preprocess_image(self, batched_inputs): 256 | """ 257 | Normalize, pad and batch the input images. 258 | """ 259 | images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs] 260 | images = ImageList.from_tensors(images) 261 | return images 262 | -------------------------------------------------------------------------------- /detr/main.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import argparse 3 | import datetime 4 | import json 5 | import random 6 | import time 7 | from pathlib import Path 8 | 9 | import numpy as np 10 | import torch 11 | from torch.utils.data import DataLoader, DistributedSampler 12 | 13 | import datasets 14 | import util.misc as utils 15 | from datasets import build_dataset, get_coco_api_from_dataset 16 | from engine import evaluate, train_one_epoch 17 | from models import build_model 18 | 19 | 20 | def get_args_parser(): 21 | parser = argparse.ArgumentParser('Set transformer detector', add_help=False) 22 | parser.add_argument('--lr', default=1e-4, type=float) 23 | parser.add_argument('--lr_backbone', default=1e-5, type=float) 24 | parser.add_argument('--batch_size', default=2, type=int) 25 | parser.add_argument('--weight_decay', default=1e-4, type=float) 26 | parser.add_argument('--epochs', default=300, type=int) 27 | parser.add_argument('--lr_drop', default=200, type=int) 28 | parser.add_argument('--clip_max_norm', default=0.1, type=float, 29 | help='gradient clipping max norm') 30 | 31 | # Model parameters 32 | parser.add_argument('--frozen_weights', type=str, default=None, 33 | help="Path to the pretrained model. 
If set, only the mask head will be trained") 34 | # * Backbone 35 | parser.add_argument('--backbone', default='resnet50', type=str, 36 | help="Name of the convolutional backbone to use") 37 | parser.add_argument('--dilation', action='store_true', 38 | help="If true, we replace stride with dilation in the last convolutional block (DC5)") 39 | parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), 40 | help="Type of positional embedding to use on top of the image features") 41 | 42 | # * Transformer 43 | parser.add_argument('--enc_layers', default=6, type=int, 44 | help="Number of encoding layers in the transformer") 45 | parser.add_argument('--dec_layers', default=6, type=int, 46 | help="Number of decoding layers in the transformer") 47 | parser.add_argument('--dim_feedforward', default=2048, type=int, 48 | help="Intermediate size of the feedforward layers in the transformer blocks") 49 | parser.add_argument('--hidden_dim', default=256, type=int, 50 | help="Size of the embeddings (dimension of the transformer)") 51 | parser.add_argument('--dropout', default=0.1, type=float, 52 | help="Dropout applied in the transformer") 53 | parser.add_argument('--nheads', default=8, type=int, 54 | help="Number of attention heads inside the transformer's attentions") 55 | parser.add_argument('--num_queries', default=100, type=int, 56 | help="Number of query slots") 57 | parser.add_argument('--pre_norm', action='store_true') 58 | 59 | # * Segmentation 60 | parser.add_argument('--masks', action='store_true', 61 | help="Train segmentation head if the flag is provided") 62 | 63 | # Loss 64 | parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', 65 | help="Disables auxiliary decoding losses (loss at each layer)") 66 | # * Matcher 67 | parser.add_argument('--set_cost_class', default=1, type=float, 68 | help="Class coefficient in the matching cost") 69 | parser.add_argument('--set_cost_bbox', default=5, type=float, 70 | help="L1 box coefficient in the matching cost") 71 | parser.add_argument('--set_cost_giou', default=2, type=float, 72 | help="giou box coefficient in the matching cost") 73 | # * Loss coefficients 74 | parser.add_argument('--mask_loss_coef', default=1, type=float) 75 | parser.add_argument('--dice_loss_coef', default=1, type=float) 76 | parser.add_argument('--bbox_loss_coef', default=5, type=float) 77 | parser.add_argument('--giou_loss_coef', default=2, type=float) 78 | parser.add_argument('--eos_coef', default=0.1, type=float, 79 | help="Relative classification weight of the no-object class") 80 | 81 | # dataset parameters 82 | parser.add_argument('--dataset_file', default='coco') 83 | parser.add_argument('--coco_path', type=str) 84 | parser.add_argument('--coco_panoptic_path', type=str) 85 | parser.add_argument('--remove_difficult', action='store_true') 86 | 87 | parser.add_argument('--output_dir', default='', 88 | help='path where to save, empty for no saving') 89 | parser.add_argument('--device', default='cuda', 90 | help='device to use for training / testing') 91 | parser.add_argument('--seed', default=42, type=int) 92 | parser.add_argument('--resume', default='', help='resume from checkpoint') 93 | parser.add_argument('--start_epoch', default=0, type=int, metavar='N', 94 | help='start epoch') 95 | parser.add_argument('--eval', action='store_true') 96 | parser.add_argument('--num_workers', default=2, type=int) 97 | 98 | # distributed training parameters 99 | parser.add_argument('--world_size', default=1, type=int, 100 | 
help='number of distributed processes') 101 | parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') 102 | return parser 103 | 104 | 105 | def main(args): 106 | utils.init_distributed_mode(args) 107 | print("git:\n {}\n".format(utils.get_sha())) 108 | 109 | if args.frozen_weights is not None: 110 | assert args.masks, "Frozen training is meant for segmentation only" 111 | print(args) 112 | 113 | device = torch.device(args.device) 114 | 115 | # fix the seed for reproducibility 116 | seed = args.seed + utils.get_rank() 117 | torch.manual_seed(seed) 118 | np.random.seed(seed) 119 | random.seed(seed) 120 | 121 | model, criterion, postprocessors = build_model(args) 122 | model.to(device) 123 | 124 | model_without_ddp = model 125 | if args.distributed: 126 | model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) 127 | model_without_ddp = model.module 128 | n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) 129 | print('number of params:', n_parameters) 130 | 131 | param_dicts = [ 132 | {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]}, 133 | { 134 | "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], 135 | "lr": args.lr_backbone, 136 | }, 137 | ] 138 | optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, 139 | weight_decay=args.weight_decay) 140 | lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) 141 | 142 | dataset_train = build_dataset(image_set='train', args=args) 143 | dataset_val = build_dataset(image_set='val', args=args) 144 | 145 | if args.distributed: 146 | sampler_train = DistributedSampler(dataset_train) 147 | sampler_val = DistributedSampler(dataset_val, shuffle=False) 148 | else: 149 | sampler_train = torch.utils.data.RandomSampler(dataset_train) 150 | sampler_val = torch.utils.data.SequentialSampler(dataset_val) 151 | 152 | batch_sampler_train = torch.utils.data.BatchSampler( 153 | sampler_train, args.batch_size, drop_last=True) 154 | 155 | data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, 156 | collate_fn=utils.collate_fn, num_workers=args.num_workers) 157 | data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, 158 | drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) 159 | 160 | if args.dataset_file == "coco_panoptic": 161 | # We also evaluate AP during panoptic training, on original coco DS 162 | coco_val = datasets.coco.build("val", args) 163 | base_ds = get_coco_api_from_dataset(coco_val) 164 | else: 165 | base_ds = get_coco_api_from_dataset(dataset_val) 166 | 167 | if args.frozen_weights is not None: 168 | checkpoint = torch.load(args.frozen_weights, map_location='cpu') 169 | model_without_ddp.detr.load_state_dict(checkpoint['model']) 170 | 171 | output_dir = Path(args.output_dir) 172 | if args.resume: 173 | if args.resume.startswith('https'): 174 | checkpoint = torch.hub.load_state_dict_from_url( 175 | args.resume, map_location='cpu', check_hash=True) 176 | else: 177 | checkpoint = torch.load(args.resume, map_location='cpu') 178 | model_without_ddp.load_state_dict(checkpoint['model']) 179 | if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: 180 | optimizer.load_state_dict(checkpoint['optimizer']) 181 | lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) 182 | args.start_epoch = checkpoint['epoch'] + 1 183 | 184 
| if args.eval: 185 | test_stats, coco_evaluator = evaluate(model, criterion, postprocessors, 186 | data_loader_val, base_ds, device, args.output_dir) 187 | if args.output_dir: 188 | utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") 189 | return 190 | 191 | print("Start training") 192 | start_time = time.time() 193 | for epoch in range(args.start_epoch, args.epochs): 194 | if args.distributed: 195 | sampler_train.set_epoch(epoch) 196 | train_stats = train_one_epoch( 197 | model, criterion, data_loader_train, optimizer, device, epoch, 198 | args.clip_max_norm) 199 | lr_scheduler.step() 200 | if args.output_dir: 201 | checkpoint_paths = [output_dir / 'checkpoint.pth'] 202 | # extra checkpoint before LR drop and every 100 epochs 203 | if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0: 204 | checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') 205 | for checkpoint_path in checkpoint_paths: 206 | utils.save_on_master({ 207 | 'model': model_without_ddp.state_dict(), 208 | 'optimizer': optimizer.state_dict(), 209 | 'lr_scheduler': lr_scheduler.state_dict(), 210 | 'epoch': epoch, 211 | 'args': args, 212 | }, checkpoint_path) 213 | 214 | test_stats, coco_evaluator = evaluate( 215 | model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir 216 | ) 217 | 218 | log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 219 | **{f'test_{k}': v for k, v in test_stats.items()}, 220 | 'epoch': epoch, 221 | 'n_parameters': n_parameters} 222 | 223 | if args.output_dir and utils.is_main_process(): 224 | with (output_dir / "log.txt").open("a") as f: 225 | f.write(json.dumps(log_stats) + "\n") 226 | 227 | # for evaluation logs 228 | if coco_evaluator is not None: 229 | (output_dir / 'eval').mkdir(exist_ok=True) 230 | if "bbox" in coco_evaluator.coco_eval: 231 | filenames = ['latest.pth'] 232 | if epoch % 50 == 0: 233 | filenames.append(f'{epoch:03}.pth') 234 | for name in filenames: 235 | torch.save(coco_evaluator.coco_eval["bbox"].eval, 236 | output_dir / "eval" / name) 237 | 238 | total_time = time.time() - start_time 239 | total_time_str = str(datetime.timedelta(seconds=int(total_time))) 240 | print('Training time {}'.format(total_time_str)) 241 | 242 | 243 | if __name__ == '__main__': 244 | parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) 245 | args = parser.parse_args() 246 | if args.output_dir: 247 | Path(args.output_dir).mkdir(parents=True, exist_ok=True) 248 | main(args) 249 | -------------------------------------------------------------------------------- /detr/models/transformer.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | DETR Transformer class. 
4 | 5 | Copy-paste from torch.nn.Transformer with modifications: 6 | * positional encodings are passed in MHattention 7 | * extra LN at the end of encoder is removed 8 | * decoder returns a stack of activations from all decoding layers 9 | """ 10 | import copy 11 | from typing import Optional, List 12 | 13 | import torch 14 | import torch.nn.functional as F 15 | from torch import nn, Tensor 16 | 17 | 18 | class Transformer(nn.Module): 19 | 20 | def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, 21 | num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, 22 | activation="relu", normalize_before=False, 23 | return_intermediate_dec=False): 24 | super().__init__() 25 | 26 | encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, 27 | dropout, activation, normalize_before) 28 | encoder_norm = nn.LayerNorm(d_model) if normalize_before else None 29 | self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) 30 | 31 | decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, 32 | dropout, activation, normalize_before) 33 | decoder_norm = nn.LayerNorm(d_model) 34 | self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, 35 | return_intermediate=return_intermediate_dec) 36 | 37 | self._reset_parameters() 38 | 39 | self.d_model = d_model 40 | self.nhead = nhead 41 | 42 | def _reset_parameters(self): 43 | for p in self.parameters(): 44 | if p.dim() > 1: 45 | nn.init.xavier_uniform_(p) 46 | 47 | def forward(self, src, mask, query_embed, pos_embed): 48 | # flatten NxCxHxW to HWxNxC 49 | bs, c, h, w = src.shape 50 | src = src.flatten(2).permute(2, 0, 1) 51 | pos_embed = pos_embed.flatten(2).permute(2, 0, 1) 52 | query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) 53 | mask = mask.flatten(1) 54 | 55 | tgt = torch.zeros_like(query_embed) 56 | memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) 57 | hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, 58 | pos=pos_embed, query_pos=query_embed) 59 | return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) 60 | 61 | 62 | class TransformerEncoder(nn.Module): 63 | 64 | def __init__(self, encoder_layer, num_layers, norm=None): 65 | super().__init__() 66 | self.layers = _get_clones(encoder_layer, num_layers) 67 | self.num_layers = num_layers 68 | self.norm = norm 69 | 70 | def forward(self, src, 71 | mask: Optional[Tensor] = None, 72 | src_key_padding_mask: Optional[Tensor] = None, 73 | pos: Optional[Tensor] = None): 74 | output = src 75 | 76 | for layer in self.layers: 77 | output = layer(output, src_mask=mask, 78 | src_key_padding_mask=src_key_padding_mask, pos=pos) 79 | 80 | if self.norm is not None: 81 | output = self.norm(output) 82 | 83 | return output 84 | 85 | 86 | class TransformerDecoder(nn.Module): 87 | 88 | def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): 89 | super().__init__() 90 | self.layers = _get_clones(decoder_layer, num_layers) 91 | self.num_layers = num_layers 92 | self.norm = norm 93 | self.return_intermediate = return_intermediate 94 | 95 | def forward(self, tgt, memory, 96 | tgt_mask: Optional[Tensor] = None, 97 | memory_mask: Optional[Tensor] = None, 98 | tgt_key_padding_mask: Optional[Tensor] = None, 99 | memory_key_padding_mask: Optional[Tensor] = None, 100 | pos: Optional[Tensor] = None, 101 | query_pos: Optional[Tensor] = None): 102 | output = tgt 103 | 104 | intermediate = [] 105 | 106 | for layer in self.layers: 107 | output = layer(output, memory, 
tgt_mask=tgt_mask, 108 | memory_mask=memory_mask, 109 | tgt_key_padding_mask=tgt_key_padding_mask, 110 | memory_key_padding_mask=memory_key_padding_mask, 111 | pos=pos, query_pos=query_pos) 112 | if self.return_intermediate: 113 | intermediate.append(self.norm(output)) 114 | 115 | if self.norm is not None: 116 | output = self.norm(output) 117 | if self.return_intermediate: 118 | intermediate.pop() 119 | intermediate.append(output) 120 | 121 | if self.return_intermediate: 122 | return torch.stack(intermediate) 123 | 124 | return output.unsqueeze(0) 125 | 126 | 127 | class TransformerEncoderLayer(nn.Module): 128 | 129 | def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, 130 | activation="relu", normalize_before=False): 131 | super().__init__() 132 | self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) 133 | # Implementation of Feedforward model 134 | self.linear1 = nn.Linear(d_model, dim_feedforward) 135 | self.dropout = nn.Dropout(dropout) 136 | self.linear2 = nn.Linear(dim_feedforward, d_model) 137 | 138 | self.norm1 = nn.LayerNorm(d_model) 139 | self.norm2 = nn.LayerNorm(d_model) 140 | self.dropout1 = nn.Dropout(dropout) 141 | self.dropout2 = nn.Dropout(dropout) 142 | 143 | self.activation = _get_activation_fn(activation) 144 | self.normalize_before = normalize_before 145 | 146 | def with_pos_embed(self, tensor, pos: Optional[Tensor]): 147 | return tensor if pos is None else tensor + pos 148 | 149 | def forward_post(self, 150 | src, 151 | src_mask: Optional[Tensor] = None, 152 | src_key_padding_mask: Optional[Tensor] = None, 153 | pos: Optional[Tensor] = None): 154 | q = k = self.with_pos_embed(src, pos) 155 | src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, 156 | key_padding_mask=src_key_padding_mask)[0] 157 | src = src + self.dropout1(src2) 158 | src = self.norm1(src) 159 | src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) 160 | src = src + self.dropout2(src2) 161 | src = self.norm2(src) 162 | return src 163 | 164 | def forward_pre(self, src, 165 | src_mask: Optional[Tensor] = None, 166 | src_key_padding_mask: Optional[Tensor] = None, 167 | pos: Optional[Tensor] = None): 168 | src2 = self.norm1(src) 169 | q = k = self.with_pos_embed(src2, pos) 170 | src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, 171 | key_padding_mask=src_key_padding_mask)[0] 172 | src = src + self.dropout1(src2) 173 | src2 = self.norm2(src) 174 | src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) 175 | src = src + self.dropout2(src2) 176 | return src 177 | 178 | def forward(self, src, 179 | src_mask: Optional[Tensor] = None, 180 | src_key_padding_mask: Optional[Tensor] = None, 181 | pos: Optional[Tensor] = None): 182 | if self.normalize_before: 183 | return self.forward_pre(src, src_mask, src_key_padding_mask, pos) 184 | return self.forward_post(src, src_mask, src_key_padding_mask, pos) 185 | 186 | 187 | class TransformerDecoderLayer(nn.Module): 188 | 189 | def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, 190 | activation="relu", normalize_before=False): 191 | super().__init__() 192 | self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) 193 | self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) 194 | # Implementation of Feedforward model 195 | self.linear1 = nn.Linear(d_model, dim_feedforward) 196 | self.dropout = nn.Dropout(dropout) 197 | self.linear2 = nn.Linear(dim_feedforward, d_model) 198 | 199 | self.norm1 = nn.LayerNorm(d_model) 200 | self.norm2 = 
nn.LayerNorm(d_model) 201 | self.norm3 = nn.LayerNorm(d_model) 202 | self.dropout1 = nn.Dropout(dropout) 203 | self.dropout2 = nn.Dropout(dropout) 204 | self.dropout3 = nn.Dropout(dropout) 205 | 206 | self.activation = _get_activation_fn(activation) 207 | self.normalize_before = normalize_before 208 | 209 | def with_pos_embed(self, tensor, pos: Optional[Tensor]): 210 | return tensor if pos is None else tensor + pos 211 | 212 | def forward_post(self, tgt, memory, 213 | tgt_mask: Optional[Tensor] = None, 214 | memory_mask: Optional[Tensor] = None, 215 | tgt_key_padding_mask: Optional[Tensor] = None, 216 | memory_key_padding_mask: Optional[Tensor] = None, 217 | pos: Optional[Tensor] = None, 218 | query_pos: Optional[Tensor] = None): 219 | q = k = self.with_pos_embed(tgt, query_pos) 220 | tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, 221 | key_padding_mask=tgt_key_padding_mask)[0] 222 | tgt = tgt + self.dropout1(tgt2) 223 | tgt = self.norm1(tgt) 224 | tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), 225 | key=self.with_pos_embed(memory, pos), 226 | value=memory, attn_mask=memory_mask, 227 | key_padding_mask=memory_key_padding_mask)[0] 228 | tgt = tgt + self.dropout2(tgt2) 229 | tgt = self.norm2(tgt) 230 | tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) 231 | tgt = tgt + self.dropout3(tgt2) 232 | tgt = self.norm3(tgt) 233 | return tgt 234 | 235 | def forward_pre(self, tgt, memory, 236 | tgt_mask: Optional[Tensor] = None, 237 | memory_mask: Optional[Tensor] = None, 238 | tgt_key_padding_mask: Optional[Tensor] = None, 239 | memory_key_padding_mask: Optional[Tensor] = None, 240 | pos: Optional[Tensor] = None, 241 | query_pos: Optional[Tensor] = None): 242 | tgt2 = self.norm1(tgt) 243 | q = k = self.with_pos_embed(tgt2, query_pos) 244 | tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, 245 | key_padding_mask=tgt_key_padding_mask)[0] 246 | tgt = tgt + self.dropout1(tgt2) 247 | tgt2 = self.norm2(tgt) 248 | tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), 249 | key=self.with_pos_embed(memory, pos), 250 | value=memory, attn_mask=memory_mask, 251 | key_padding_mask=memory_key_padding_mask)[0] 252 | tgt = tgt + self.dropout2(tgt2) 253 | tgt2 = self.norm3(tgt) 254 | tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) 255 | tgt = tgt + self.dropout3(tgt2) 256 | return tgt 257 | 258 | def forward(self, tgt, memory, 259 | tgt_mask: Optional[Tensor] = None, 260 | memory_mask: Optional[Tensor] = None, 261 | tgt_key_padding_mask: Optional[Tensor] = None, 262 | memory_key_padding_mask: Optional[Tensor] = None, 263 | pos: Optional[Tensor] = None, 264 | query_pos: Optional[Tensor] = None): 265 | if self.normalize_before: 266 | return self.forward_pre(tgt, memory, tgt_mask, memory_mask, 267 | tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) 268 | return self.forward_post(tgt, memory, tgt_mask, memory_mask, 269 | tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) 270 | 271 | 272 | def _get_clones(module, N): 273 | return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) 274 | 275 | 276 | def build_transformer(args): 277 | return Transformer( 278 | d_model=args.hidden_dim, 279 | dropout=args.dropout, 280 | nhead=args.nheads, 281 | dim_feedforward=args.dim_feedforward, 282 | num_encoder_layers=args.enc_layers, 283 | num_decoder_layers=args.dec_layers, 284 | normalize_before=args.pre_norm, 285 | return_intermediate_dec=True, 286 | ) 287 | 288 | 289 | def 
_get_activation_fn(activation): 290 | """Return an activation function given a string""" 291 | if activation == "relu": 292 | return F.relu 293 | if activation == "gelu": 294 | return F.gelu 295 | if activation == "glu": 296 | return F.glu 297 | raise RuntimeError(F"activation should be relu/gelu, not {activation}.") 298 | -------------------------------------------------------------------------------- /detr/models/segmentation.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | This file provides the definition of the convolutional heads used to predict masks, as well as the losses 4 | """ 5 | import io 6 | from collections import defaultdict 7 | from typing import List, Optional 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | from torch import Tensor 13 | from PIL import Image 14 | 15 | import util.box_ops as box_ops 16 | from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list 17 | 18 | try: 19 | from panopticapi.utils import id2rgb, rgb2id 20 | except ImportError: 21 | pass 22 | 23 | 24 | class DETRsegm(nn.Module): 25 | def __init__(self, detr, freeze_detr=False): 26 | super().__init__() 27 | self.detr = detr 28 | 29 | if freeze_detr: 30 | for p in self.parameters(): 31 | p.requires_grad_(False) 32 | 33 | hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead 34 | self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) 35 | self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) 36 | 37 | def forward(self, samples: NestedTensor): 38 | if isinstance(samples, (list, torch.Tensor)): 39 | samples = nested_tensor_from_tensor_list(samples) 40 | features, pos = self.detr.backbone(samples) 41 | 42 | bs = features[-1].tensors.shape[0] 43 | 44 | src, mask = features[-1].decompose() 45 | assert mask is not None 46 | src_proj = self.detr.input_proj(src) 47 | hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) 48 | 49 | outputs_class = self.detr.class_embed(hs) 50 | outputs_coord = self.detr.bbox_embed(hs).sigmoid() 51 | out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} 52 | if self.detr.aux_loss: 53 | out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) 54 | 55 | # FIXME h_boxes takes the last one computed, keep this in mind 56 | bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) 57 | 58 | seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) 59 | outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) 60 | 61 | out["pred_masks"] = outputs_seg_masks 62 | return out 63 | 64 | 65 | def _expand(tensor, length: int): 66 | return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) 67 | 68 | 69 | class MaskHeadSmallConv(nn.Module): 70 | """ 71 | Simple convolutional head, using group norm. 
72 | Upsampling is done using a FPN approach 73 | """ 74 | 75 | def __init__(self, dim, fpn_dims, context_dim): 76 | super().__init__() 77 | 78 | inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] 79 | self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) 80 | self.gn1 = torch.nn.GroupNorm(8, dim) 81 | self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) 82 | self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) 83 | self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) 84 | self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) 85 | self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) 86 | self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) 87 | self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) 88 | self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) 89 | self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) 90 | 91 | self.dim = dim 92 | 93 | self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) 94 | self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) 95 | self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) 96 | 97 | for m in self.modules(): 98 | if isinstance(m, nn.Conv2d): 99 | nn.init.kaiming_uniform_(m.weight, a=1) 100 | nn.init.constant_(m.bias, 0) 101 | 102 | def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): 103 | x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) 104 | 105 | x = self.lay1(x) 106 | x = self.gn1(x) 107 | x = F.relu(x) 108 | x = self.lay2(x) 109 | x = self.gn2(x) 110 | x = F.relu(x) 111 | 112 | cur_fpn = self.adapter1(fpns[0]) 113 | if cur_fpn.size(0) != x.size(0): 114 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 115 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 116 | x = self.lay3(x) 117 | x = self.gn3(x) 118 | x = F.relu(x) 119 | 120 | cur_fpn = self.adapter2(fpns[1]) 121 | if cur_fpn.size(0) != x.size(0): 122 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 123 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 124 | x = self.lay4(x) 125 | x = self.gn4(x) 126 | x = F.relu(x) 127 | 128 | cur_fpn = self.adapter3(fpns[2]) 129 | if cur_fpn.size(0) != x.size(0): 130 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 131 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 132 | x = self.lay5(x) 133 | x = self.gn5(x) 134 | x = F.relu(x) 135 | 136 | x = self.out_lay(x) 137 | return x 138 | 139 | 140 | class MHAttentionMap(nn.Module): 141 | """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" 142 | 143 | def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): 144 | super().__init__() 145 | self.num_heads = num_heads 146 | self.hidden_dim = hidden_dim 147 | self.dropout = nn.Dropout(dropout) 148 | 149 | self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) 150 | self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) 151 | 152 | nn.init.zeros_(self.k_linear.bias) 153 | nn.init.zeros_(self.q_linear.bias) 154 | nn.init.xavier_uniform_(self.k_linear.weight) 155 | nn.init.xavier_uniform_(self.q_linear.weight) 156 | self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 157 | 158 | def forward(self, q, k, mask: Optional[Tensor] = None): 159 | q = self.q_linear(q) 160 | k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) 161 | qh = q.view(q.shape[0], q.shape[1], 
self.num_heads, self.hidden_dim // self.num_heads) 162 | kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) 163 | weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) 164 | 165 | if mask is not None: 166 | weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) 167 | weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights) 168 | weights = self.dropout(weights) 169 | return weights 170 | 171 | 172 | def dice_loss(inputs, targets, num_boxes): 173 | """ 174 | Compute the DICE loss, similar to generalized IOU for masks 175 | Args: 176 | inputs: A float tensor of arbitrary shape. 177 | The predictions for each example. 178 | targets: A float tensor with the same shape as inputs. Stores the binary 179 | classification label for each element in inputs 180 | (0 for the negative class and 1 for the positive class). 181 | """ 182 | inputs = inputs.sigmoid() 183 | inputs = inputs.flatten(1) 184 | numerator = 2 * (inputs * targets).sum(1) 185 | denominator = inputs.sum(-1) + targets.sum(-1) 186 | loss = 1 - (numerator + 1) / (denominator + 1) 187 | return loss.sum() / num_boxes 188 | 189 | 190 | def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): 191 | """ 192 | Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. 193 | Args: 194 | inputs: A float tensor of arbitrary shape. 195 | The predictions for each example. 196 | targets: A float tensor with the same shape as inputs. Stores the binary 197 | classification label for each element in inputs 198 | (0 for the negative class and 1 for the positive class). 199 | alpha: (optional) Weighting factor in range (0,1) to balance 200 | positive vs negative examples. Default = -1 (no weighting). 201 | gamma: Exponent of the modulating factor (1 - p_t) to 202 | balance easy vs hard examples. 
203 | Returns: 204 | Loss tensor 205 | """ 206 | prob = inputs.sigmoid() 207 | ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") 208 | p_t = prob * targets + (1 - prob) * (1 - targets) 209 | loss = ce_loss * ((1 - p_t) ** gamma) 210 | 211 | if alpha >= 0: 212 | alpha_t = alpha * targets + (1 - alpha) * (1 - targets) 213 | loss = alpha_t * loss 214 | 215 | return loss.mean(1).sum() / num_boxes 216 | 217 | 218 | class PostProcessSegm(nn.Module): 219 | def __init__(self, threshold=0.5): 220 | super().__init__() 221 | self.threshold = threshold 222 | 223 | @torch.no_grad() 224 | def forward(self, results, outputs, orig_target_sizes, max_target_sizes): 225 | assert len(orig_target_sizes) == len(max_target_sizes) 226 | max_h, max_w = max_target_sizes.max(0)[0].tolist() 227 | outputs_masks = outputs["pred_masks"].squeeze(2) 228 | outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) 229 | outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() 230 | 231 | for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): 232 | img_h, img_w = t[0], t[1] 233 | results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) 234 | results[i]["masks"] = F.interpolate( 235 | results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" 236 | ).byte() 237 | 238 | return results 239 | 240 | 241 | class PostProcessPanoptic(nn.Module): 242 | """This class converts the output of the model to the final panoptic result, in the format expected by the 243 | coco panoptic API """ 244 | 245 | def __init__(self, is_thing_map, threshold=0.85): 246 | """ 247 | Parameters: 248 | is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether 249 | the class is a thing (True) or a stuff (False) class 250 | threshold: confidence threshold: segments with confidence lower than this will be deleted 251 | """ 252 | super().__init__() 253 | self.threshold = threshold 254 | self.is_thing_map = is_thing_map 255 | 256 | def forward(self, outputs, processed_sizes, target_sizes=None): 257 | """ This function computes the panoptic prediction from the model's predictions. 258 | Parameters: 259 | outputs: This is a dict coming directly from the model. See the model doc for the content. 260 | processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the 261 | model, ie the size after data augmentation but before batching. 262 | target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size 263 | of each prediction. 
If left to None, it will default to the processed_sizes 264 | """ 265 | if target_sizes is None: 266 | target_sizes = processed_sizes 267 | assert len(processed_sizes) == len(target_sizes) 268 | out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] 269 | assert len(out_logits) == len(raw_masks) == len(target_sizes) 270 | preds = [] 271 | 272 | def to_tuple(tup): 273 | if isinstance(tup, tuple): 274 | return tup 275 | return tuple(tup.cpu().tolist()) 276 | 277 | for cur_logits, cur_masks, cur_boxes, size, target_size in zip( 278 | out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes 279 | ): 280 | # we filter empty queries and detection below threshold 281 | scores, labels = cur_logits.softmax(-1).max(-1) 282 | keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) 283 | cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) 284 | cur_scores = cur_scores[keep] 285 | cur_classes = cur_classes[keep] 286 | cur_masks = cur_masks[keep] 287 | cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0) 288 | cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) 289 | 290 | h, w = cur_masks.shape[-2:] 291 | assert len(cur_boxes) == len(cur_classes) 292 | 293 | # It may be that we have several predicted masks for the same stuff class. 294 | # In the following, we track the list of masks ids for each stuff class (they are merged later on) 295 | cur_masks = cur_masks.flatten(1) 296 | stuff_equiv_classes = defaultdict(lambda: []) 297 | for k, label in enumerate(cur_classes): 298 | if not self.is_thing_map[label.item()]: 299 | stuff_equiv_classes[label.item()].append(k) 300 | 301 | def get_ids_area(masks, scores, dedup=False): 302 | # This helper function creates the final panoptic segmentation image 303 | # It also returns the area of the masks that appears on the image 304 | 305 | m_id = masks.transpose(0, 1).softmax(-1) 306 | 307 | if m_id.shape[-1] == 0: 308 | # We didn't detect any mask :( 309 | m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) 310 | else: 311 | m_id = m_id.argmax(-1).view(h, w) 312 | 313 | if dedup: 314 | # Merge the masks corresponding to the same stuff class 315 | for equiv in stuff_equiv_classes.values(): 316 | if len(equiv) > 1: 317 | for eq_id in equiv: 318 | m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) 319 | 320 | final_h, final_w = to_tuple(target_size) 321 | 322 | seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) 323 | seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) 324 | 325 | np_seg_img = ( 326 | torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() 327 | ) 328 | m_id = torch.from_numpy(rgb2id(np_seg_img)) 329 | 330 | area = [] 331 | for i in range(len(scores)): 332 | area.append(m_id.eq(i).sum().item()) 333 | return area, seg_img 334 | 335 | area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) 336 | if cur_classes.numel() > 0: 337 | # We know filter empty masks as long as we find some 338 | while True: 339 | filtered_small = torch.as_tensor( 340 | [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device 341 | ) 342 | if filtered_small.any().item(): 343 | cur_scores = cur_scores[~filtered_small] 344 | cur_classes = cur_classes[~filtered_small] 345 | cur_masks = cur_masks[~filtered_small] 346 | area, seg_img = get_ids_area(cur_masks, cur_scores) 347 | else: 348 | break 349 | 350 | else: 351 | cur_classes = 
torch.ones(1, dtype=torch.long, device=cur_classes.device) 352 | 353 | segments_info = [] 354 | for i, a in enumerate(area): 355 | cat = cur_classes[i].item() 356 | segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) 357 | del cur_classes 358 | 359 | with io.BytesIO() as out: 360 | seg_img.save(out, format="PNG") 361 | predictions = {"png_string": out.getvalue(), "segments_info": segments_info} 362 | preds.append(predictions) 363 | return preds 364 | --------------------------------------------------------------------------------
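Usage note (not part of the repository): the snippet below is a minimal, hedged sketch of the tensor shapes that `Transformer.forward` in detr/models/transformer.py expects and returns. The batch size, feature-map size, number of queries, and layer counts are arbitrary illustration values, and the import assumes the detr/ directory is on PYTHONPATH with its requirements installed.

import torch
from models.transformer import Transformer  # assumes detr/ is the working directory

# Small configuration purely for illustration; the repo's defaults differ.
transformer = Transformer(d_model=256, nhead=8,
                          num_encoder_layers=2, num_decoder_layers=2,
                          dim_feedforward=512, return_intermediate_dec=True)

bs, c, h, w = 2, 256, 25, 34       # projected backbone feature map; c must equal d_model
num_queries = 100

src = torch.randn(bs, c, h, w)                    # backbone features after input_proj
mask = torch.zeros(bs, h, w, dtype=torch.bool)    # True marks padded pixels (none here)
query_embed = torch.randn(num_queries, c)         # learned object-query embeddings
pos_embed = torch.randn(bs, c, h, w)              # positional encoding, same shape as src

hs, memory = transformer(src, mask, query_embed, pos_embed)
# hs:     [num_decoder_layers, bs, num_queries, d_model] -> here torch.Size([2, 2, 100, 256])
#         (one entry per decoder layer because return_intermediate_dec=True)
# memory: [bs, d_model, h, w] -> encoder output reshaped back to the feature-map layout

This mirrors what DETRsegm in detr/models/segmentation.py does internally: it feeds the projected backbone features, padding mask, query embeddings, and positional encoding to the transformer, then uses the last decoder layer of `hs` for classification/box heads and `memory` for the mask attention head.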