├── .idea ├── .gitignore ├── vcs.xml ├── inspectionProfiles │ └── profiles_settings.xml ├── modules.xml ├── misc.xml ├── deployment.xml ├── detr.iml └── remote-mappings.xml ├── .github ├── DETR.png ├── pnp-detr.png ├── CODE_OF_CONDUCT.md ├── ISSUE_TEMPLATE │ ├── bugs.md │ ├── questions-help-support.md │ └── unexpected-problems-bugs.md └── CONTRIBUTING.md ├── tox.ini ├── util ├── __init__.py ├── box_ops.py ├── plot_utils.py └── misc.py ├── models ├── __init__.py ├── position_encoding.py ├── matcher.py ├── backbone.py ├── sampler.py ├── segmentation.py └── detr.py ├── requirements.txt ├── .gitignore ├── Dockerfile ├── .circleci └── config.yml ├── datasets ├── __init__.py ├── panoptic_eval.py ├── coco_panoptic.py ├── sample_coco.py ├── coco.py ├── transforms.py └── coco_eval.py ├── run_with_submitit.py ├── test_all.py ├── README.md ├── hubconf.py ├── engine.py ├── analyze_grad.py ├── compute_flops.py ├── jit_handles.py ├── flop_count.py ├── LICENSE └── main.py /.idea/.gitignore: -------------------------------------------------------------------------------- 1 | # Default ignored files 2 | /workspace.xml 3 | /shelf/ 4 | -------------------------------------------------------------------------------- /.github/DETR.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twangnh/pnp-detr/HEAD/.github/DETR.png -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | ignore = F401,E402,F403,W503,W504 4 | -------------------------------------------------------------------------------- /.github/pnp-detr.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/twangnh/pnp-detr/HEAD/.github/pnp-detr.png -------------------------------------------------------------------------------- /util/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | from .detr import build 3 | 4 | 5 | def build_model(args): 6 | return build(args) 7 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /.idea/inspectionProfiles/profiles_settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cython 2 | git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools 3 | submitit 4 | torch>=1.5.0 5 | torchvision>=0.6.0 6 | git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi 7 | scipy 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .nfs* 2 | *.ipynb 3 | *.pyc 4 | .dumbo.json 5 | .DS_Store 6 | .*.swp 7 | *.pth 8 | **/__pycache__/** 9 | .ipynb_checkpoints/ 10 | datasets/data/ 11 | experiment-* 12 | *.tmp 13 | *.pkl 14 | **/.mypy_cache/* 15 | .mypy_cache/* 16 | not_tracked_dir/ 17 | .vscode 18 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | Facebook has adopted a Code of Conduct that we expect project participants to adhere to. 4 | Please read the [full text](https://code.fb.com/codeofconduct/) 5 | so that you can understand what actions will and will not be tolerated. 
6 | -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.idea/misc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update -qq && \ 6 | apt-get install -y git vim libgtk2.0-dev && \ 7 | rm -rf /var/cache/apk/* 8 | 9 | RUN pip --no-cache-dir install Cython 10 | 11 | COPY requirements.txt /workspace 12 | 13 | RUN pip --no-cache-dir install -r /workspace/requirements.txt 14 | -------------------------------------------------------------------------------- /.idea/deployment.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /.idea/detr.iml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 12 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | jobs: 4 | python_lint: 5 | docker: 6 | - image: circleci/python:3.7 7 | steps: 8 | - checkout 9 | - run: 10 | command: | 11 | pip install --user --progress-bar off flake8 typing 12 | flake8 . 13 | 14 | test: 15 | docker: 16 | - image: circleci/python:3.7 17 | steps: 18 | - checkout 19 | - run: 20 | command: | 21 | pip install --user --progress-bar off scipy pytest 22 | pip install --user --progress-bar off --pre torch torchvision -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html 23 | pytest . 24 | 25 | workflows: 26 | build: 27 | jobs: 28 | - python_lint 29 | - test 30 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "🐛 Bugs" 3 | about: Report bugs in DETR 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | ## Instructions To Reproduce the 🐛 Bug: 9 | 10 | 1. what changes you made (`git diff`) or what code you wrote 11 | ``` 12 | 13 | ``` 14 | 2. what exact command you run: 15 | 3. what you observed (including __full logs__): 16 | ``` 17 | 18 | ``` 19 | 4. please simplify the steps as much as possible so they do not require additional resources to 20 | run, such as a private dataset. 21 | 22 | ## Expected behavior: 23 | 24 | If there are no obvious error in "what you observed" provided above, 25 | please tell us the expected behavior. 
26 | 27 | ## Environment: 28 | 29 | Provide your environment information using the following command: 30 | ``` 31 | python -m torch.utils.collect_env 32 | ``` 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/questions-help-support.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "How to do something❓" 3 | about: How to do something using DETR? 4 | 5 | --- 6 | 7 | ## ❓ How to do something using DETR 8 | 9 | Describe what you want to do, including: 10 | 1. what inputs you will provide, if any: 11 | 2. what outputs you are expecting: 12 | 13 | 14 | NOTE: 15 | 16 | 1. Only general answers are provided. 17 | If you want to ask about "why X did not work", please use the 18 | [Unexpected behaviors](https://github.com/facebookresearch/detr/issues/new/choose) issue template. 19 | 20 | 2. About how to implement new models / new dataloader / new training logic, etc., check documentation first. 21 | 22 | 3. We do not answer general machine learning / computer vision questions that are not specific to DETR, such as how a model works, how to improve your training/make it converge, or what algorithm/methods can be used to achieve X. 23 | -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import torch.utils.data 3 | import torchvision 4 | 5 | from .coco import build as build_coco 6 | 7 | 8 | def get_coco_api_from_dataset(dataset): 9 | for _ in range(10): 10 | # if isinstance(dataset, torchvision.datasets.CocoDetection): 11 | # break 12 | if isinstance(dataset, torch.utils.data.Subset): 13 | dataset = dataset.dataset 14 | if isinstance(dataset, torchvision.datasets.CocoDetection): 15 | return dataset.coco 16 | 17 | 18 | def build_dataset(image_set, args): 19 | if args.dataset_file == 'coco': 20 | return build_coco(image_set, args) 21 | if args.dataset_file == 'coco_panoptic': 22 | # to avoid making panopticapi required for coco 23 | from .coco_panoptic import build as build_coco_panoptic 24 | return build_coco_panoptic(image_set, args) 25 | raise ValueError(f'dataset {args.dataset_file} not supported') 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/unexpected-problems-bugs.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Unexpected behaviors" 3 | about: Run into unexpected behaviors when using DETR 4 | title: Please read & provide the following 5 | 6 | --- 7 | 8 | If you do not know the root cause of the problem, and wish someone to help you, please 9 | post according to this template: 10 | 11 | ## Instructions To Reproduce the Issue: 12 | 13 | 1. what changes you made (`git diff`) or what code you wrote 14 | ``` 15 | 16 | ``` 17 | 2. what exact command you run: 18 | 3. what you observed (including __full logs__): 19 | ``` 20 | 21 | ``` 22 | 4. please simplify the steps as much as possible so they do not require additional resources to 23 | run, such as a private dataset. 24 | 25 | ## Expected behavior: 26 | 27 | If there are no obvious error in "what you observed" provided above, 28 | please tell us the expected behavior. 29 | 30 | If you expect the model to converge / work better, note that we do not give suggestions 31 | on how to train a new model. 
32 | Only in one of the two conditions we will help with it: 33 | (1) You're unable to reproduce the results in DETR model zoo. 34 | (2) It indicates a DETR bug. 35 | 36 | ## Environment: 37 | 38 | Provide your environment information using the following command: 39 | ``` 40 | python -m torch.utils.collect_env 41 | ``` 42 | -------------------------------------------------------------------------------- /datasets/panoptic_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import json 3 | import os 4 | 5 | import util.misc as utils 6 | 7 | try: 8 | from panopticapi.evaluation import pq_compute 9 | except ImportError: 10 | pass 11 | 12 | 13 | class PanopticEvaluator(object): 14 | def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): 15 | self.gt_json = ann_file 16 | self.gt_folder = ann_folder 17 | if utils.is_main_process(): 18 | if not os.path.exists(output_dir): 19 | os.mkdir(output_dir) 20 | self.output_dir = output_dir 21 | self.predictions = [] 22 | 23 | def update(self, predictions): 24 | for p in predictions: 25 | with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: 26 | f.write(p.pop("png_string")) 27 | 28 | self.predictions += predictions 29 | 30 | def synchronize_between_processes(self): 31 | all_predictions = utils.all_gather(self.predictions) 32 | merged_predictions = [] 33 | for p in all_predictions: 34 | merged_predictions += p 35 | self.predictions = merged_predictions 36 | 37 | def summarize(self): 38 | if utils.is_main_process(): 39 | json_data = {"annotations": self.predictions} 40 | predictions_json = os.path.join(self.output_dir, "predictions.json") 41 | with open(predictions_json, "w") as f: 42 | f.write(json.dumps(json_data)) 43 | return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) 44 | return None 45 | -------------------------------------------------------------------------------- /.idea/remote-mappings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to DETR 2 | We want to make contributing to this project as easy and transparent as 3 | possible. 4 | 5 | ## Our Development Process 6 | Minor changes and improvements will be released on an ongoing basis. Larger changes (e.g., changesets implementing a new paper) will be released on a more periodic basis. 7 | 8 | ## Pull Requests 9 | We actively welcome your pull requests. 10 | 11 | 1. Fork the repo and create your branch from `master`. 12 | 2. If you've added code that should be tested, add tests. 13 | 3. If you've changed APIs, update the documentation. 14 | 4. Ensure the test suite passes. 15 | 5. Make sure your code lints. 16 | 6. If you haven't already, complete the Contributor License Agreement ("CLA"). 17 | 18 | ## Contributor License Agreement ("CLA") 19 | In order to accept your pull request, we need you to submit a CLA. You only need 20 | to do this once to work on any of Facebook's open source projects. 
21 | 22 | Complete your CLA here: 23 | 24 | ## Issues 25 | We use GitHub issues to track public bugs. Please ensure your description is 26 | clear and has sufficient instructions to be able to reproduce the issue. 27 | 28 | Facebook has a [bounty program](https://www.facebook.com/whitehat/) for the safe 29 | disclosure of security bugs. In those cases, please go through the process 30 | outlined on that page and do not file a public issue. 31 | 32 | ## Coding Style 33 | * 4 spaces for indentation rather than tabs 34 | * 80 character line length 35 | * PEP8 formatting following [Black](https://black.readthedocs.io/en/stable/) 36 | 37 | ## License 38 | By contributing to DETR, you agree that your contributions will be licensed 39 | under the LICENSE file in the root directory of this source tree. 40 | -------------------------------------------------------------------------------- /util/box_ops.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Utilities for bounding box manipulation and GIoU. 4 | """ 5 | import torch 6 | from torchvision.ops.boxes import box_area 7 | 8 | 9 | def box_cxcywh_to_xyxy(x): 10 | x_c, y_c, w, h = x.unbind(-1) 11 | b = [(x_c - 0.5 * w), (y_c - 0.5 * h), 12 | (x_c + 0.5 * w), (y_c + 0.5 * h)] 13 | return torch.stack(b, dim=-1) 14 | 15 | 16 | def box_xyxy_to_cxcywh(x): 17 | x0, y0, x1, y1 = x.unbind(-1) 18 | b = [(x0 + x1) / 2, (y0 + y1) / 2, 19 | (x1 - x0), (y1 - y0)] 20 | return torch.stack(b, dim=-1) 21 | 22 | 23 | # modified from torchvision to also return the union 24 | def box_iou(boxes1, boxes2): 25 | area1 = box_area(boxes1) 26 | area2 = box_area(boxes2) 27 | 28 | lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] 29 | rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] 30 | 31 | wh = (rb - lt).clamp(min=0) # [N,M,2] 32 | inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] 33 | 34 | union = area1[:, None] + area2 - inter 35 | 36 | iou = inter / union 37 | return iou, union 38 | 39 | 40 | def generalized_box_iou(boxes1, boxes2): 41 | """ 42 | Generalized IoU from https://giou.stanford.edu/ 43 | 44 | The boxes should be in [x0, y0, x1, y1] format 45 | 46 | Returns a [N, M] pairwise matrix, where N = len(boxes1) 47 | and M = len(boxes2) 48 | """ 49 | # degenerate boxes gives inf / nan results 50 | # so do an early check 51 | assert (boxes1[:, 2:] >= boxes1[:, :2]).all() 52 | assert (boxes2[:, 2:] >= boxes2[:, :2]).all() 53 | iou, union = box_iou(boxes1, boxes2) 54 | 55 | lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) 56 | rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) 57 | 58 | wh = (rb - lt).clamp(min=0) # [N,M,2] 59 | area = wh[:, :, 0] * wh[:, :, 1] 60 | 61 | return iou - (area - union) / area 62 | 63 | 64 | def masks_to_boxes(masks): 65 | """Compute the bounding boxes around the provided masks 66 | 67 | The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
68 | 69 | Returns a [N, 4] tensors, with the boxes in xyxy format 70 | """ 71 | if masks.numel() == 0: 72 | return torch.zeros((0, 4), device=masks.device) 73 | 74 | h, w = masks.shape[-2:] 75 | 76 | y = torch.arange(0, h, dtype=torch.float) 77 | x = torch.arange(0, w, dtype=torch.float) 78 | y, x = torch.meshgrid(y, x) 79 | 80 | x_mask = (masks * x.unsqueeze(0)) 81 | x_max = x_mask.flatten(1).max(-1)[0] 82 | x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] 83 | 84 | y_mask = (masks * y.unsqueeze(0)) 85 | y_max = y_mask.flatten(1).max(-1)[0] 86 | y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] 87 | 88 | return torch.stack([x_min, y_min, x_max, y_max], 1) 89 | -------------------------------------------------------------------------------- /models/position_encoding.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Various positional encodings for the transformer. 4 | """ 5 | import math 6 | import torch 7 | from torch import nn 8 | 9 | from util.misc import NestedTensor 10 | 11 | 12 | class PositionEmbeddingSine(nn.Module): 13 | """ 14 | This is a more standard version of the position embedding, very similar to the one 15 | used by the Attention is all you need paper, generalized to work on images. 16 | """ 17 | def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): 18 | super().__init__() 19 | self.num_pos_feats = num_pos_feats 20 | self.temperature = temperature 21 | self.normalize = normalize 22 | if scale is not None and normalize is False: 23 | raise ValueError("normalize should be True if scale is passed") 24 | if scale is None: 25 | scale = 2 * math.pi 26 | self.scale = scale 27 | 28 | def forward(self, tensor_list: NestedTensor): 29 | x = tensor_list.tensors 30 | mask = tensor_list.mask 31 | assert mask is not None 32 | not_mask = ~mask 33 | y_embed = not_mask.cumsum(1, dtype=torch.float32) 34 | x_embed = not_mask.cumsum(2, dtype=torch.float32) 35 | if self.normalize: 36 | eps = 1e-6 37 | y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale 38 | x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale 39 | 40 | dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) 41 | dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) 42 | 43 | pos_x = x_embed[:, :, :, None] / dim_t 44 | pos_y = y_embed[:, :, :, None] / dim_t 45 | pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) 46 | pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) 47 | pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) 48 | return pos 49 | 50 | 51 | class PositionEmbeddingLearned(nn.Module): 52 | """ 53 | Absolute pos embedding, learned. 
54 | """ 55 | def __init__(self, num_pos_feats=256): 56 | super().__init__() 57 | self.row_embed = nn.Embedding(50, num_pos_feats) 58 | self.col_embed = nn.Embedding(50, num_pos_feats) 59 | self.reset_parameters() 60 | 61 | def reset_parameters(self): 62 | nn.init.uniform_(self.row_embed.weight) 63 | nn.init.uniform_(self.col_embed.weight) 64 | 65 | def forward(self, tensor_list: NestedTensor): 66 | x = tensor_list.tensors 67 | h, w = x.shape[-2:] 68 | i = torch.arange(w, device=x.device) 69 | j = torch.arange(h, device=x.device) 70 | x_emb = self.col_embed(i) 71 | y_emb = self.row_embed(j) 72 | pos = torch.cat([ 73 | x_emb.unsqueeze(0).repeat(h, 1, 1), 74 | y_emb.unsqueeze(1).repeat(1, w, 1), 75 | ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) 76 | return pos 77 | 78 | 79 | def build_position_encoding(args): 80 | N_steps = args.hidden_dim // 2 81 | if args.position_embedding in ('v2', 'sine'): 82 | # TODO find a better way of exposing other arguments 83 | position_embedding = PositionEmbeddingSine(N_steps, normalize=True) 84 | elif args.position_embedding in ('v3', 'learned'): 85 | position_embedding = PositionEmbeddingLearned(N_steps) 86 | else: 87 | raise ValueError(f"not supported {args.position_embedding}") 88 | 89 | return position_embedding 90 | -------------------------------------------------------------------------------- /run_with_submitit.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | A script to run multinode training with submitit. 4 | """ 5 | import argparse 6 | import os 7 | import uuid 8 | from pathlib import Path 9 | 10 | import main as detection 11 | import submitit 12 | 13 | 14 | def parse_args(): 15 | detection_parser = detection.get_args_parser() 16 | parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser]) 17 | parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") 18 | parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request") 19 | parser.add_argument("--timeout", default=60, type=int, help="Duration of the job") 20 | parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") 21 | return parser.parse_args() 22 | 23 | 24 | def get_shared_folder() -> Path: 25 | user = os.getenv("USER") 26 | if Path("/checkpoint/").is_dir(): 27 | p = Path(f"/checkpoint/{user}/experiments") 28 | p.mkdir(exist_ok=True) 29 | return p 30 | raise RuntimeError("No shared folder available") 31 | 32 | 33 | def get_init_file(): 34 | # Init file must not exist, but it's parent dir must exist. 
35 | os.makedirs(str(get_shared_folder()), exist_ok=True) 36 | init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" 37 | if init_file.exists(): 38 | os.remove(str(init_file)) 39 | return init_file 40 | 41 | 42 | class Trainer(object): 43 | def __init__(self, args): 44 | self.args = args 45 | 46 | def __call__(self): 47 | import main as detection 48 | 49 | self._setup_gpu_args() 50 | detection.main(self.args) 51 | 52 | def checkpoint(self): 53 | import os 54 | import submitit 55 | from pathlib import Path 56 | 57 | self.args.dist_url = get_init_file().as_uri() 58 | checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") 59 | if os.path.exists(checkpoint_file): 60 | self.args.resume = checkpoint_file 61 | print("Requeuing ", self.args) 62 | empty_trainer = type(self)(self.args) 63 | return submitit.helpers.DelayedSubmission(empty_trainer) 64 | 65 | def _setup_gpu_args(self): 66 | import submitit 67 | from pathlib import Path 68 | 69 | job_env = submitit.JobEnvironment() 70 | self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) 71 | self.args.gpu = job_env.local_rank 72 | self.args.rank = job_env.global_rank 73 | self.args.world_size = job_env.num_tasks 74 | print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") 75 | 76 | 77 | def main(): 78 | args = parse_args() 79 | if args.job_dir == "": 80 | args.job_dir = get_shared_folder() / "%j" 81 | 82 | # Note that the folder will depend on the job_id, to easily track experiments 83 | executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) 84 | 85 | # cluster setup is defined by environment variables 86 | num_gpus_per_node = args.ngpus 87 | nodes = args.nodes 88 | timeout_min = args.timeout 89 | 90 | executor.update_parameters( 91 | mem_gb=40 * num_gpus_per_node, 92 | gpus_per_node=num_gpus_per_node, 93 | tasks_per_node=num_gpus_per_node, # one task per GPU 94 | cpus_per_task=10, 95 | nodes=nodes, 96 | timeout_min=timeout_min, # max is 60 * 72 97 | ) 98 | 99 | executor.update_parameters(name="detr") 100 | 101 | args.dist_url = get_init_file().as_uri() 102 | args.output_dir = args.job_dir 103 | 104 | trainer = Trainer(args) 105 | job = executor.submit(trainer) 106 | 107 | print("Submitted job_id:", job.job_id) 108 | 109 | 110 | if __name__ == "__main__": 111 | main() 112 | -------------------------------------------------------------------------------- /test_all.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | import unittest 3 | 4 | import torch 5 | 6 | from models.matcher import HungarianMatcher 7 | from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned 8 | from models.backbone import Backbone, Joiner, BackboneBase 9 | from util import box_ops 10 | from util.misc import nested_tensor_from_tensor_list 11 | from hubconf import detr_resnet50, detr_resnet50_panoptic 12 | 13 | 14 | class Tester(unittest.TestCase): 15 | 16 | def test_box_cxcywh_to_xyxy(self): 17 | t = torch.rand(10, 4) 18 | r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t)) 19 | self.assertLess((t - r).abs().max(), 1e-5) 20 | 21 | @staticmethod 22 | def indices_torch2python(indices): 23 | return [(i.tolist(), j.tolist()) for i, j in indices] 24 | 25 | def test_hungarian(self): 26 | n_queries, n_targets, n_classes = 100, 15, 91 27 | logits = torch.rand(1, n_queries, n_classes + 1) 28 | boxes = torch.rand(1, n_queries, 4) 29 | tgt_labels = torch.randint(high=n_classes, size=(n_targets,)) 30 | tgt_boxes = torch.rand(n_targets, 4) 31 | matcher = HungarianMatcher() 32 | targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}] 33 | indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets) 34 | indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1), 35 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2) 36 | self.assertEqual(len(indices_single[0][0]), n_targets) 37 | self.assertEqual(len(indices_single[0][1]), n_targets) 38 | self.assertEqual(self.indices_torch2python(indices_single), 39 | self.indices_torch2python([indices_batched[0]])) 40 | self.assertEqual(self.indices_torch2python(indices_single), 41 | self.indices_torch2python([indices_batched[1]])) 42 | 43 | # test with empty targets 44 | tgt_labels_empty = torch.randint(high=n_classes, size=(0,)) 45 | tgt_boxes_empty = torch.rand(0, 4) 46 | targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}] 47 | indices = matcher({'pred_logits': logits.repeat(2, 1, 1), 48 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty) 49 | self.assertEqual(len(indices[1][0]), 0) 50 | indices = matcher({'pred_logits': logits.repeat(2, 1, 1), 51 | 'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2) 52 | self.assertEqual(len(indices[0][0]), 0) 53 | 54 | def test_position_encoding_script(self): 55 | m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned() 56 | mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa 57 | 58 | def test_backbone_script(self): 59 | backbone = Backbone('resnet50', True, False, False) 60 | torch.jit.script(backbone) # noqa 61 | 62 | def test_model_script_detection(self): 63 | model = detr_resnet50(pretrained=False).eval() 64 | scripted_model = torch.jit.script(model) 65 | x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) 66 | out = model(x) 67 | out_script = scripted_model(x) 68 | self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) 69 | self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) 70 | 71 | def test_model_script_panoptic(self): 72 | model = detr_resnet50_panoptic(pretrained=False).eval() 73 | scripted_model = torch.jit.script(model) 74 | x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) 75 | out = model(x) 76 | out_script = scripted_model(x) 77 | self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) 78 | self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) 79 | 
self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"])) 80 | 81 | 82 | if __name__ == '__main__': 83 | unittest.main() 84 | -------------------------------------------------------------------------------- /datasets/coco_panoptic.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | import json 3 | from pathlib import Path 4 | 5 | import numpy as np 6 | import torch 7 | from PIL import Image 8 | 9 | from panopticapi.utils import rgb2id 10 | from util.box_ops import masks_to_boxes 11 | 12 | from .coco import make_coco_transforms 13 | 14 | 15 | class CocoPanoptic: 16 | def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): 17 | with open(ann_file, 'r') as f: 18 | self.coco = json.load(f) 19 | 20 | # sort 'images' field so that they are aligned with 'annotations' 21 | # i.e., in alphabetical order 22 | self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) 23 | # sanity check 24 | if "annotations" in self.coco: 25 | for img, ann in zip(self.coco['images'], self.coco['annotations']): 26 | assert img['file_name'][:-4] == ann['file_name'][:-4] 27 | 28 | self.img_folder = img_folder 29 | self.ann_folder = ann_folder 30 | self.ann_file = ann_file 31 | self.transforms = transforms 32 | self.return_masks = return_masks 33 | 34 | def __getitem__(self, idx): 35 | ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] 36 | img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') 37 | ann_path = Path(self.ann_folder) / ann_info['file_name'] 38 | 39 | img = Image.open(img_path).convert('RGB') 40 | w, h = img.size 41 | if "segments_info" in ann_info: 42 | masks = np.asarray(Image.open(ann_path), dtype=np.uint32) 43 | masks = rgb2id(masks) 44 | 45 | ids = np.array([ann['id'] for ann in ann_info['segments_info']]) 46 | masks = masks == ids[:, None, None] 47 | 48 | masks = torch.as_tensor(masks, dtype=torch.uint8) 49 | labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) 50 | 51 | target = {} 52 | target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) 53 | if self.return_masks: 54 | target['masks'] = masks 55 | target['labels'] = labels 56 | 57 | target["boxes"] = masks_to_boxes(masks) 58 | 59 | target['size'] = torch.as_tensor([int(h), int(w)]) 60 | target['orig_size'] = torch.as_tensor([int(h), int(w)]) 61 | if "segments_info" in ann_info: 62 | for name in ['iscrowd', 'area']: 63 | target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) 64 | 65 | if self.transforms is not None: 66 | img, target = self.transforms(img, target) 67 | 68 | return img, target 69 | 70 | def __len__(self): 71 | return len(self.coco['images']) 72 | 73 | def get_height_and_width(self, idx): 74 | img_info = self.coco['images'][idx] 75 | height = img_info['height'] 76 | width = img_info['width'] 77 | return height, width 78 | 79 | 80 | def build(image_set, args): 81 | img_folder_root = Path(args.coco_path) 82 | ann_folder_root = Path(args.coco_panoptic_path) 83 | assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' 84 | assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' 85 | mode = 'panoptic' 86 | PATHS = { 87 | "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), 88 | "val": ("val2017", 
Path("annotations") / f'{mode}_val2017.json'), 89 | } 90 | 91 | img_folder, ann_file = PATHS[image_set] 92 | img_folder_path = img_folder_root / img_folder 93 | ann_folder = ann_folder_root / f'{mode}_{img_folder}' 94 | ann_file = ann_folder_root / ann_file 95 | 96 | dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, 97 | transforms=make_coco_transforms(image_set), return_masks=args.masks) 98 | 99 | return dataset 100 | -------------------------------------------------------------------------------- /models/matcher.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Modules to compute the matching cost and solve the corresponding LSAP. 4 | """ 5 | import torch 6 | from scipy.optimize import linear_sum_assignment 7 | from torch import nn 8 | 9 | from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou 10 | 11 | 12 | class HungarianMatcher(nn.Module): 13 | """This class computes an assignment between the targets and the predictions of the network 14 | 15 | For efficiency reasons, the targets don't include the no_object. Because of this, in general, 16 | there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, 17 | while the others are un-matched (and thus treated as non-objects). 18 | """ 19 | 20 | def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): 21 | """Creates the matcher 22 | 23 | Params: 24 | cost_class: This is the relative weight of the classification error in the matching cost 25 | cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost 26 | cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost 27 | """ 28 | super().__init__() 29 | self.cost_class = cost_class 30 | self.cost_bbox = cost_bbox 31 | self.cost_giou = cost_giou 32 | assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" 33 | 34 | @torch.no_grad() 35 | def forward(self, outputs, targets): 36 | """ Performs the matching 37 | 38 | Params: 39 | outputs: This is a dict that contains at least these entries: 40 | "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits 41 | "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates 42 | 43 | targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: 44 | "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth 45 | objects in the target) containing the class labels 46 | "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates 47 | 48 | Returns: 49 | A list of size batch_size, containing tuples of (index_i, index_j) where: 50 | - index_i is the indices of the selected predictions (in order) 51 | - index_j is the indices of the corresponding selected targets (in order) 52 | For each batch element, it holds: 53 | len(index_i) = len(index_j) = min(num_queries, num_target_boxes) 54 | """ 55 | bs, num_queries = outputs["pred_logits"].shape[:2] 56 | 57 | # We flatten to compute the cost matrices in a batch 58 | out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] 59 | out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] 60 | 61 | # Also concat the target labels and boxes 62 | tgt_ids = 
torch.cat([v["labels"] for v in targets]) 63 | tgt_bbox = torch.cat([v["boxes"] for v in targets]) 64 | 65 | # Compute the classification cost. Contrary to the loss, we don't use the NLL, 66 | # but approximate it in 1 - proba[target class]. 67 | # The 1 is a constant that doesn't change the matching, it can be ommitted. 68 | cost_class = -out_prob[:, tgt_ids] 69 | 70 | # Compute the L1 cost between boxes 71 | cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) 72 | 73 | # Compute the giou cost betwen boxes 74 | cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) 75 | 76 | # Final cost matrix 77 | C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou 78 | C = C.view(bs, num_queries, -1).cpu() 79 | 80 | sizes = [len(v["boxes"]) for v in targets] 81 | indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] 82 | return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] 83 | 84 | 85 | def build_matcher(args): 86 | return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) 87 | -------------------------------------------------------------------------------- /util/plot_utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plotting utilities to visualize training logs. 3 | """ 4 | import torch 5 | import pandas as pd 6 | import seaborn as sns 7 | import matplotlib.pyplot as plt 8 | 9 | from pathlib import Path, PurePath 10 | 11 | 12 | def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): 13 | ''' 14 | Function to plot specific fields from training log(s). Plots both training and test results. 15 | 16 | :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file 17 | - fields = which results to plot from each log file - plots both training and test for each field. 18 | - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots 19 | - log_name = optional, name of log file if different than default 'log.txt'. 20 | 21 | :: Outputs - matplotlib plots of results in fields, color coded for each log file. 22 | - solid lines are training results, dashed lines are test results. 
23 | 24 | ''' 25 | func_name = "plot_utils.py::plot_logs" 26 | 27 | # verify logs is a list of Paths (list[Path]) or a single pathlib Path object, 28 | # convert single Path to list to avoid 'not iterable' error 29 | 30 | if not isinstance(logs, list): 31 | if isinstance(logs, PurePath): 32 | logs = [logs] 33 | print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") 34 | else: 35 | raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ 36 | Expect list[Path] or single Path obj, received {type(logs)}") 37 | 38 | # verify valid dir(s) and that every item in list is a Path object 39 | for i, dir in enumerate(logs): 40 | if not isinstance(dir, PurePath): 41 | raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") 42 | if dir.exists(): 43 | continue 44 | raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") 45 | 46 | # load log file(s) and plot 47 | dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] 48 | 49 | fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) 50 | 51 | for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): 52 | for j, field in enumerate(fields): 53 | if field == 'mAP': 54 | coco_eval = pd.DataFrame(np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean() 55 | axs[j].plot(coco_eval, c=color) 56 | else: 57 | df.interpolate().ewm(com=ewm_col).mean().plot( 58 | y=[f'train_{field}', f'test_{field}'], 59 | ax=axs[j], 60 | color=[color] * 2, 61 | style=['-', '--'] 62 | ) 63 | for ax, field in zip(axs, fields): 64 | ax.legend([Path(p).name for p in logs]) 65 | ax.set_title(field) 66 | 67 | 68 | def plot_precision_recall(files, naming_scheme='iter'): 69 | if naming_scheme == 'exp_id': 70 | # name becomes exp_id 71 | names = [f.parts[-3] for f in files] 72 | elif naming_scheme == 'iter': 73 | names = [f.stem for f in files] 74 | else: 75 | raise ValueError(f'not supported {naming_scheme}') 76 | fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) 77 | for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): 78 | data = torch.load(f) 79 | # precision is n_iou, n_points, n_cat, n_area, max_det 80 | precision = data['precision'] 81 | recall = data['params'].recThrs 82 | scores = data['scores'] 83 | # take precision for all classes, all areas and 100 detections 84 | precision = precision[0, :, :, 0, -1].mean(1) 85 | scores = scores[0, :, :, 0, -1].mean(1) 86 | prec = precision.mean() 87 | rec = data['recall'][0, :, 0, -1].mean() 88 | print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + 89 | f'score={scores.mean():0.3f}, ' + 90 | f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' 91 | ) 92 | axs[0].plot(recall, precision, c=color) 93 | axs[1].plot(recall, scores, c=color) 94 | 95 | axs[0].set_title('Precision / Recall') 96 | axs[0].legend(names) 97 | axs[1].set_title('Scores / Recall') 98 | axs[1].legend(names) 99 | return fig, axs 100 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Implementation of the ICCV 2021 paper "PnP-DETR: Towards Efficient Visual Analysis with Transformers" [[arXiv](https://arxiv.org/abs/2109.07036)] 2 | 3 | **:star::star::star: [News]** A re-implementation is integrated into [detrex](https://github.com/IDEA-Research/detrex), a benchmarking toolbox for detection transformers. 4 | 5 | This repository is based on
[detr](https://github.com/facebookresearch/detr) 6 | 7 | Recently, DETR pioneered the solution of vision tasks with transformers: it directly translates the image feature map into the object detection result. Though effective, translating the full feature map can be costly due to redundant computation on areas such as the background. In this work, we encapsulate the idea of reducing spatial redundancy into a novel poll and pool (PnP) sampling module, with which we build an end-to-end PnP-DETR architecture that adaptively allocates its computation spatially to be more efficient. Concretely, the PnP module abstracts the image feature map into fine foreground object feature vectors and a small number of coarse background contextual feature vectors. The transformer models information interaction within the fine-coarse feature space and translates the features into the detection result. Moreover, the PnP-augmented model can instantly achieve various desired trade-offs between performance and computation with a single model by varying the sampled feature length, without needing to train multiple models as existing methods do. Thus it offers greater flexibility for deployment in diverse scenarios with varying computation constraints. We further validate the generalizability of the PnP module on panoptic segmentation and the recent transformer-based image recognition model ViT, and show consistent efficiency gains. We believe our method takes a step toward efficient visual analysis with transformers, wherein spatial redundancy is commonly observed. 8 | 9 | ![PnP-DETR](.github/pnp-detr.png) 10 | 11 | 12 | # Usage 13 | First, clone the repository locally: 14 | ``` 15 | git clone https://github.com/twangnh/pnp-detr 16 | ``` 17 | Then, install PyTorch 1.5+ and torchvision 0.6+: 18 | ``` 19 | conda install -c pytorch pytorch torchvision 20 | ``` 21 | Install pycocotools (for evaluation on COCO) and scipy (for training): 22 | ``` 23 | conda install cython scipy 24 | pip install -U 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI' 25 | ``` 26 | That's it; you should be good to train and evaluate detection models. 27 | 28 | (Optional) To work with panoptic segmentation, install panopticapi: 29 | ``` 30 | pip install git+https://github.com/cocodataset/panopticapi.git 31 | ``` 32 | 33 | ## Data preparation 34 | 35 | Download and extract COCO 2017 train and val images with annotations from 36 | [http://cocodataset.org](http://cocodataset.org/#download). 37 | We expect the directory structure to be the following: 38 | ``` 39 | path/to/coco/ 40 | annotations/ # annotation json files 41 | train2017/ # train images 42 | val2017/ # val images 43 | ``` 44 | 45 | ## Training 46 | To train PnP-DETR on a single node with 8 GPUs for 300 epochs, run: 47 | ``` 48 | python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --coco_path /path/to/coco 49 | ``` 50 | You can adjust the range of the random poll ratio with `--sample_ratio_lower_bound` and `--sample_ratio_higher_bound`; see the example below. 51 | 52 | Following DETR, we train PnP-DETR with AdamW, setting the learning rate to 1e-4 in the transformer and 1e-5 in the backbone. 53 | Horizontal flips, scales, and crops are used for augmentation. 54 | Images are rescaled to have a min size of 800 and a max size of 1333. 55 | The transformer is trained with a dropout of 0.1, and the whole model is trained with gradient clipping of 0.1.
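For example, to train with the poll ratio drawn randomly between 0.3 and 0.5 (the bound values here are illustrative, not recommended defaults):
```
python -m torch.distributed.launch --nproc_per_node=8 --use_env main.py --coco_path /path/to/coco --sample_ratio_lower_bound 0.3 --sample_ratio_higher_bound 0.5
```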
56 | 57 | 58 | ## Evaluation 59 | To evaluate PnP-DETR R50 on COCO val5k with a single GPU, run: 60 | ``` 61 | python main.py --batch_size 2 --no_aux_loss --eval --resume xxx --coco_path /path/to/coco --sample_topk_ratio xxx 62 | ``` 63 | 64 | ## Multinode training 65 | Distributed training is available via Slurm and [submitit](https://github.com/facebookincubator/submitit): 66 | ``` 67 | pip install submitit 68 | ``` 69 | To train the baseline DETR-6-6 model on 4 nodes for 300 epochs: 70 | ``` 71 | python run_with_submitit.py --timeout 3000 --coco_path /path/to/coco 72 | ``` 73 | 74 | ## Cite 75 | Please consider citing our paper: 76 | 77 | ``` 78 | @inproceedings{wang2021pnp, 79 | title={PnP-DETR: Towards Efficient Visual Analysis with Transformers}, 80 | author={Wang, Tao and Yuan, Li and Chen, Yunpeng and Feng, Jiashi and Yan, Shuicheng}, 81 | booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision}, 82 | pages={4661--4670}, 83 | year={2021} 84 | } 85 | ``` 86 | -------------------------------------------------------------------------------- /models/backbone.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Backbone modules. 4 | """ 5 | from collections import OrderedDict 6 | 7 | import torch 8 | import torch.nn.functional as F 9 | import torchvision 10 | from torch import nn 11 | from torchvision.models._utils import IntermediateLayerGetter 12 | from typing import Dict, List 13 | 14 | from util.misc import NestedTensor, is_main_process 15 | 16 | from .position_encoding import build_position_encoding 17 | 18 | 19 | class FrozenBatchNorm2d(torch.nn.Module): 20 | """ 21 | BatchNorm2d where the batch statistics and the affine parameters are fixed. 22 | 23 | Copy-paste from torchvision.misc.ops with added eps before rsqrt, 24 | without which any model other than torchvision.models.resnet[18,34,50,101] 25 | produces NaNs.
26 | """ 27 | 28 | def __init__(self, n): 29 | super(FrozenBatchNorm2d, self).__init__() 30 | self.register_buffer("weight", torch.ones(n)) 31 | self.register_buffer("bias", torch.zeros(n)) 32 | self.register_buffer("running_mean", torch.zeros(n)) 33 | self.register_buffer("running_var", torch.ones(n)) 34 | 35 | def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, 36 | missing_keys, unexpected_keys, error_msgs): 37 | num_batches_tracked_key = prefix + 'num_batches_tracked' 38 | if num_batches_tracked_key in state_dict: 39 | del state_dict[num_batches_tracked_key] 40 | 41 | super(FrozenBatchNorm2d, self)._load_from_state_dict( 42 | state_dict, prefix, local_metadata, strict, 43 | missing_keys, unexpected_keys, error_msgs) 44 | 45 | def forward(self, x): 46 | # move reshapes to the beginning 47 | # to make it fuser-friendly 48 | w = self.weight.reshape(1, -1, 1, 1) 49 | b = self.bias.reshape(1, -1, 1, 1) 50 | rv = self.running_var.reshape(1, -1, 1, 1) 51 | rm = self.running_mean.reshape(1, -1, 1, 1) 52 | eps = 1e-5 53 | scale = w * (rv + eps).rsqrt() 54 | bias = b - rm * scale 55 | return x * scale + bias 56 | 57 | 58 | class BackboneBase(nn.Module): 59 | 60 | def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): 61 | super().__init__() 62 | for name, parameter in backbone.named_parameters(): 63 | if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: 64 | parameter.requires_grad_(False) 65 | if return_interm_layers: 66 | return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} 67 | else: 68 | return_layers = {'layer4': "0"} 69 | self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) 70 | self.num_channels = num_channels 71 | 72 | def forward(self, tensor_list: NestedTensor): 73 | xs = self.body(tensor_list.tensors) 74 | out: Dict[str, NestedTensor] = {} 75 | for name, x in xs.items(): 76 | m = tensor_list.mask 77 | assert m is not None 78 | mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] 79 | out[name] = NestedTensor(x, mask) 80 | return out 81 | 82 | 83 | class Backbone(BackboneBase): 84 | """ResNet backbone with frozen BatchNorm.""" 85 | def __init__(self, name: str, 86 | train_backbone: bool, 87 | return_interm_layers: bool, 88 | dilation: bool): 89 | backbone = getattr(torchvision.models, name)( 90 | replace_stride_with_dilation=[False, False, dilation], 91 | pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) 92 | num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 93 | super().__init__(backbone, train_backbone, num_channels, return_interm_layers) 94 | 95 | 96 | class Joiner(nn.Sequential): 97 | def __init__(self, backbone, position_embedding): 98 | super().__init__(backbone, position_embedding) 99 | 100 | def forward(self, tensor_list: NestedTensor): 101 | xs = self[0](tensor_list) 102 | out: List[NestedTensor] = [] 103 | pos = [] 104 | for name, x in xs.items(): 105 | out.append(x) 106 | # position encoding 107 | pos.append(self[1](x).to(x.tensors.dtype)) 108 | 109 | return out, pos 110 | 111 | 112 | def build_backbone(args): 113 | position_embedding = build_position_encoding(args) 114 | train_backbone = args.lr_backbone > 0 115 | return_interm_layers = args.masks 116 | backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) 117 | model = Joiner(backbone, position_embedding) 118 | model.num_channels = backbone.num_channels 119 | return model 120 | 
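As a quick illustration (not part of the repository), here is a minimal sketch of how `build_backbone` composes `Backbone`, the positional encoding, and `Joiner`. It assumes the repo root is importable; the `args` fields mirror the argparse options consumed by `build_backbone` and `build_position_encoding` above:
```python
from types import SimpleNamespace

import torch

from models.backbone import build_backbone
from util.misc import nested_tensor_from_tensor_list

# Fields mirror the options read by build_backbone / build_position_encoding.
args = SimpleNamespace(backbone='resnet50', dilation=False, lr_backbone=1e-5,
                       masks=False, hidden_dim=256, position_embedding='sine')
model = build_backbone(args)

# Pad a list of images into a single NestedTensor (tensors + padding mask).
samples = nested_tensor_from_tensor_list([torch.rand(3, 224, 224)])
features, pos = model(samples)  # lists: backbone features and position encodings
print(features[0].tensors.shape)  # torch.Size([1, 2048, 7, 7])
print(pos[0].shape)               # torch.Size([1, 256, 7, 7])
```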
-------------------------------------------------------------------------------- /models/sampler.py: -------------------------------------------------------------------------------- 1 | class SortSampler(nn.Module): 2 | # NOTE: this file omits its imports; it assumes `import torch` and `from torch import nn`. 3 | def __init__(self, topk_ratio, input_dim, score_pred_net='2layer-fc-256', kproj_net='1layer-fc', unsample_abstract_number=30, pos_embed_kproj=False): 4 | ## topk_ratio: ratio of hard-sampled (polled) tokens 5 | ## unsample_abstract_number: number of soft-sampled (pooled) abstract tokens; a fixed value 6 | super().__init__() 7 | self.topk_ratio = topk_ratio 8 | if score_pred_net == '2layer-fc-256': 9 | self.score_pred_net = nn.Sequential(nn.Linear(input_dim, input_dim), 10 | nn.ReLU(), 11 | nn.Linear(input_dim, 1)) 12 | elif score_pred_net == '2layer-fc-16': 13 | self.score_pred_net = nn.Sequential(nn.Linear(input_dim, 16), 14 | nn.ReLU(), 15 | nn.Linear(16, 1)) 16 | elif score_pred_net == '1layer-fc': 17 | self.score_pred_net = nn.Linear(input_dim, 1) 18 | else: 19 | raise ValueError 20 | 21 | self.norm_feature = nn.LayerNorm(input_dim, elementwise_affine=False) 22 | self.unsample_abstract_number = unsample_abstract_number 23 | if kproj_net == '2layer-fc': 24 | self.k_proj = nn.Sequential(nn.Linear(input_dim, input_dim), 25 | nn.ReLU(), 26 | nn.Linear(input_dim, unsample_abstract_number)) 27 | elif kproj_net == '1layer-fc': 28 | self.k_proj = nn.Linear(input_dim, unsample_abstract_number) 29 | else: 30 | raise ValueError 31 | self.v_proj = nn.Linear(input_dim, input_dim) 32 | self.pos_embed_kproj = pos_embed_kproj 33 | 34 | def forward(self, src, mask, pos_embed): 35 | # pos_embed shape: (h*w, 1, c) 36 | l, bs, c = src.shape 37 | if mask is None: 38 | mask = src.new_zeros(bs, l).bool() 39 | pos_embed = pos_embed.repeat(1, bs, 1) 40 | sample_weight = self.score_pred_net(src).sigmoid().view(bs, -1) 41 | # sample_weight[mask] = sample_weight[mask].clone() * 0. 42 | # sample_weight.data[mask] = 0. 43 | sample_weight_clone = sample_weight.clone().detach() 44 | sample_weight_clone[mask] = -1.  # padded positions get weight -1 so they sort last
45 | 46 | ## max sample number per image: poll ratio times the number of valid (non-padded) tokens 47 | sample_lens = ((~mask).sum(1)*self.topk_ratio).int() 48 | max_sample_num = sample_lens.max() 49 | mask_topk = torch.arange(max_sample_num).expand(len(sample_lens), max_sample_num).to(sample_lens.device) > (sample_lens-1).unsqueeze(1) 50 | 51 | ## for sampling the remaining unsampled points 52 | min_sample_num = sample_lens.min() 53 | 54 | sort_order = sample_weight_clone.sort(descending=True, dim=1)[1] 55 | sort_confidence_topk = sort_order[:, :max_sample_num] 56 | sort_confidence_topk_remaining = sort_order[:, min_sample_num:] 57 | ## flatten for gathering 58 | src = src.flatten(2).permute(2, 0, 1) 59 | src = self.norm_feature(src) 60 | 61 | src_sample_remaining = src.gather(0, sort_confidence_topk_remaining.permute(1, 0)[..., None].expand(-1, -1, c)) 62 | 63 | ## this will mask out the padding and the already-sampled points 64 | mask_unsampled = torch.arange(mask.size(1)).expand(len(sample_lens), mask.size(1)).to(sample_lens.device) < (sample_lens).unsqueeze(1) 65 | mask_unsampled = mask_unsampled | mask.gather(1, sort_order) 66 | mask_unsampled = mask_unsampled[:, min_sample_num:] 67 | 68 | ## abstract the unsampled points with attention (the pool step) 69 | if self.pos_embed_kproj: 70 | pos_embed_sample_remaining = pos_embed.gather(0, sort_confidence_topk_remaining.permute(1, 0)[..., None].expand(-1, -1, c)) 71 | kproj = self.k_proj(src_sample_remaining+pos_embed_sample_remaining) 72 | else: 73 | kproj = self.k_proj(src_sample_remaining) 74 | kproj = kproj.masked_fill( 75 | mask_unsampled.permute(1, 0).unsqueeze(2), 76 | float('-inf'), 77 | ).permute(1, 2, 0).softmax(-1) 78 | abs_unsampled_points = torch.bmm(kproj, self.v_proj(src_sample_remaining).permute(1, 0, 2)).permute(1, 0, 2) 79 | abs_unsampled_pos_embed = torch.bmm(kproj, pos_embed.gather(0, sort_confidence_topk_remaining.
80 | permute(1,0)[...,None].expand(-1,-1,c)).permute(1,0,2)).permute(1,0,2) 81 | abs_unsampled_mask = mask.new_zeros(mask.size(0),abs_unsampled_points.size(0)) 82 | 83 | ## reg sample weight to be sparse with l1 loss 84 | sample_reg_loss = sample_weight.gather(1,sort_confidence_topk).mean() 85 | src_sampled = src.gather(0,sort_confidence_topk.permute(1,0)[...,None].expand(-1,-1,c)) *sample_weight.gather(1,sort_confidence_topk).permute(1,0).unsqueeze(-1) 86 | pos_embed_sampled = pos_embed.gather(0,sort_confidence_topk.permute(1,0)[...,None].expand(-1,-1,c)) 87 | mask_sampled = mask_topk 88 | 89 | src = torch.cat([src_sampled, abs_unsampled_points]) 90 | pos_embed = torch.cat([pos_embed_sampled,abs_unsampled_pos_embed]) 91 | mask = torch.cat([mask_sampled, abs_unsampled_mask],dim=1) 92 | assert ((~mask).sum(1)==sample_lens+self.unsample_abstract_number).all() 93 | return src, sample_reg_loss, sort_confidence_topk, mask, pos_embed -------------------------------------------------------------------------------- /datasets/sample_coco.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import json 3 | import os 4 | from collections import defaultdict 5 | 6 | from random import sample 7 | # import random 8 | import numpy as np 9 | import tqdm 10 | 11 | import matplotlib.pyplot as plt 12 | if __name__ == "__main__": 13 | ann_file = './data/coco/annotations/instances_train2017.json' 14 | PER_CAT_THR = 1000 15 | output_filename = './data/coco/annotations/instances_train2017_sampled_PER_CAT_THR_{}.json'.format(PER_CAT_THR) 16 | 17 | with open(ann_file, "r") as f: 18 | dataset = json.load(f) 19 | 20 | catToImgs = defaultdict(list) 21 | for ann in dataset['annotations']: 22 | catToImgs[ann['category_id']].append(ann['image_id']) 23 | 24 | ## remove duplicate imgs 25 | for cat_id in catToImgs.keys(): 26 | catToImgs[cat_id] = list(set(catToImgs[cat_id])) 27 | 28 | per_cat_img_number = [len(catToImgs[cat_id]) for cat_id in catToImgs.keys()] 29 | sorting_order_imgnumber = np.argsort(per_cat_img_number).tolist() 30 | sorted_catid = [list(catToImgs.keys())[i] for i in sorting_order_imgnumber] 31 | 32 | catToImgs_list = [{cat_id:catToImgs[cat_id]}for cat_id in catToImgs.keys()] 33 | catToImgs_sampled = copy.deepcopy(catToImgs) 34 | sampled_img_ids = [] 35 | 36 | for cat_id in tqdm.tqdm(sorted_catid):## starting from cat with least imgs 37 | if len(catToImgs[cat_id])>PER_CAT_THR: # only sample categories with more than 2000 training imgs 38 | in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id in sampled_img_ids] 39 | not_in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id not in sampled_img_ids] 40 | 41 | catToImgs_sampled[cat_id] = in_sampled + sample(not_in_sampled, PER_CAT_THR-len(in_sampled)) if len(in_sampled)PER_CAT_THR: # only sample categories with more than 2000 training imgs 51 | # in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id in sampled_img_ids] 52 | # not_in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id not in sampled_img_ids] 53 | # 54 | # catToImgs_sampled_2000[cat_id] = in_sampled + sample(not_in_sampled, PER_CAT_THR-len(in_sampled)) if len(in_sampled)PER_CAT_THR: # only sample categories with more than 2000 training imgs 65 | # in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id in sampled_img_ids] 66 | # not_in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id not in sampled_img_ids] 67 | # 68 | # catToImgs_sampled_1000[cat_id] = in_sampled + sample(not_in_sampled, 
78 |     # if len(catToImgs[cat_id])>PER_CAT_THR: # only sample categories with more than 2000 training imgs
79 |     #     in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id in sampled_img_ids]
80 |     #     not_in_sampled = [img_id for img_id in catToImgs[cat_id] if img_id not in sampled_img_ids]
81 |     #
82 |     #     catToImgs_sampled_500[cat_id] = in_sampled + sample(not_in_sampled, PER_CAT_THR-len(in_sampled)) if len(in_sampled) < PER_CAT_THR else in_sampled
--------------------------------------------------------------------------------
/datasets/coco.py:
--------------------------------------------------------------------------------
86 |         keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
87 |         boxes = boxes[keep]
88 |         classes = classes[keep]
89 |         if self.return_masks:
90 |             masks = masks[keep]
91 |         if keypoints is not None:
92 |             keypoints = keypoints[keep]
93 | 
94 |         target = {}
95 |         target["boxes"] = boxes
96 |         target["labels"] = classes
97 |         if self.return_masks:
98 |             target["masks"] = masks
99 |         target["image_id"] = image_id
100 |         if keypoints is not None:
101 |             target["keypoints"] = keypoints
102 | 
103 |         # for conversion to coco api
104 |         area = torch.tensor([obj["area"] for obj in anno])
105 |         iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno])
106 |         target["area"] = area[keep]
107 |         target["iscrowd"] = iscrowd[keep]
108 | 
109 |         target["orig_size"] = torch.as_tensor([int(h), int(w)])
110 |         target["size"] = torch.as_tensor([int(h), int(w)])
111 | 
112 |         return image, target
113 | 
114 | 
115 | def make_coco_transforms(image_set):
116 | 
117 |     normalize = T.Compose([
118 |         T.ToTensor(),
119 |         T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
120 |     ])
121 | 
122 |     scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
123 | 
124 |     if image_set == 'train' or 'sampled_PER_CAT_THR' in image_set:
125 |         return T.Compose([
126 |             T.RandomHorizontalFlip(),
127 |             T.RandomSelect(
128 |                 T.RandomResize(scales, max_size=1333),
129 |                 T.Compose([
130 |                     T.RandomResize([400, 500, 600]),
131 |                     T.RandomSizeCrop(384, 600),
132 |                     T.RandomResize(scales, max_size=1333),
133 |                 ])
134 |             ),
135 |             normalize,
136 |         ])
137 | 
138 |     if image_set == 'val':
139 |         return T.Compose([
140 |             T.RandomResize([800], max_size=1333),
141 |             normalize,
142 |         ])
143 | 
144 |     raise ValueError(f'unknown {image_set}')
145 | 
146 | 
147 | def build(image_set, args):
148 |     root = Path(args.coco_path)
149 |     assert root.exists(), f'provided COCO path {root} does not exist'
150 |     mode = 'instances'
151 |     PATHS = {
152 |         "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'),
153 |         "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'),
154 |         "train_sampled_PER_CAT_THR_500": (root / "train2017", root / "annotations" / f'{mode}_train2017_sampled_PER_CAT_THR_500.json'),
155 |         "train_sampled_PER_CAT_THR_1000": (
156 |             root / "train2017", root / "annotations" / f'{mode}_train2017_sampled_PER_CAT_THR_1000.json'),
157 |         "train_sampled_PER_CAT_THR_2000": (
158 |             root / "train2017", root / "annotations" / f'{mode}_train2017_sampled_PER_CAT_THR_2000.json')
159 |     }
160 | 
161 |     img_folder, ann_file = PATHS[image_set]
162 |     dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks)
163 |     return dataset
164 | 
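For orientation, a minimal sketch of how the sampled-annotation pipeline fits together; the data path and the argument object below are illustrative stand-ins carrying only the fields `build` actually reads:

    # 1) offline: run datasets/sample_coco.py to write instances_train2017_sampled_PER_CAT_THR_1000.json
    # 2) training: select it via --train_image_set train_sampled_PER_CAT_THR_1000
    from types import SimpleNamespace
    args = SimpleNamespace(coco_path='./data/coco', masks=False)
    dataset = build('train_sampled_PER_CAT_THR_1000', args)  # gets the same train-time augmentations as 'train'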
--------------------------------------------------------------------------------
/hubconf.py:
--------------------------------------------------------------------------------
1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 | import torch
3 | 
4 | from models.backbone import Backbone, Joiner
5 | from models.detr import DETR, PostProcess
6 | from models.position_encoding import PositionEmbeddingSine
7 | from models.segmentation import DETRsegm, PostProcessPanoptic
8 | from models.transformer import Transformer
9 | 
10 | dependencies = ["torch", "torchvision"]
11 | 
12 | 
13 | def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False):
14 |     hidden_dim = 256
15 |     backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation)
16 |     pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True)
17 |     backbone_with_pos_enc = Joiner(backbone, pos_enc)
18 |     backbone_with_pos_enc.num_channels = backbone.num_channels
19 |     transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True)
20 |     detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100)
21 |     if mask:
22 |         return DETRsegm(detr)
23 |     return detr
24 | 
25 | 
26 | def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False):
27 |     """
28 |     DETR R50 with 6 encoder and 6 decoder layers.
29 | 
30 |     Achieves 42/62.4 AP/AP50 on COCO val5k.
31 |     """
32 |     model = _make_detr("resnet50", dilation=False, num_classes=num_classes)
33 |     if pretrained:
34 |         checkpoint = torch.hub.load_state_dict_from_url(
35 |             url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True
36 |         )
37 |         model.load_state_dict(checkpoint["model"])
38 |     if return_postprocessor:
39 |         return model, PostProcess()
40 |     return model
41 | 
42 | 
43 | def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False):
44 |     """
45 |     DETR-DC5 R50 with 6 encoder and 6 decoder layers.
46 | 
47 |     The last block of ResNet-50 has dilation to increase
48 |     output resolution.
49 |     Achieves 43.3/63.1 AP/AP50 on COCO val5k.
50 |     """
51 |     model = _make_detr("resnet50", dilation=True, num_classes=num_classes)
52 |     if pretrained:
53 |         checkpoint = torch.hub.load_state_dict_from_url(
54 |             url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True
55 |         )
56 |         model.load_state_dict(checkpoint["model"])
57 |     if return_postprocessor:
58 |         return model, PostProcess()
59 |     return model
60 | 
61 | 
62 | def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False):
63 |     """
64 |     DETR R101 with 6 encoder and 6 decoder layers.
65 | 
66 |     Achieves 43.5/63.8 AP/AP50 on COCO val5k.
67 |     """
68 |     model = _make_detr("resnet101", dilation=False, num_classes=num_classes)
69 |     if pretrained:
70 |         checkpoint = torch.hub.load_state_dict_from_url(
71 |             url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True
72 |         )
73 |         model.load_state_dict(checkpoint["model"])
74 |     if return_postprocessor:
75 |         return model, PostProcess()
76 |     return model
77 | 
78 | 
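# Usage sketch: these entrypoints are meant to be consumed through torch.hub, e.g.
#
#   model, postprocess = torch.hub.load('facebookresearch/detr', 'detr_resnet50',
#                                       pretrained=True, return_postprocessor=True)
#   model.eval()
#
# The repo path above is the upstream DETR one and is illustrative; substitute this
# fork's repo if its hub entrypoints are published separately.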
86 | """ 87 | model = _make_detr("resnet101", dilation=True, num_classes=num_classes) 88 | if pretrained: 89 | checkpoint = torch.hub.load_state_dict_from_url( 90 | url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True 91 | ) 92 | model.load_state_dict(checkpoint["model"]) 93 | if return_postprocessor: 94 | return model, PostProcess() 95 | return model 96 | 97 | 98 | def detr_resnet50_panoptic( 99 | pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False 100 | ): 101 | """ 102 | DETR R50 with 6 encoder and 6 decoder layers. 103 | Achieves 43.4 PQ on COCO val5k. 104 | 105 | threshold is the minimum confidence required for keeping segments in the prediction 106 | """ 107 | model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) 108 | is_thing_map = {i: i <= 90 for i in range(250)} 109 | if pretrained: 110 | checkpoint = torch.hub.load_state_dict_from_url( 111 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", 112 | map_location="cpu", 113 | check_hash=True, 114 | ) 115 | model.load_state_dict(checkpoint["model"]) 116 | if return_postprocessor: 117 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 118 | return model 119 | 120 | 121 | def detr_resnet50_dc5_panoptic( 122 | pretrained=False, num_classes=91, threshold=0.85, return_postprocessor=False 123 | ): 124 | """ 125 | DETR-DC5 R50 with 6 encoder and 6 decoder layers. 126 | 127 | The last block of ResNet-50 has dilation to increase 128 | output resolution. 129 | Achieves 44.6 on COCO val5k. 130 | 131 | threshold is the minimum confidence required for keeping segments in the prediction 132 | """ 133 | model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) 134 | is_thing_map = {i: i <= 90 for i in range(250)} 135 | if pretrained: 136 | checkpoint = torch.hub.load_state_dict_from_url( 137 | url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", 138 | map_location="cpu", 139 | check_hash=True, 140 | ) 141 | model.load_state_dict(checkpoint["model"]) 142 | if return_postprocessor: 143 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 144 | return model 145 | 146 | 147 | def detr_resnet101_panoptic( 148 | pretrained=False, num_classes=91, threshold=0.85, return_postprocessor=False 149 | ): 150 | """ 151 | DETR-DC5 R101 with 6 encoder and 6 decoder layers. 152 | 153 | Achieves 45.1 PQ on COCO val5k. 154 | 155 | threshold is the minimum confidence required for keeping segments in the prediction 156 | """ 157 | model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) 158 | is_thing_map = {i: i <= 90 for i in range(250)} 159 | if pretrained: 160 | checkpoint = torch.hub.load_state_dict_from_url( 161 | url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", 162 | map_location="cpu", 163 | check_hash=True, 164 | ) 165 | model.load_state_dict(checkpoint["model"]) 166 | if return_postprocessor: 167 | return model, PostProcessPanoptic(is_thing_map, threshold=threshold) 168 | return model 169 | -------------------------------------------------------------------------------- /engine.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | """ 3 | Train and eval functions used in main.py 4 | """ 5 | import math 6 | import os 7 | import sys 8 | from typing import Iterable 9 | 10 | import torch 11 | 12 | import util.misc as utils 13 | from datasets.coco_eval import CocoEvaluator 14 | from datasets.panoptic_eval import PanopticEvaluator 15 | 16 | import random 17 | 18 | def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, 19 | data_loader: Iterable, optimizer: torch.optim.Optimizer, 20 | device: torch.device, epoch: int, sample_ratio_lower_bound, sample_ratio_higher_bound, max_norm: float = 0): 21 | model.train() 22 | criterion.train() 23 | metric_logger = utils.MetricLogger(delimiter=" ") 24 | metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) 25 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 26 | header = 'Epoch: [{}]'.format(epoch) 27 | print_freq = 100 28 | 29 | for samples, targets in metric_logger.log_every(data_loader, print_freq, header): 30 | samples = samples.to(device) 31 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 32 | sample_ratio = random.uniform(sample_ratio_lower_bound, sample_ratio_higher_bound) 33 | outputs = model(samples,sample_ratio) 34 | loss_dict = criterion(outputs, targets) 35 | loss_dict['sample_reg_loss']=outputs['sample_reg_loss'] 36 | weight_dict = criterion.weight_dict 37 | losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) 38 | # reduce losses over all GPUs for logging purposes 39 | loss_dict_reduced = utils.reduce_dict(loss_dict) 40 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 41 | for k, v in loss_dict_reduced.items()} 42 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 43 | for k, v in loss_dict_reduced.items() if k in weight_dict} 44 | losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) 45 | 46 | loss_value = losses_reduced_scaled.item() 47 | 48 | if not math.isfinite(loss_value): 49 | print("Loss is {}, stopping training".format(loss_value)) 50 | print(loss_dict_reduced) 51 | sys.exit(1) 52 | 53 | optimizer.zero_grad() 54 | losses.backward() 55 | if max_norm > 0: 56 | torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) 57 | optimizer.step() 58 | 59 | metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) 60 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 61 | metric_logger.update(lr=optimizer.param_groups[0]["lr"]) 62 | # gather the stats from all processes 63 | metric_logger.synchronize_between_processes() 64 | print("Averaged stats:", metric_logger) 65 | return {k: meter.global_avg for k, meter in metric_logger.meters.items()} 66 | 67 | 68 | @torch.no_grad() 69 | def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir, sample_ratio): 70 | model.eval() 71 | criterion.eval() 72 | 73 | metric_logger = utils.MetricLogger(delimiter=" ") 74 | metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) 75 | header = 'Test:' 76 | 77 | iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) 78 | coco_evaluator = CocoEvaluator(base_ds, iou_types) 79 | # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] 80 | 81 | panoptic_evaluator = None 82 | if 'panoptic' in postprocessors.keys(): 83 | panoptic_evaluator = PanopticEvaluator( 84 | data_loader.dataset.ann_file, 85 | data_loader.dataset.ann_folder, 86 | 
output_dir=os.path.join(output_dir, "panoptic_eval"), 87 | ) 88 | 89 | for samples, targets in metric_logger.log_every(data_loader, 10, header): 90 | samples = samples.to(device) 91 | targets = [{k: v.to(device) for k, v in t.items()} for t in targets] 92 | 93 | outputs = model(samples, sample_ratio) 94 | loss_dict = criterion(outputs, targets) 95 | weight_dict = criterion.weight_dict 96 | 97 | # reduce losses over all GPUs for logging purposes 98 | loss_dict_reduced = utils.reduce_dict(loss_dict) 99 | loss_dict_reduced_scaled = {k: v * weight_dict[k] 100 | for k, v in loss_dict_reduced.items() if k in weight_dict} 101 | loss_dict_reduced_unscaled = {f'{k}_unscaled': v 102 | for k, v in loss_dict_reduced.items()} 103 | metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), 104 | **loss_dict_reduced_scaled, 105 | **loss_dict_reduced_unscaled) 106 | metric_logger.update(class_error=loss_dict_reduced['class_error']) 107 | 108 | orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) 109 | results = postprocessors['bbox'](outputs, orig_target_sizes) 110 | if 'segm' in postprocessors.keys(): 111 | target_sizes = torch.stack([t["size"] for t in targets], dim=0) 112 | results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) 113 | res = {target['image_id'].item(): output for target, output in zip(targets, results)} 114 | if coco_evaluator is not None: 115 | coco_evaluator.update(res) 116 | 117 | if panoptic_evaluator is not None: 118 | res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) 119 | for i, target in enumerate(targets): 120 | image_id = target["image_id"].item() 121 | file_name = f"{image_id:012d}.png" 122 | res_pano[i]["image_id"] = image_id 123 | res_pano[i]["file_name"] = file_name 124 | 125 | panoptic_evaluator.update(res_pano) 126 | 127 | # gather the stats from all processes 128 | metric_logger.synchronize_between_processes() 129 | print("Averaged stats:", metric_logger) 130 | if coco_evaluator is not None: 131 | coco_evaluator.synchronize_between_processes() 132 | if panoptic_evaluator is not None: 133 | panoptic_evaluator.synchronize_between_processes() 134 | 135 | # accumulate predictions from all images 136 | if coco_evaluator is not None: 137 | coco_evaluator.accumulate() 138 | coco_evaluator.summarize() 139 | panoptic_res = None 140 | if panoptic_evaluator is not None: 141 | panoptic_res = panoptic_evaluator.summarize() 142 | stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} 143 | if coco_evaluator is not None: 144 | if 'bbox' in postprocessors.keys(): 145 | stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() 146 | if 'segm' in postprocessors.keys(): 147 | stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() 148 | if panoptic_res is not None: 149 | stats['PQ_all'] = panoptic_res["All"] 150 | stats['PQ_th'] = panoptic_res["Things"] 151 | stats['PQ_st'] = panoptic_res["Stuff"] 152 | return stats, coco_evaluator 153 | -------------------------------------------------------------------------------- /analyze_grad.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """autograd_tutorial.ipynb 3 | 4 | Automatically generated by Colaboratory. 
5 | 6 | Original file is located at 7 | https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/009cea8b0f40dfcb55e3280f73b06cc2/autograd_tutorial.ipynb 8 | """ 9 | 10 | # Commented out IPython magic to ensure Python compatibility. 11 | # %matplotlib inline 12 | 13 | 14 | 15 | """Autograd: Automatic Differentiation 16 | =================================== 17 | 18 | Central to all neural networks in PyTorch is the ``autograd`` package. 19 | Let’s first briefly visit this, and we will then go to training our 20 | first neural network. 21 | 22 | 23 | The ``autograd`` package provides automatic differentiation for all operations 24 | on Tensors. It is a define-by-run framework, which means that your backprop is 25 | defined by how your code is run, and that every single iteration can be 26 | different. 27 | 28 | Let us see this in more simple terms with some examples. 29 | 30 | Tensor 31 | -------- 32 | 33 | ``torch.Tensor`` is the central class of the package. If you set its attribute 34 | ``.requires_grad`` as ``True``, it starts to track all operations on it. When 35 | you finish your computation you can call ``.backward()`` and have all the 36 | gradients computed automatically. The gradient for this tensor will be 37 | accumulated into ``.grad`` attribute. 38 | 39 | To stop a tensor from tracking history, you can call ``.detach()`` to detach 40 | it from the computation history, and to prevent future computation from being 41 | tracked. 42 | 43 | To prevent tracking history (and using memory), you can also wrap the code block 44 | in ``with torch.no_grad():``. This can be particularly helpful when evaluating a 45 | model because the model may have trainable parameters with 46 | ``requires_grad=True``, but for which we don't need the gradients. 47 | 48 | There’s one more class which is very important for autograd 49 | implementation - a ``Function``. 50 | 51 | ``Tensor`` and ``Function`` are interconnected and build up an acyclic 52 | graph, that encodes a complete history of computation. Each tensor has 53 | a ``.grad_fn`` attribute that references a ``Function`` that has created 54 | the ``Tensor`` (except for Tensors created by the user - their 55 | ``grad_fn is None``). 56 | 57 | If you want to compute the derivatives, you can call ``.backward()`` on 58 | a ``Tensor``. If ``Tensor`` is a scalar (i.e. it holds a one element 59 | data), you don’t need to specify any arguments to ``backward()``, 60 | however if it has more elements, you need to specify a ``gradient`` 61 | argument that is a tensor of matching shape. 62 | """ 63 | 64 | import torch 65 | torch.manual_seed(123) 66 | torch.cuda.manual_seed(123) 67 | """Create a tensor and set ``requires_grad=True`` to track computation with it""" 68 | 69 | x = torch.ones(2, 2, requires_grad=True) 70 | print(x) 71 | 72 | """Do a tensor operation:""" 73 | 74 | y = x + 2 75 | print(y) 76 | 77 | """``y`` was created as a result of an operation, so it has a ``grad_fn``.""" 78 | 79 | print(y.grad_fn) 80 | 81 | """Do more operations on ``y``""" 82 | 83 | z = y * y * 3 84 | out = z.mean() 85 | 86 | print(z, out) 87 | 88 | """``.requires_grad_( ... )`` changes an existing Tensor's ``requires_grad`` 89 | flag in-place. The input flag defaults to ``False`` if not given. 
90 | """ 91 | 92 | a = torch.randn(2, 2) 93 | a = ((a * 3) / (a - 1)) 94 | print(a.requires_grad) 95 | a.requires_grad_(True) 96 | print(a.requires_grad) 97 | b = (a * a).sum() 98 | print(b.grad_fn) 99 | 100 | """Gradients 101 | --------- 102 | Let's backprop now. 103 | Because ``out`` contains a single scalar, ``out.backward()`` is 104 | equivalent to ``out.backward(torch.tensor(1.))``. 105 | """ 106 | 107 | def set_grad(var): 108 | def hook(grad): 109 | var.grad = grad 110 | return hook 111 | 112 | import matplotlib.pyplot as plt 113 | q = k = torch.randn(500, 256, requires_grad=True) 114 | w_q = torch.randn(256, 256) 115 | w_k = torch.randn(256, 256) 116 | q_proj = torch.matmul(q, w_q) 117 | k_proj = torch.matmul(k, w_k) 118 | q_proj = q_proj.softmax(-1) 119 | k_proj = k_proj.transpose(1,0).softmax(-1) 120 | q_proj.register_hook(set_grad(q_proj)) 121 | k_proj.register_hook(set_grad(k_proj)) 122 | double_attn = torch.matmul(q_proj, k_proj) 123 | double_attn.backward(torch.eye(500)) 124 | plt.imshow(double_attn.cpu().detach()) 125 | plt.show() 126 | plt.imshow(q.grad) 127 | plt.show() 128 | 129 | q_proj = torch.matmul(q, w_q) 130 | k_proj = torch.matmul(k, w_k) 131 | q_proj.register_hook(set_grad(q_proj)) 132 | k_proj.register_hook(set_grad(k_proj)) 133 | self_attn = (torch.matmul(q_proj, k_proj.transpose(1,0))/256**0.5).softmax(-1) 134 | self_attn.backward(torch.ones(500, 500)) 135 | plt.imshow(self_attn.cpu().detach()) 136 | plt.show() 137 | plt.imshow(q.grad) 138 | plt.show() 139 | 140 | """Print gradients d(out)/dx""" 141 | 142 | print(x.grad) 143 | 144 | """You should have got a matrix of ``4.5``. Let’s call the ``out`` 145 | *Tensor* “$o$”. 146 | We have that $o = \frac{1}{4}\sum_i z_i$, 147 | $z_i = 3(x_i+2)^2$ and $z_i\bigr\rvert_{x_i=1} = 27$. 148 | Therefore, 149 | $\frac{\partial o}{\partial x_i} = \frac{3}{2}(x_i+2)$, hence 150 | $\frac{\partial o}{\partial x_i}\bigr\rvert_{x_i=1} = \frac{9}{2} = 4.5$. 151 | 152 | Mathematically, if you have a vector valued function $\vec{y}=f(\vec{x})$, 153 | then the gradient of $\vec{y}$ with respect to $\vec{x}$ 154 | is a Jacobian matrix: 155 | 156 | \begin{align}J=\left(\begin{array}{ccc} 157 | \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{1}}{\partial x_{n}}\\ 158 | \vdots & \ddots & \vdots\\ 159 | \frac{\partial y_{m}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} 160 | \end{array}\right)\end{align} 161 | 162 | Generally speaking, ``torch.autograd`` is an engine for computing 163 | vector-Jacobian product. That is, given any vector 164 | $v=\left(\begin{array}{cccc} v_{1} & v_{2} & \cdots & v_{m}\end{array}\right)^{T}$, 165 | compute the product $v^{T}\cdot J$. 
If $v$ happens to be 166 | the gradient of a scalar function $l=g\left(\vec{y}\right)$, 167 | that is, 168 | $v=\left(\begin{array}{ccc}\frac{\partial l}{\partial y_{1}} & \cdots & \frac{\partial l}{\partial y_{m}}\end{array}\right)^{T}$, 169 | then by the chain rule, the vector-Jacobian product would be the 170 | gradient of $l$ with respect to $\vec{x}$: 171 | 172 | \begin{align}J^{T}\cdot v=\left(\begin{array}{ccc} 173 | \frac{\partial y_{1}}{\partial x_{1}} & \cdots & \frac{\partial y_{m}}{\partial x_{1}}\\ 174 | \vdots & \ddots & \vdots\\ 175 | \frac{\partial y_{1}}{\partial x_{n}} & \cdots & \frac{\partial y_{m}}{\partial x_{n}} 176 | \end{array}\right)\left(\begin{array}{c} 177 | \frac{\partial l}{\partial y_{1}}\\ 178 | \vdots\\ 179 | \frac{\partial l}{\partial y_{m}} 180 | \end{array}\right)=\left(\begin{array}{c} 181 | \frac{\partial l}{\partial x_{1}}\\ 182 | \vdots\\ 183 | \frac{\partial l}{\partial x_{n}} 184 | \end{array}\right)\end{align} 185 | 186 | (Note that $v^{T}\cdot J$ gives a row vector which can be 187 | treated as a column vector by taking $J^{T}\cdot v$.) 188 | 189 | This characteristic of vector-Jacobian product makes it very 190 | convenient to feed external gradients into a model that has 191 | non-scalar output. 192 | 193 | Now let's take a look at an example of vector-Jacobian product: 194 | """ 195 | 196 | x = torch.randn(3, requires_grad=True) 197 | 198 | y = x * 2 199 | while y.data.norm() < 1000: 200 | y = y * 2 201 | 202 | print(y) 203 | 204 | """Now in this case ``y`` is no longer a scalar. ``torch.autograd`` 205 | could not compute the full Jacobian directly, but if we just 206 | want the vector-Jacobian product, simply pass the vector to 207 | ``backward`` as argument: 208 | """ 209 | 210 | v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float) 211 | y.backward(v) 212 | 213 | print(x.grad) 214 | 215 | """You can also stop autograd from tracking history on Tensors 216 | with ``.requires_grad=True`` either by wrapping the code block in 217 | ``with torch.no_grad():`` 218 | """ 219 | 220 | print(x.requires_grad) 221 | print((x ** 2).requires_grad) 222 | 223 | with torch.no_grad(): 224 | print((x ** 2).requires_grad) 225 | 226 | """Or by using ``.detach()`` to get a new Tensor with the same 227 | content but that does not require gradients: 228 | """ 229 | 230 | print(x.requires_grad) 231 | y = x.detach() 232 | print(y.requires_grad) 233 | print(x.eq(y).all()) 234 | 235 | """**Read Later:** 236 | 237 | Document about ``autograd.Function`` is at 238 | https://pytorch.org/docs/stable/autograd.html#function 239 | """ -------------------------------------------------------------------------------- /compute_flops.py: -------------------------------------------------------------------------------- 1 | # this is the main entrypoint 2 | # as we describe in the paper, we compute the flops over the first 100 images 3 | # on COCO val2017, and report the average result 4 | import torch 5 | import time 6 | import torchvision 7 | import argparse 8 | 9 | import numpy as np 10 | import tqdm 11 | 12 | from models import build_model 13 | from datasets import build_dataset 14 | 15 | from flop_count import flop_count 16 | 17 | 18 | def get_args_parser(): 19 | parser = argparse.ArgumentParser('Set transformer detector', add_help=False) 20 | parser.add_argument('--lr', default=1e-4, type=float) 21 | parser.add_argument('--lr_backbone', default=1e-5, type=float) 22 | parser.add_argument('--batch_size', default=2, type=int) 23 | parser.add_argument('--weight_decay', 
default=1e-4, type=float) 24 | parser.add_argument('--epochs', default=300, type=int) 25 | parser.add_argument('--lr_drop', default=200, type=int) 26 | parser.add_argument('--clip_max_norm', default=0.1, type=float, 27 | help='gradient clipping max norm') 28 | 29 | # Model parameters 30 | parser.add_argument('--frozen_weights', type=str, default=None, 31 | help="Path to the pretrained model. If set, only the mask head will be trained") 32 | # * Backbone 33 | parser.add_argument('--backbone', default='resnet50', type=str, 34 | help="Name of the convolutional backbone to use") 35 | parser.add_argument('--dilation', action='store_true', 36 | help="If true, we replace stride with dilation in the last convolutional block (DC5)") 37 | parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), 38 | help="Type of positional embedding to use on top of the image features") 39 | 40 | # * Transformer 41 | parser.add_argument('--enc_layers', default=6, type=int, 42 | help="Number of encoding layers in the transformer") 43 | parser.add_argument('--dec_layers', default=6, type=int, 44 | help="Number of decoding layers in the transformer") 45 | parser.add_argument('--dim_feedforward', default=2048, type=int, 46 | help="Intermediate size of the feedforward layers in the transformer blocks") 47 | parser.add_argument('--hidden_dim', default=256, type=int, 48 | help="Size of the embeddings (dimension of the transformer)") 49 | parser.add_argument('--dropout', default=0.1, type=float, 50 | help="Dropout applied in the transformer") 51 | parser.add_argument('--nheads', default=8, type=int, 52 | help="Number of attention heads inside the transformer's attentions") 53 | parser.add_argument('--num_queries', default=100, type=int, 54 | help="Number of query slots") 55 | parser.add_argument('--pre_norm', action='store_true') 56 | 57 | # * Segmentation 58 | parser.add_argument('--masks', action='store_true', 59 | help="Train segmentation head if the flag is provided") 60 | 61 | # Loss 62 | parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', 63 | help="Disables auxiliary decoding losses (loss at each layer)") 64 | # * Matcher 65 | parser.add_argument('--set_cost_class', default=1, type=float, 66 | help="Class coefficient in the matching cost") 67 | parser.add_argument('--set_cost_bbox', default=5, type=float, 68 | help="L1 box coefficient in the matching cost") 69 | parser.add_argument('--set_cost_giou', default=2, type=float, 70 | help="giou box coefficient in the matching cost") 71 | # * Loss coefficients 72 | parser.add_argument('--mask_loss_coef', default=1, type=float) 73 | parser.add_argument('--dice_loss_coef', default=1, type=float) 74 | parser.add_argument('--bbox_loss_coef', default=5, type=float) 75 | parser.add_argument('--giou_loss_coef', default=2, type=float) 76 | parser.add_argument('--eos_coef', default=0.1, type=float, 77 | help="Relative classification weight of the no-object class") 78 | 79 | # dataset parameters 80 | parser.add_argument('--train_image_set', default='train')## add for train on sampled set, train_sampled_PER_CAT_THR_500, ... 
81 | parser.add_argument('--dataset_file', default='coco') 82 | parser.add_argument('--coco_path', type=str) 83 | parser.add_argument('--coco_panoptic_path', type=str) 84 | parser.add_argument('--remove_difficult', action='store_true') 85 | 86 | parser.add_argument('--output_dir', default='', 87 | help='path where to save, empty for no saving') 88 | parser.add_argument('--device', default='cuda', 89 | help='device to use for training / testing') 90 | parser.add_argument('--seed', default=42, type=int) 91 | parser.add_argument('--resume', default='', help='resume from checkpoint') 92 | parser.add_argument('--start_epoch', default=0, type=int, metavar='N', 93 | help='start epoch') 94 | parser.add_argument('--eval', action='store_true') 95 | parser.add_argument('--num_workers', default=2, type=int) 96 | 97 | # distributed training parameters 98 | parser.add_argument('--world_size', default=1, type=int, 99 | help='number of distributed processes') 100 | parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') 101 | parser.add_argument('--sample_reg_loss', default=1e-4, type=float, 102 | help="sample_reg_loss") 103 | parser.add_argument('--sample_topk_ratio', default=1/3., type=float) 104 | parser.add_argument('--score_pred_net', type=str, default='2layer-fc-256') 105 | parser.add_argument('--unsample_abstract_number', default=100, type=int, 106 | help='unsample_abstract_number') 107 | parser.add_argument('--pos_embed_kproj', action='store_true', 108 | help="add pos embeding for predicting unsampled aggregation attention") 109 | parser.add_argument('--sampler_lr_drop_epoch', default=1e5, type=int, 110 | help='default is not drop') 111 | parser.add_argument('--reshape_param_group', action='store_true', 112 | help="reshape_param_group of loaded state_dict to match with the 3 group setting") 113 | parser.add_argument('--notload_lr_scheduler', action='store_true', 114 | help="notload_lr_scheduler") 115 | return parser 116 | 117 | def get_dataset(coco_path): 118 | """ 119 | Gets the COCO dataset used for computing the flops on 120 | """ 121 | class DummyArgs: 122 | pass 123 | args = DummyArgs() 124 | args.dataset_file = "coco" 125 | args.coco_path = coco_path 126 | args.masks = False 127 | dataset = build_dataset(image_set='val', args=args) 128 | return dataset 129 | 130 | 131 | def warmup(model, inputs, N=10): 132 | for i in range(N): 133 | out = model(inputs) 134 | torch.cuda.synchronize() 135 | 136 | 137 | def measure_time(model, inputs, N=10): 138 | warmup(model, inputs) 139 | s = time.time() 140 | for i in range(N): 141 | out = model(inputs) 142 | torch.cuda.synchronize() 143 | t = (time.time() - s) / N 144 | return t 145 | 146 | 147 | def fmt_res(data): 148 | return data.mean(), data.std(), data.min(), data.max() 149 | 150 | 151 | # get the first 100 images of COCO val2017 152 | PATH_TO_COCO = "./data/coco/" 153 | dataset = get_dataset(PATH_TO_COCO) 154 | images = [] 155 | for idx in range(100): 156 | img, t = dataset[idx] 157 | images.append(img) 158 | 159 | device = torch.device('cuda') 160 | results = {} 161 | 162 | parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) 163 | args = parser.parse_args() 164 | 165 | model, criterion, postprocessors = build_model(args) 166 | model.to(device) 167 | 168 | model_name = 'detr_resnet50' 169 | 170 | with torch.no_grad(): 171 | tmp = [] 172 | tmp2 = [] 173 | measure_scopes = ['encoder','decoder','backbone','SortSampler'] 174 | measure_scopes_res = {k:[] for k in 
measure_scopes} 175 | for img in tqdm.tqdm(images): 176 | inputs = [img.to(device)] 177 | res = flop_count(model, (inputs,)) 178 | [measure_scopes_res[k].append(sum(flop_count(model, (inputs,), measure_scope=k).values())) for k in measure_scopes] 179 | # t = measure_time(model, inputs) 180 | tmp.append(sum(res.values())) 181 | # tmp2.append(t) 182 | results[model_name] = {'flops': fmt_res(np.array(tmp)), 183 | 'flops_backbone': np.mean(measure_scopes_res['backbone']), 184 | 'flops_encoder': np.mean(measure_scopes_res['encoder']), 185 | 'flops_decoder': np.mean(measure_scopes_res['decoder']), 186 | 'flops_sampler': np.mean(measure_scopes_res['SortSampler']), 187 | } 188 | 189 | 190 | print('=============================') 191 | print('') 192 | for r in results: 193 | print(r) 194 | for k, v in results[r].items(): 195 | print(' ', k, ':', v) -------------------------------------------------------------------------------- /datasets/transforms.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Transforms and data augmentation for both image + bbox. 4 | """ 5 | import random 6 | 7 | import PIL 8 | import torch 9 | import torchvision.transforms as T 10 | import torchvision.transforms.functional as F 11 | 12 | from util.box_ops import box_xyxy_to_cxcywh 13 | from util.misc import interpolate 14 | 15 | 16 | def crop(image, target, region): 17 | cropped_image = F.crop(image, *region) 18 | 19 | target = target.copy() 20 | i, j, h, w = region 21 | 22 | # should we do something wrt the original size? 23 | target["size"] = torch.tensor([h, w]) 24 | 25 | fields = ["labels", "area", "iscrowd"] 26 | 27 | if "boxes" in target: 28 | boxes = target["boxes"] 29 | max_size = torch.as_tensor([w, h], dtype=torch.float32) 30 | cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) 31 | cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) 32 | cropped_boxes = cropped_boxes.clamp(min=0) 33 | area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) 34 | target["boxes"] = cropped_boxes.reshape(-1, 4) 35 | target["area"] = area 36 | fields.append("boxes") 37 | 38 | if "masks" in target: 39 | # FIXME should we update the area here if there are no boxes? 
40 | target['masks'] = target['masks'][:, i:i + h, j:j + w] 41 | fields.append("masks") 42 | 43 | # remove elements for which the boxes or masks that have zero area 44 | if "boxes" in target or "masks" in target: 45 | # favor boxes selection when defining which elements to keep 46 | # this is compatible with previous implementation 47 | if "boxes" in target: 48 | cropped_boxes = target['boxes'].reshape(-1, 2, 2) 49 | keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) 50 | else: 51 | keep = target['masks'].flatten(1).any(1) 52 | 53 | for field in fields: 54 | target[field] = target[field][keep] 55 | 56 | return cropped_image, target 57 | 58 | 59 | def hflip(image, target): 60 | flipped_image = F.hflip(image) 61 | 62 | w, h = image.size 63 | 64 | target = target.copy() 65 | if "boxes" in target: 66 | boxes = target["boxes"] 67 | boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) 68 | target["boxes"] = boxes 69 | 70 | if "masks" in target: 71 | target['masks'] = target['masks'].flip(-1) 72 | 73 | return flipped_image, target 74 | 75 | 76 | def resize(image, target, size, max_size=None): 77 | # size can be min_size (scalar) or (w, h) tuple 78 | 79 | def get_size_with_aspect_ratio(image_size, size, max_size=None): 80 | w, h = image_size 81 | if max_size is not None: 82 | min_original_size = float(min((w, h))) 83 | max_original_size = float(max((w, h))) 84 | if max_original_size / min_original_size * size > max_size: 85 | size = int(round(max_size * min_original_size / max_original_size)) 86 | 87 | if (w <= h and w == size) or (h <= w and h == size): 88 | return (h, w) 89 | 90 | if w < h: 91 | ow = size 92 | oh = int(size * h / w) 93 | else: 94 | oh = size 95 | ow = int(size * w / h) 96 | 97 | return (oh, ow) 98 | 99 | def get_size(image_size, size, max_size=None): 100 | if isinstance(size, (list, tuple)): 101 | return size[::-1] 102 | else: 103 | return get_size_with_aspect_ratio(image_size, size, max_size) 104 | 105 | size = get_size(image.size, size, max_size) 106 | rescaled_image = F.resize(image, size) 107 | 108 | if target is None: 109 | return rescaled_image, None 110 | 111 | ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) 112 | ratio_width, ratio_height = ratios 113 | 114 | target = target.copy() 115 | if "boxes" in target: 116 | boxes = target["boxes"] 117 | scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) 118 | target["boxes"] = scaled_boxes 119 | 120 | if "area" in target: 121 | area = target["area"] 122 | scaled_area = area * (ratio_width * ratio_height) 123 | target["area"] = scaled_area 124 | 125 | h, w = size 126 | target["size"] = torch.tensor([h, w]) 127 | 128 | if "masks" in target: 129 | target['masks'] = interpolate( 130 | target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 131 | 132 | return rescaled_image, target 133 | 134 | 135 | def pad(image, target, padding): 136 | # assumes that we only pad on the bottom right corners 137 | padded_image = F.pad(image, (0, 0, padding[0], padding[1])) 138 | if target is None: 139 | return padded_image, None 140 | target = target.copy() 141 | # should we do something wrt the original size? 
142 | target["size"] = torch.tensor(padded_image[::-1]) 143 | if "masks" in target: 144 | target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) 145 | return padded_image, target 146 | 147 | 148 | class RandomCrop(object): 149 | def __init__(self, size): 150 | self.size = size 151 | 152 | def __call__(self, img, target): 153 | region = T.RandomCrop.get_params(img, self.size) 154 | return crop(img, target, region) 155 | 156 | 157 | class RandomSizeCrop(object): 158 | def __init__(self, min_size: int, max_size: int): 159 | self.min_size = min_size 160 | self.max_size = max_size 161 | 162 | def __call__(self, img: PIL.Image.Image, target: dict): 163 | w = random.randint(self.min_size, min(img.width, self.max_size)) 164 | h = random.randint(self.min_size, min(img.height, self.max_size)) 165 | region = T.RandomCrop.get_params(img, [h, w]) 166 | return crop(img, target, region) 167 | 168 | 169 | class CenterCrop(object): 170 | def __init__(self, size): 171 | self.size = size 172 | 173 | def __call__(self, img, target): 174 | image_width, image_height = img.size 175 | crop_height, crop_width = self.size 176 | crop_top = int(round((image_height - crop_height) / 2.)) 177 | crop_left = int(round((image_width - crop_width) / 2.)) 178 | return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) 179 | 180 | 181 | class RandomHorizontalFlip(object): 182 | def __init__(self, p=0.5): 183 | self.p = p 184 | 185 | def __call__(self, img, target): 186 | if random.random() < self.p: 187 | return hflip(img, target) 188 | return img, target 189 | 190 | 191 | class RandomResize(object): 192 | def __init__(self, sizes, max_size=None): 193 | assert isinstance(sizes, (list, tuple)) 194 | self.sizes = sizes 195 | self.max_size = max_size 196 | 197 | def __call__(self, img, target=None): 198 | size = random.choice(self.sizes) 199 | return resize(img, target, size, self.max_size) 200 | 201 | 202 | class RandomPad(object): 203 | def __init__(self, max_pad): 204 | self.max_pad = max_pad 205 | 206 | def __call__(self, img, target): 207 | pad_x = random.randint(0, self.max_pad) 208 | pad_y = random.randint(0, self.max_pad) 209 | return pad(img, target, (pad_x, pad_y)) 210 | 211 | 212 | class RandomSelect(object): 213 | """ 214 | Randomly selects between transforms1 and transforms2, 215 | with probability p for transforms1 and (1 - p) for transforms2 216 | """ 217 | def __init__(self, transforms1, transforms2, p=0.5): 218 | self.transforms1 = transforms1 219 | self.transforms2 = transforms2 220 | self.p = p 221 | 222 | def __call__(self, img, target): 223 | if random.random() < self.p: 224 | return self.transforms1(img, target) 225 | return self.transforms2(img, target) 226 | 227 | 228 | class ToTensor(object): 229 | def __call__(self, img, target): 230 | return F.to_tensor(img), target 231 | 232 | 233 | class RandomErasing(object): 234 | 235 | def __init__(self, *args, **kwargs): 236 | self.eraser = T.RandomErasing(*args, **kwargs) 237 | 238 | def __call__(self, img, target): 239 | return self.eraser(img), target 240 | 241 | 242 | class Normalize(object): 243 | def __init__(self, mean, std): 244 | self.mean = mean 245 | self.std = std 246 | 247 | def __call__(self, image, target=None): 248 | image = F.normalize(image, mean=self.mean, std=self.std) 249 | if target is None: 250 | return image, None 251 | target = target.copy() 252 | h, w = image.shape[-2:] 253 | if "boxes" in target: 254 | boxes = target["boxes"] 255 | boxes = box_xyxy_to_cxcywh(boxes) 256 | boxes = boxes / 
torch.tensor([w, h, w, h], dtype=torch.float32) 257 | target["boxes"] = boxes 258 | return image, target 259 | 260 | 261 | class Compose(object): 262 | def __init__(self, transforms): 263 | self.transforms = transforms 264 | 265 | def __call__(self, image, target): 266 | for t in self.transforms: 267 | image, target = t(image, target) 268 | return image, target 269 | 270 | def __repr__(self): 271 | format_string = self.__class__.__name__ + "(" 272 | for t in self.transforms: 273 | format_string += "\n" 274 | format_string += " {0}".format(t) 275 | format_string += "\n)" 276 | return format_string 277 | -------------------------------------------------------------------------------- /datasets/coco_eval.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | COCO evaluator that works in distributed mode. 4 | 5 | Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py 6 | The difference is that there is less copy-pasting from pycocotools 7 | in the end of the file, as python3 can suppress prints with contextlib 8 | """ 9 | import os 10 | import contextlib 11 | import copy 12 | import numpy as np 13 | import torch 14 | 15 | from pycocotools.cocoeval import COCOeval 16 | from pycocotools.coco import COCO 17 | import pycocotools.mask as mask_util 18 | 19 | from util.misc import all_gather 20 | 21 | 22 | class CocoEvaluator(object): 23 | def __init__(self, coco_gt, iou_types): 24 | assert isinstance(iou_types, (list, tuple)) 25 | coco_gt = copy.deepcopy(coco_gt) 26 | self.coco_gt = coco_gt 27 | 28 | self.iou_types = iou_types 29 | self.coco_eval = {} 30 | for iou_type in iou_types: 31 | self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) 32 | 33 | self.img_ids = [] 34 | self.eval_imgs = {k: [] for k in iou_types} 35 | 36 | def update(self, predictions): 37 | img_ids = list(np.unique(list(predictions.keys()))) 38 | self.img_ids.extend(img_ids) 39 | 40 | for iou_type in self.iou_types: 41 | results = self.prepare(predictions, iou_type) 42 | 43 | # suppress pycocotools prints 44 | with open(os.devnull, 'w') as devnull: 45 | with contextlib.redirect_stdout(devnull): 46 | coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() 47 | coco_eval = self.coco_eval[iou_type] 48 | 49 | coco_eval.cocoDt = coco_dt 50 | coco_eval.params.imgIds = list(img_ids) 51 | img_ids, eval_imgs = evaluate(coco_eval) 52 | 53 | self.eval_imgs[iou_type].append(eval_imgs) 54 | 55 | def synchronize_between_processes(self): 56 | for iou_type in self.iou_types: 57 | self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) 58 | create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) 59 | 60 | def accumulate(self): 61 | for coco_eval in self.coco_eval.values(): 62 | coco_eval.accumulate() 63 | 64 | def summarize(self): 65 | for iou_type, coco_eval in self.coco_eval.items(): 66 | print("IoU metric: {}".format(iou_type)) 67 | coco_eval.summarize() 68 | 69 | def prepare(self, predictions, iou_type): 70 | if iou_type == "bbox": 71 | return self.prepare_for_coco_detection(predictions) 72 | elif iou_type == "segm": 73 | return self.prepare_for_coco_segmentation(predictions) 74 | elif iou_type == "keypoints": 75 | return self.prepare_for_coco_keypoint(predictions) 76 | else: 77 | raise ValueError("Unknown iou type {}".format(iou_type)) 78 | 79 | def prepare_for_coco_detection(self, predictions): 80 | 
coco_results = [] 81 | for original_id, prediction in predictions.items(): 82 | if len(prediction) == 0: 83 | continue 84 | 85 | boxes = prediction["boxes"] 86 | boxes = convert_to_xywh(boxes).tolist() 87 | scores = prediction["scores"].tolist() 88 | labels = prediction["labels"].tolist() 89 | 90 | coco_results.extend( 91 | [ 92 | { 93 | "image_id": original_id, 94 | "category_id": labels[k], 95 | "bbox": box, 96 | "score": scores[k], 97 | } 98 | for k, box in enumerate(boxes) 99 | ] 100 | ) 101 | return coco_results 102 | 103 | def prepare_for_coco_segmentation(self, predictions): 104 | coco_results = [] 105 | for original_id, prediction in predictions.items(): 106 | if len(prediction) == 0: 107 | continue 108 | 109 | scores = prediction["scores"] 110 | labels = prediction["labels"] 111 | masks = prediction["masks"] 112 | 113 | masks = masks > 0.5 114 | 115 | scores = prediction["scores"].tolist() 116 | labels = prediction["labels"].tolist() 117 | 118 | rles = [ 119 | mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] 120 | for mask in masks 121 | ] 122 | for rle in rles: 123 | rle["counts"] = rle["counts"].decode("utf-8") 124 | 125 | coco_results.extend( 126 | [ 127 | { 128 | "image_id": original_id, 129 | "category_id": labels[k], 130 | "segmentation": rle, 131 | "score": scores[k], 132 | } 133 | for k, rle in enumerate(rles) 134 | ] 135 | ) 136 | return coco_results 137 | 138 | def prepare_for_coco_keypoint(self, predictions): 139 | coco_results = [] 140 | for original_id, prediction in predictions.items(): 141 | if len(prediction) == 0: 142 | continue 143 | 144 | boxes = prediction["boxes"] 145 | boxes = convert_to_xywh(boxes).tolist() 146 | scores = prediction["scores"].tolist() 147 | labels = prediction["labels"].tolist() 148 | keypoints = prediction["keypoints"] 149 | keypoints = keypoints.flatten(start_dim=1).tolist() 150 | 151 | coco_results.extend( 152 | [ 153 | { 154 | "image_id": original_id, 155 | "category_id": labels[k], 156 | 'keypoints': keypoint, 157 | "score": scores[k], 158 | } 159 | for k, keypoint in enumerate(keypoints) 160 | ] 161 | ) 162 | return coco_results 163 | 164 | 165 | def convert_to_xywh(boxes): 166 | xmin, ymin, xmax, ymax = boxes.unbind(1) 167 | return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) 168 | 169 | 170 | def merge(img_ids, eval_imgs): 171 | all_img_ids = all_gather(img_ids) 172 | all_eval_imgs = all_gather(eval_imgs) 173 | 174 | merged_img_ids = [] 175 | for p in all_img_ids: 176 | merged_img_ids.extend(p) 177 | 178 | merged_eval_imgs = [] 179 | for p in all_eval_imgs: 180 | merged_eval_imgs.append(p) 181 | 182 | merged_img_ids = np.array(merged_img_ids) 183 | merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) 184 | 185 | # keep only unique (and in sorted order) images 186 | merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) 187 | merged_eval_imgs = merged_eval_imgs[..., idx] 188 | 189 | return merged_img_ids, merged_eval_imgs 190 | 191 | 192 | def create_common_coco_eval(coco_eval, img_ids, eval_imgs): 193 | img_ids, eval_imgs = merge(img_ids, eval_imgs) 194 | img_ids = list(img_ids) 195 | eval_imgs = list(eval_imgs.flatten()) 196 | 197 | coco_eval.evalImgs = eval_imgs 198 | coco_eval.params.imgIds = img_ids 199 | coco_eval._paramsEval = copy.deepcopy(coco_eval.params) 200 | 201 | 202 | ################################################################# 203 | # From pycocotools, just removed the prints and fixed 204 | # a Python3 bug about unicode not defined 205 | 
################################################################# 206 | 207 | 208 | def evaluate(self): 209 | ''' 210 | Run per image evaluation on given images and store results (a list of dict) in self.evalImgs 211 | :return: None 212 | ''' 213 | # tic = time.time() 214 | # print('Running per image evaluation...') 215 | p = self.params 216 | # add backward compatibility if useSegm is specified in params 217 | if p.useSegm is not None: 218 | p.iouType = 'segm' if p.useSegm == 1 else 'bbox' 219 | print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType)) 220 | # print('Evaluate annotation type *{}*'.format(p.iouType)) 221 | p.imgIds = list(np.unique(p.imgIds)) 222 | if p.useCats: 223 | p.catIds = list(np.unique(p.catIds)) 224 | p.maxDets = sorted(p.maxDets) 225 | self.params = p 226 | 227 | self._prepare() 228 | # loop through images, area range, max detection number 229 | catIds = p.catIds if p.useCats else [-1] 230 | 231 | if p.iouType == 'segm' or p.iouType == 'bbox': 232 | computeIoU = self.computeIoU 233 | elif p.iouType == 'keypoints': 234 | computeIoU = self.computeOks 235 | self.ious = { 236 | (imgId, catId): computeIoU(imgId, catId) 237 | for imgId in p.imgIds 238 | for catId in catIds} 239 | 240 | evaluateImg = self.evaluateImg 241 | maxDet = p.maxDets[-1] 242 | evalImgs = [ 243 | evaluateImg(imgId, catId, areaRng, maxDet) 244 | for catId in catIds 245 | for areaRng in p.areaRng 246 | for imgId in p.imgIds 247 | ] 248 | # this is NOT in the pycocotools code, but could be done outside 249 | evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) 250 | self._paramsEval = copy.deepcopy(self.params) 251 | # toc = time.time() 252 | # print('DONE (t={:0.2f}s).'.format(toc-tic)) 253 | return p.imgIds, evalImgs 254 | 255 | ################################################################# 256 | # end of straight copy from pycocotools, just removing the prints 257 | ################################################################# 258 | -------------------------------------------------------------------------------- /jit_handles.py: -------------------------------------------------------------------------------- 1 | # taken from detectron2 / fvcore with a few modifications 2 | # https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py 3 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 4 | 5 | import typing 6 | from collections import Counter, OrderedDict 7 | import numpy as np 8 | from numpy import prod 9 | from itertools import zip_longest 10 | 11 | 12 | def get_shape(val: object) -> typing.List[int]: 13 | """ 14 | Get the shapes from a jit value object. 15 | Args: 16 | val (torch._C.Value): jit value object. 17 | Returns: 18 | list(int): return a list of ints. 19 | """ 20 | if val.isCompleteTensor(): # pyre-ignore 21 | r = val.type().sizes() # pyre-ignore 22 | if not r: 23 | r = [1] 24 | return r 25 | elif val.type().kind() in ("IntType", "FloatType"): 26 | return [1] 27 | else: 28 | raise ValueError() 29 | 30 | 31 | def addmm_flop_jit( 32 | inputs: typing.List[object], outputs: typing.List[object] 33 | ) -> typing.Counter[str]: 34 | """ 35 | This method counts the flops for fully connected layers with torch script. 36 | Args: 37 | inputs (list(torch._C.Value)): The input shape in the form of a list of 38 | jit object. 39 | outputs (list(torch._C.Value)): The output shape in the form of a list 40 | of jit object. 
41 | Returns: 42 | Counter: A Counter dictionary that records the number of flops for each 43 | operation. 44 | """ 45 | # Count flop for nn.Linear 46 | # inputs is a list of length 3. 47 | input_shapes = [get_shape(v) for v in inputs[1:3]] 48 | # input_shapes[0]: [batch size, input feature dimension] 49 | # input_shapes[1]: [batch size, output feature dimension] 50 | assert len(input_shapes[0]) == 2 51 | assert len(input_shapes[1]) == 2 52 | batch_size, input_dim = input_shapes[0] 53 | output_dim = input_shapes[1][1] 54 | flop = batch_size * input_dim * output_dim 55 | flop_counter = Counter({"addmm": flop}) 56 | return flop_counter 57 | 58 | 59 | def bmm_flop_jit(inputs, outputs): 60 | # Count flop for nn.Linear 61 | # inputs is a list of length 3. 62 | input_shapes = [get_shape(v) for v in inputs] 63 | # input_shapes[0]: [batch size, input feature dimension] 64 | # input_shapes[1]: [batch size, output feature dimension] 65 | assert len(input_shapes[0]) == 3 66 | assert len(input_shapes[1]) == 3 67 | T, batch_size, input_dim = input_shapes[0] 68 | output_dim = input_shapes[1][2] 69 | flop = T * batch_size * input_dim * output_dim 70 | flop_counter = Counter({"bmm": flop}) 71 | return flop_counter 72 | 73 | 74 | def basic_binary_op_flop_jit(inputs, outputs, name): 75 | input_shapes = [get_shape(v) for v in inputs] 76 | # for broadcasting 77 | input_shapes = [s[::-1] for s in input_shapes] 78 | max_shape = np.array(list(zip_longest(*input_shapes, fillvalue=1))).max(1) 79 | flop = prod(max_shape) 80 | flop_counter = Counter({name: flop}) 81 | return flop_counter 82 | 83 | 84 | def rsqrt_flop_jit(inputs, outputs): 85 | input_shapes = [get_shape(v) for v in inputs] 86 | flop = prod(input_shapes[0]) * 2 87 | flop_counter = Counter({"rsqrt": flop}) 88 | return flop_counter 89 | 90 | def dropout_flop_jit(inputs, outputs): 91 | input_shapes = [get_shape(v) for v in inputs[:1]] 92 | flop = prod(input_shapes[0]) 93 | flop_counter = Counter({"dropout": flop}) 94 | return flop_counter 95 | 96 | def softmax_flop_jit(inputs, outputs): 97 | # from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/profiler/internal/flops_registry.py 98 | input_shapes = [get_shape(v) for v in inputs[:1]] 99 | flop = prod(input_shapes[0]) * 5 100 | flop_counter = Counter({'softmax': flop}) 101 | return flop_counter 102 | 103 | def _reduction_op_flop_jit(inputs, outputs, reduce_flops=1, finalize_flops=0): 104 | input_shapes = [get_shape(v) for v in inputs] 105 | output_shapes = [get_shape(v) for v in outputs] 106 | 107 | in_elements = prod(input_shapes[0]) 108 | out_elements = prod(output_shapes[0]) 109 | 110 | num_flops = (in_elements * reduce_flops 111 | + out_elements * (finalize_flops - reduce_flops)) 112 | 113 | return num_flops 114 | 115 | 116 | def conv_flop_count( 117 | x_shape: typing.List[int], 118 | w_shape: typing.List[int], 119 | out_shape: typing.List[int], 120 | ) -> typing.Counter[str]: 121 | """ 122 | This method counts the flops for convolution. Note only multiplication is 123 | counted. Computation for addition and bias is ignored. 124 | Args: 125 | x_shape (list(int)): The input shape before convolution. 126 | w_shape (list(int)): The filter shape. 127 | out_shape (list(int)): The output shape after convolution. 128 | Returns: 129 | Counter: A Counter dictionary that records the number of flops for each 130 | operation. 
131 | """ 132 | batch_size, Cin_dim, Cout_dim = x_shape[0], w_shape[1], out_shape[1] 133 | out_size = prod(out_shape[2:]) 134 | kernel_size = prod(w_shape[2:]) 135 | flop = batch_size * out_size * Cout_dim * Cin_dim * kernel_size 136 | flop_counter = Counter({"conv": flop}) 137 | return flop_counter 138 | 139 | 140 | def conv_flop_jit( 141 | inputs: typing.List[object], outputs: typing.List[object] 142 | ) -> typing.Counter[str]: 143 | """ 144 | This method counts the flops for convolution using torch script. 145 | Args: 146 | inputs (list(torch._C.Value)): The input shape in the form of a list of 147 | jit object before convolution. 148 | outputs (list(torch._C.Value)): The output shape in the form of a list 149 | of jit object after convolution. 150 | Returns: 151 | Counter: A Counter dictionary that records the number of flops for each 152 | operation. 153 | """ 154 | # Inputs of Convolution should be a list of length 12. They represent: 155 | # 0) input tensor, 1) convolution filter, 2) bias, 3) stride, 4) padding, 156 | # 5) dilation, 6) transposed, 7) out_pad, 8) groups, 9) benchmark_cudnn, 157 | # 10) deterministic_cudnn and 11) user_enabled_cudnn. 158 | assert len(inputs) == 12 159 | x, w = inputs[:2] 160 | x_shape, w_shape, out_shape = ( 161 | get_shape(x), 162 | get_shape(w), 163 | get_shape(outputs[0]), 164 | ) 165 | return conv_flop_count(x_shape, w_shape, out_shape) 166 | 167 | 168 | def einsum_flop_jit( 169 | inputs: typing.List[object], outputs: typing.List[object] 170 | ) -> typing.Counter[str]: 171 | """ 172 | This method counts the flops for the einsum operation. We currently support 173 | two einsum operations: "nct,ncp->ntp" and "ntg,ncg->nct". 174 | Args: 175 | inputs (list(torch._C.Value)): The input shape in the form of a list of 176 | jit object before einsum. 177 | outputs (list(torch._C.Value)): The output shape in the form of a list 178 | of jit object after einsum. 179 | Returns: 180 | Counter: A Counter dictionary that records the number of flops for each 181 | operation. 182 | """ 183 | # Inputs of einsum should be a list of length 2. 184 | # Inputs[0] stores the equation used for einsum. 185 | # Inputs[1] stores the list of input shapes. 186 | assert len(inputs) == 2 187 | equation = inputs[0].toIValue() # pyre-ignore 188 | # Get rid of white space in the equation string. 189 | equation = equation.replace(" ", "") 190 | # Re-map equation so that same equation with different alphabet 191 | # representations will look the same. 192 | letter_order = OrderedDict((k, 0) for k in equation if k.isalpha()).keys() 193 | mapping = {ord(x): 97 + i for i, x in enumerate(letter_order)} 194 | equation = equation.translate(mapping) 195 | input_shapes_jit = inputs[1].node().inputs() # pyre-ignore 196 | input_shapes = [get_shape(v) for v in input_shapes_jit] 197 | 198 | if equation == "abc,abd->acd": 199 | n, c, t = input_shapes[0] 200 | p = input_shapes[-1][-1] 201 | flop = n * c * t * p 202 | flop_counter = Counter({"einsum": flop}) 203 | return flop_counter 204 | 205 | elif equation == "abc,adc->adb": 206 | n, t, g = input_shapes[0] 207 | c = input_shapes[-1][1] 208 | flop = n * t * g * c 209 | flop_counter = Counter({"einsum": flop}) 210 | return flop_counter 211 | 212 | else: 213 | raise NotImplementedError("Unsupported einsum operation.") 214 | 215 | 216 | def matmul_flop_jit( 217 | inputs: typing.List[object], outputs: typing.List[object] 218 | ) -> typing.Counter[str]: 219 | """ 220 | This method counts the flops for matmul. 
221 | Args: 222 | inputs (list(torch._C.Value)): The input shape in the form of a list of 223 | jit object before matmul. 224 | outputs (list(torch._C.Value)): The output shape in the form of a list 225 | of jit object after matmul. 226 | Returns: 227 | Counter: A Counter dictionary that records the number of flops for each 228 | operation. 229 | """ 230 | # Inputs should be a list of length 2. 231 | # Inputs contains the shapes of two matrices. 232 | input_shapes = [get_shape(v) for v in inputs] 233 | assert len(input_shapes) == 2 234 | assert len(input_shapes[1]) == 2 235 | assert input_shapes[0][-1] == input_shapes[1][0] 236 | batch_dim = input_shapes[0][0] 237 | m1_dim, m2_dim = input_shapes[1] 238 | flop = m1_dim * m2_dim * batch_dim 239 | flop_counter = Counter({"matmul": flop}) 240 | return flop_counter 241 | 242 | 243 | def batchnorm_flop_jit( 244 | inputs: typing.List[object], outputs: typing.List[object] 245 | ) -> typing.Counter[str]: 246 | """ 247 | This method counts the flops for batch norm. 248 | Args: 249 | inputs (list(torch._C.Value)): The input shape in the form of a list of 250 | jit object before batch norm. 251 | outputs (list(torch._C.Value)): The output shape in the form of a list 252 | of jit object after batch norm. 253 | Returns: 254 | Counter: A Counter dictionary that records the number of flops for each 255 | operation. 256 | """ 257 | # Inputs[0] contains the shape of the input. 258 | input_shape = get_shape(inputs[0]) 259 | assert 2 <= len(input_shape) <= 5 260 | flop = prod(input_shape) * 4 261 | flop_counter = Counter({"batchnorm": flop}) 262 | return flop_counter -------------------------------------------------------------------------------- /flop_count.py: -------------------------------------------------------------------------------- 1 | # taken from detectron2 with a few modifications 2 | # to include bmm and a few other ops 3 | # https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py 4 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 5 | 6 | import logging 7 | import typing 8 | from collections import Counter, defaultdict 9 | import torch 10 | import torch.nn as nn 11 | from functools import partial 12 | 13 | from jit_handles import ( 14 | addmm_flop_jit, 15 | batchnorm_flop_jit, 16 | conv_flop_jit, 17 | einsum_flop_jit, 18 | matmul_flop_jit, 19 | bmm_flop_jit, 20 | basic_binary_op_flop_jit, 21 | rsqrt_flop_jit, 22 | softmax_flop_jit, 23 | dropout_flop_jit, 24 | ) 25 | 26 | # A dictionary that maps supported operations to their flop count jit handles. 
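# A handle takes a node's jit inputs/outputs and returns a Counter of flops.
# Ops missing from this table can be supplied at call time through the
# `customized_ops` argument of `flop_count` below, e.g. (hypothetical handle):
#   flop_count(model, (x,), customized_ops={"aten::tanh": my_tanh_handle})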
27 | _SUPPORTED_OPS: typing.Dict[str, typing.Callable] = {
28 |     "aten::addmm": addmm_flop_jit,
29 |     "aten::_convolution": conv_flop_jit,
30 |     "aten::einsum": einsum_flop_jit,
31 |     "aten::matmul": matmul_flop_jit,
32 |     "aten::batch_norm": batchnorm_flop_jit,
33 |     "aten::bmm": bmm_flop_jit,
34 |     "aten::add": partial(basic_binary_op_flop_jit, name='aten::add'),
35 |     "aten::add_": partial(basic_binary_op_flop_jit, name='aten::add_'),
36 |     "aten::mul": partial(basic_binary_op_flop_jit, name='aten::mul'),
37 |     "aten::sub": partial(basic_binary_op_flop_jit, name='aten::sub'),
38 |     "aten::div": partial(basic_binary_op_flop_jit, name='aten::div'),
39 |     "aten::floor_divide": partial(basic_binary_op_flop_jit, name='aten::floor_divide'),
40 |     "aten::relu": partial(basic_binary_op_flop_jit, name='aten::relu'),
41 |     "aten::relu_": partial(basic_binary_op_flop_jit, name='aten::relu_'),
42 |     "aten::rsqrt": rsqrt_flop_jit,
43 |     "aten::softmax": softmax_flop_jit,
44 |     "aten::dropout": dropout_flop_jit,
45 | }
46 | 
47 | # A list that contains ignored operations.
48 | _IGNORED_OPS: typing.List[str] = [
49 |     "aten::Int",
50 |     "aten::__and__",
51 |     "aten::arange",
52 |     "aten::cat",
53 |     "aten::clamp",
54 |     "aten::clamp_",
55 |     "aten::contiguous",
56 |     "aten::copy_",
57 |     "aten::detach",
58 |     "aten::empty",
59 |     "aten::eq",
60 |     "aten::expand",
61 |     "aten::flatten",
62 |     "aten::floor",
63 |     "aten::full",
64 |     "aten::gt",
65 |     "aten::index",
66 |     "aten::index_put_",
67 |     "aten::max",
68 |     "aten::nonzero",
69 |     "aten::permute",
70 |     "aten::remainder",
71 |     "aten::reshape",
72 |     "aten::select",
73 |     "aten::size",
74 |     "aten::slice",
75 |     "aten::split_with_sizes",
76 |     "aten::squeeze",
77 |     "aten::t",
78 |     "aten::to",
79 |     "aten::transpose",
80 |     "aten::unsqueeze",
81 |     "aten::view",
82 |     "aten::zeros",
83 |     "aten::zeros_like",
84 |     "prim::Constant",
85 |     "prim::Int",
86 |     "prim::ListConstruct",
87 |     "prim::ListUnpack",
88 |     "prim::NumToTensor",
89 |     "prim::TupleConstruct",
90 | ]
91 | 
92 | _HAS_ALREADY_SKIPPED = False
93 | 
94 | 
95 | def flop_count(
96 |     model: nn.Module,
97 |     inputs: typing.Tuple[object, ...],
98 |     whitelist: typing.Union[typing.List[str], None] = None,
99 |     customized_ops: typing.Union[
100 |         typing.Dict[str, typing.Callable], None
101 |     ] = None,
102 |     measure_scope=None,  # optional scope-name filter: only count ops traced inside it
103 | ) -> typing.DefaultDict[str, float]:
104 |     """
105 |     Given a model and an input to the model, compute the Gflops of the given
106 |     model. Note that the input should have a batch size of 1.
107 |     Args:
108 |         model (nn.Module): The model to compute flop counts.
109 |         inputs (tuple): Inputs that are passed to `model` to count flops.
110 |             Inputs need to be in a tuple.
111 |         whitelist (list(str)): Whitelist of operations that will be counted. It
112 |             needs to be a subset of _SUPPORTED_OPS. By default, the function
113 |             computes flops for all supported operations.
114 |         customized_ops (dict(str, Callable)): A dictionary that contains customized
115 |             operations and their flop handles. If customized_ops contains an
116 |             operation in _SUPPORTED_OPS, then the default handle in
117 |             _SUPPORTED_OPS will be overwritten.
118 |     Returns:
119 |         defaultdict: A dictionary that records the number of gflops for each
120 |             operation.
121 |     """
122 |     # Copy _SUPPORTED_OPS to flop_count_ops.
123 |     # If customized_ops is provided, update the copy (not _SUPPORTED_OPS itself).
124 |     flop_count_ops = _SUPPORTED_OPS.copy()
125 |     if customized_ops:
126 |         flop_count_ops.update(customized_ops)
127 | 
128 |     # If whitelist is None, count flops for all supported operations.
129 |     if whitelist is None:
130 |         whitelist_set = set(flop_count_ops.keys())
131 |     else:
132 |         whitelist_set = set(whitelist)
133 | 
134 |     # Torch script does not support parallel torch models.
135 |     if isinstance(
136 |         model,
137 |         (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel),
138 |     ):
139 |         model = model.module  # pyre-ignore
140 | 
141 |     assert set(whitelist_set).issubset(
142 |         flop_count_ops
143 |     ), "whitelist needs to be a subset of _SUPPORTED_OPS and customized_ops."
144 |     assert isinstance(inputs, tuple), "Inputs need to be in a tuple."
145 | 
146 |     # Compatibility with torch.jit.
147 |     if hasattr(torch.jit, "get_trace_graph"):
148 |         trace, _ = torch.jit.get_trace_graph(model, inputs)
149 |         trace_nodes = trace.graph().nodes()
150 |     else:
151 |         with scope_name_workaround():
152 |             trace, _ = torch.jit._get_trace_graph(model, inputs)
153 |         # graph=torch.onnx._optimize_trace(trace, torch.onnx.OperatorExportTypes.ONNX)
154 |         trace_nodes = trace.nodes()
155 | 
156 |     skipped_ops = Counter()
157 |     total_flop_counter = Counter()
158 |     for node in trace_nodes:
159 |         # When measure_scope is given, restrict counting to nodes whose scope
160 |         # name contains it (e.g. a single submodule).
161 |         if measure_scope is not None and measure_scope not in node.scopeName():
162 |             continue
163 |         kind = node.kind()
164 |         if kind not in whitelist_set:
165 |             # If the operation is not in _IGNORED_OPS, count skipped operations.
166 |             if kind not in _IGNORED_OPS:
167 |                 skipped_ops[kind] += 1
168 |             continue
169 | 
170 |         handle_count = flop_count_ops.get(kind, None)
171 |         if handle_count is None:
172 |             continue
173 | 
174 |         node_inputs, node_outputs = list(node.inputs()), list(node.outputs())
175 |         flops_counter = handle_count(node_inputs, node_outputs)
176 |         total_flop_counter += flops_counter
177 | 
178 |     global _HAS_ALREADY_SKIPPED
179 |     if len(skipped_ops) > 0 and not _HAS_ALREADY_SKIPPED:
180 |         _HAS_ALREADY_SKIPPED = True
181 |         for op, freq in skipped_ops.items():
182 |             logging.warning("Skipped operation {} {} time(s)".format(op, freq))
183 | 
184 |     # Convert flop count to gigaflops.
185 |     final_count = defaultdict(float)
186 |     for op in total_flop_counter:
187 |         final_count[op] = total_flop_counter[op] / 1e9
188 | 
189 |     return final_count
190 | 
191 | 
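# A minimal usage sketch (not part of the original file): counting GFLOPs for a
# torchvision backbone with a dummy batch-size-1 input, then printing the result
# with `print_table` below. Assumes torchvision is installed; the model choice
# and input size are arbitrary.
#
#   import torch
#   from torchvision.models import resnet18
#   from flop_count import flop_count, print_table
#
#   model = resnet18().eval()
#   dummy = torch.randn(1, 3, 224, 224)
#   gflops = flop_count(model, (dummy,))
#   print_table([[op, f"{g:.3f} G"] for op, g in gflops.items()])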
192 | def print_table(rows, header=['Operation', 'OPS']):
193 |     r"""Simple helper function to print a list of lists as a table
194 | 
195 |     :param rows: a :class:`list` of :class:`list` containing the data to be printed.
196 |         Each entry in the list represents an individual row
197 |     :param header: (optional) a :class:`list` containing the header of the table
198 |     """
199 |     if len(rows) == 0:
200 |         return
201 |     col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]
202 |     row_format = ''.join(["{:<" + str(length) + "}" for length in col_max])
203 | 
204 |     if len(header) > 0:
205 |         print(row_format.format(*header))
206 |         print(row_format.format(*['-' * (val - 2) for val in col_max]))
207 | 
208 |     for row in rows:
209 |         print(row_format.format(*row))
210 |     print(row_format.format(*['-' * (val - 3) for val in col_max]))
211 | 
212 | 
213 | # Workaround for scopename in pytorch 1.4 and newer
214 | # see: https://github.com/pytorch/pytorch/issues/33463
215 | 
216 | 
217 | class scope_name_workaround(object):
218 |     def __init__(self):
219 |         self.backup = None
220 | 
221 |     def __enter__(self):
222 |         def _tracing_name(self_, tracing_state):
223 |             if not tracing_state._traced_module_stack:
224 |                 return None
225 |             module = tracing_state._traced_module_stack[-1]
226 |             for name, child in module.named_children():
227 |                 if child is self_:
228 |                     return name
229 |             return None
230 | 
231 |         def _slow_forward(self_, *input, **kwargs):
232 |             tracing_state = torch._C._get_tracing_state()
233 |             if not tracing_state or isinstance(self_.forward, torch._C.ScriptMethod):
234 |                 return self_.forward(*input, **kwargs)
235 |             if not hasattr(tracing_state, '_traced_module_stack'):
236 |                 tracing_state._traced_module_stack = []
237 |             name = _tracing_name(self_, tracing_state)
238 |             if name:
239 |                 tracing_state.push_scope('%s[%s]' % (self_._get_name(), name))
240 |             else:
241 |                 tracing_state.push_scope(self_._get_name())
242 |             tracing_state._traced_module_stack.append(self_)
243 |             try:
244 |                 result = self_.forward(*input, **kwargs)
245 |             finally:
246 |                 tracing_state.pop_scope()
247 |                 tracing_state._traced_module_stack.pop()
248 |             return result
249 | 
250 |         self.backup = torch.nn.Module._slow_forward
251 |         setattr(torch.nn.Module, '_slow_forward', _slow_forward)
252 | 
253 |     def __exit__(self, type, value, tb):
254 |         setattr(torch.nn.Module, '_slow_forward', self.backup)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2020 - present, Facebook, Inc 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /util/misc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | Misc functions, including distributed helpers. 4 | 5 | Mostly copy-paste from torchvision references. 6 | """ 7 | import os 8 | import subprocess 9 | import time 10 | from collections import defaultdict, deque 11 | import datetime 12 | import pickle 13 | from typing import Optional, List 14 | 15 | import torch 16 | import torch.distributed as dist 17 | from torch import Tensor 18 | 19 | # needed due to empty tensor bug in pytorch and torchvision 0.5 20 | import torchvision 21 | if float(torchvision.__version__[:3]) < 0.7: 22 | from torchvision.ops import _new_empty_tensor 23 | from torchvision.ops.misc import _output_size 24 | 25 | 26 | class SmoothedValue(object): 27 | """Track a series of values and provide access to smoothed values over a 28 | window or the global series average. 29 | """ 30 | 31 | def __init__(self, window_size=20, fmt=None): 32 | if fmt is None: 33 | fmt = "{median:.4f} ({global_avg:.4f})" 34 | self.deque = deque(maxlen=window_size) 35 | self.total = 0.0 36 | self.count = 0 37 | self.fmt = fmt 38 | 39 | def update(self, value, n=1): 40 | self.deque.append(value) 41 | self.count += n 42 | self.total += value * n 43 | 44 | def synchronize_between_processes(self): 45 | """ 46 | Warning: does not synchronize the deque! 47 | """ 48 | if not is_dist_avail_and_initialized(): 49 | return 50 | t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') 51 | dist.barrier() 52 | dist.all_reduce(t) 53 | t = t.tolist() 54 | self.count = int(t[0]) 55 | self.total = t[1] 56 | 57 | @property 58 | def median(self): 59 | d = torch.tensor(list(self.deque)) 60 | return d.median().item() 61 | 62 | @property 63 | def avg(self): 64 | d = torch.tensor(list(self.deque), dtype=torch.float32) 65 | return d.mean().item() 66 | 67 | @property 68 | def global_avg(self): 69 | return self.total / self.count 70 | 71 | @property 72 | def max(self): 73 | return max(self.deque) 74 | 75 | @property 76 | def value(self): 77 | return self.deque[-1] 78 | 79 | def __str__(self): 80 | return self.fmt.format( 81 | median=self.median, 82 | avg=self.avg, 83 | global_avg=self.global_avg, 84 | max=self.max, 85 | value=self.value) 86 | 87 | 88 | def all_gather(data): 89 | """ 90 | Run all_gather on arbitrary picklable data (not necessarily tensors) 91 | Args: 92 | data: any picklable object 93 | Returns: 94 | list[data]: list of data gathered from each rank 95 | """ 96 | world_size = get_world_size() 97 | if world_size == 1: 98 | return [data] 99 | 100 | # serialized to a Tensor 101 | buffer = pickle.dumps(data) 102 | storage = torch.ByteStorage.from_buffer(buffer) 103 | tensor = torch.ByteTensor(storage).to("cuda") 104 | 105 | # obtain Tensor size of each rank 106 | local_size = torch.tensor([tensor.numel()], device="cuda") 107 | size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] 108 | dist.all_gather(size_list, local_size) 109 | size_list = [int(size.item()) for size in size_list] 110 | max_size = max(size_list) 111 | 112 | # receiving Tensor from all ranks 113 | # we pad the tensor because torch all_gather does not support 114 | # gathering tensors of different shapes 115 | tensor_list = [] 116 | for _ in size_list: 117 | tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) 118 | 
if local_size != max_size: 119 | padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") 120 | tensor = torch.cat((tensor, padding), dim=0) 121 | dist.all_gather(tensor_list, tensor) 122 | 123 | data_list = [] 124 | for size, tensor in zip(size_list, tensor_list): 125 | buffer = tensor.cpu().numpy().tobytes()[:size] 126 | data_list.append(pickle.loads(buffer)) 127 | 128 | return data_list 129 | 130 | 131 | def reduce_dict(input_dict, average=True): 132 | """ 133 | Args: 134 | input_dict (dict): all the values will be reduced 135 | average (bool): whether to do average or sum 136 | Reduce the values in the dictionary from all processes so that all processes 137 | have the averaged results. Returns a dict with the same fields as 138 | input_dict, after reduction. 139 | """ 140 | world_size = get_world_size() 141 | if world_size < 2: 142 | return input_dict 143 | with torch.no_grad(): 144 | names = [] 145 | values = [] 146 | # sort the keys so that they are consistent across processes 147 | for k in sorted(input_dict.keys()): 148 | names.append(k) 149 | values.append(input_dict[k]) 150 | values = torch.stack(values, dim=0) 151 | dist.all_reduce(values) 152 | if average: 153 | values /= world_size 154 | reduced_dict = {k: v for k, v in zip(names, values)} 155 | return reduced_dict 156 | 157 | 158 | class MetricLogger(object): 159 | def __init__(self, delimiter="\t"): 160 | self.meters = defaultdict(SmoothedValue) 161 | self.delimiter = delimiter 162 | 163 | def update(self, **kwargs): 164 | for k, v in kwargs.items(): 165 | if isinstance(v, torch.Tensor): 166 | v = v.item() 167 | assert isinstance(v, (float, int)) 168 | self.meters[k].update(v) 169 | 170 | def __getattr__(self, attr): 171 | if attr in self.meters: 172 | return self.meters[attr] 173 | if attr in self.__dict__: 174 | return self.__dict__[attr] 175 | raise AttributeError("'{}' object has no attribute '{}'".format( 176 | type(self).__name__, attr)) 177 | 178 | def __str__(self): 179 | loss_str = [] 180 | for name, meter in self.meters.items(): 181 | loss_str.append( 182 | "{}: {}".format(name, str(meter)) 183 | ) 184 | return self.delimiter.join(loss_str) 185 | 186 | def synchronize_between_processes(self): 187 | for meter in self.meters.values(): 188 | meter.synchronize_between_processes() 189 | 190 | def add_meter(self, name, meter): 191 | self.meters[name] = meter 192 | 193 | def log_every(self, iterable, print_freq, header=None): 194 | i = 0 195 | if not header: 196 | header = '' 197 | start_time = time.time() 198 | end = time.time() 199 | iter_time = SmoothedValue(fmt='{avg:.4f}') 200 | data_time = SmoothedValue(fmt='{avg:.4f}') 201 | space_fmt = ':' + str(len(str(len(iterable)))) + 'd' 202 | if torch.cuda.is_available(): 203 | log_msg = self.delimiter.join([ 204 | header, 205 | '[{0' + space_fmt + '}/{1}]', 206 | 'eta: {eta}', 207 | '{meters}', 208 | 'time: {time}', 209 | 'data: {data}', 210 | 'max mem: {memory:.0f}' 211 | ]) 212 | else: 213 | log_msg = self.delimiter.join([ 214 | header, 215 | '[{0' + space_fmt + '}/{1}]', 216 | 'eta: {eta}', 217 | '{meters}', 218 | 'time: {time}', 219 | 'data: {data}' 220 | ]) 221 | MB = 1024.0 * 1024.0 222 | for obj in iterable: 223 | data_time.update(time.time() - end) 224 | yield obj 225 | iter_time.update(time.time() - end) 226 | if i % print_freq == 0 or i == len(iterable) - 1: 227 | eta_seconds = iter_time.global_avg * (len(iterable) - i) 228 | eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) 229 | if torch.cuda.is_available(): 230 | 
print(log_msg.format(
231 |                         i, len(iterable), eta=eta_string,
232 |                         meters=str(self),
233 |                         time=str(iter_time), data=str(data_time),
234 |                         memory=torch.cuda.max_memory_allocated() / MB))
235 |                 else:
236 |                     print(log_msg.format(
237 |                         i, len(iterable), eta=eta_string,
238 |                         meters=str(self),
239 |                         time=str(iter_time), data=str(data_time)))
240 |             i += 1
241 |             end = time.time()
242 |         total_time = time.time() - start_time
243 |         total_time_str = str(datetime.timedelta(seconds=int(total_time)))
244 |         print('{} Total time: {} ({:.4f} s / it)'.format(
245 |             header, total_time_str, total_time / len(iterable)))
246 | 
247 | 
248 | def get_sha():
249 |     cwd = os.path.dirname(os.path.abspath(__file__))
250 | 
251 |     def _run(command):
252 |         return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
253 |     sha = 'N/A'
254 |     diff = "clean"
255 |     branch = 'N/A'
256 |     try:
257 |         sha = _run(['git', 'rev-parse', 'HEAD'])
258 |         subprocess.check_output(['git', 'diff'], cwd=cwd)
259 |         diff = _run(['git', 'diff-index', 'HEAD'])
260 |         diff = "has uncommitted changes" if diff else "clean"
261 |         branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
262 |     except Exception:
263 |         pass
264 |     message = f"sha: {sha}, status: {diff}, branch: {branch}"
265 |     return message
266 | 
267 | 
268 | def collate_fn(batch):
269 |     batch = list(zip(*batch))
270 |     batch[0] = nested_tensor_from_tensor_list(batch[0])
271 |     return tuple(batch)
272 | 
273 | 
274 | def _max_by_axis(the_list):
275 |     # type: (List[List[int]]) -> List[int]
276 |     maxes = the_list[0]
277 |     for sublist in the_list[1:]:
278 |         for index, item in enumerate(sublist):
279 |             maxes[index] = max(maxes[index], item)
280 |     return maxes
281 | 
282 | 
283 | def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
284 |     # TODO make this more general
285 |     if tensor_list[0].ndim == 3:
286 |         # TODO make it support different-sized images
287 |         max_size = _max_by_axis([list(img.shape) for img in tensor_list])
288 |         # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
289 |         batch_shape = [len(tensor_list)] + max_size
290 |         b, c, h, w = batch_shape
291 |         dtype = tensor_list[0].dtype
292 |         device = tensor_list[0].device
293 |         tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
294 |         mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
295 |         for img, pad_img, m in zip(tensor_list, tensor, mask):
296 |             pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
297 |             m[: img.shape[1], :img.shape[2]] = False
298 |     else:
299 |         raise ValueError('not supported')
300 |     return NestedTensor(tensor, mask)
301 | 
302 | 
303 | class NestedTensor(object):
304 |     def __init__(self, tensors, mask: Optional[Tensor]):
305 |         self.tensors = tensors
306 |         self.mask = mask
307 | 
308 |     def to(self, device):
309 |         # type: (Device) -> NestedTensor # noqa
310 |         cast_tensor = self.tensors.to(device)
311 |         mask = self.mask
312 |         if mask is not None:
313 |             assert mask is not None
314 |             cast_mask = mask.to(device)
315 |         else:
316 |             cast_mask = None
317 |         return NestedTensor(cast_tensor, cast_mask)
318 | 
319 |     def decompose(self):
320 |         return self.tensors, self.mask
321 | 
322 |     def __repr__(self):
323 |         return str(self.tensors)
324 | 
325 | 
326 | def setup_for_distributed(is_master):
327 |     """
328 |     This function disables printing when not in the master process
329 |     """
330 |     import builtins as __builtin__
331 |     builtin_print = __builtin__.print
332 | 
333 |     def print(*args, **kwargs):
334 |         force = kwargs.pop('force', False)
335 |         if is_master or force:
336 | builtin_print(*args, **kwargs) 337 | 338 | __builtin__.print = print 339 | 340 | 341 | def is_dist_avail_and_initialized(): 342 | if not dist.is_available(): 343 | return False 344 | if not dist.is_initialized(): 345 | return False 346 | return True 347 | 348 | 349 | def get_world_size(): 350 | if not is_dist_avail_and_initialized(): 351 | return 1 352 | return dist.get_world_size() 353 | 354 | 355 | def get_rank(): 356 | if not is_dist_avail_and_initialized(): 357 | return 0 358 | return dist.get_rank() 359 | 360 | 361 | def is_main_process(): 362 | return get_rank() == 0 363 | 364 | 365 | def save_on_master(*args, **kwargs): 366 | if is_main_process(): 367 | torch.save(*args, **kwargs) 368 | 369 | 370 | def init_distributed_mode(args): 371 | if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: 372 | args.rank = int(os.environ["RANK"]) 373 | args.world_size = int(os.environ['WORLD_SIZE']) 374 | args.gpu = int(os.environ['LOCAL_RANK']) 375 | elif 'SLURM_PROCID' in os.environ: 376 | args.rank = int(os.environ['SLURM_PROCID']) 377 | args.gpu = args.rank % torch.cuda.device_count() 378 | else: 379 | print('Not using distributed mode') 380 | args.distributed = False 381 | return 382 | 383 | args.distributed = True 384 | 385 | torch.cuda.set_device(args.gpu) 386 | args.dist_backend = 'nccl' 387 | print('| distributed init (rank {}): {}'.format( 388 | args.rank, args.dist_url), flush=True) 389 | torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, 390 | world_size=args.world_size, rank=args.rank) 391 | torch.distributed.barrier() 392 | setup_for_distributed(args.rank == 0) 393 | 394 | 395 | @torch.no_grad() 396 | def accuracy(output, target, topk=(1,)): 397 | """Computes the precision@k for the specified values of k""" 398 | if target.numel() == 0: 399 | return [torch.zeros([], device=output.device)] 400 | maxk = max(topk) 401 | batch_size = target.size(0) 402 | 403 | _, pred = output.topk(maxk, 1, True, True) 404 | pred = pred.t() 405 | correct = pred.eq(target.view(1, -1).expand_as(pred)) 406 | 407 | res = [] 408 | for k in topk: 409 | correct_k = correct[:k].view(-1).float().sum(0) 410 | res.append(correct_k.mul_(100.0 / batch_size)) 411 | return res 412 | 413 | 414 | def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): 415 | # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor 416 | """ 417 | Equivalent to nn.functional.interpolate, but with support for empty batch sizes. 418 | This will eventually be supported natively by PyTorch, and this 419 | class can go away. 420 | """ 421 | if float(torchvision.__version__[:3]) < 0.7: 422 | if input.numel() > 0: 423 | return torch.nn.functional.interpolate( 424 | input, size, scale_factor, mode, align_corners 425 | ) 426 | 427 | output_shape = _output_size(2, input, size, scale_factor) 428 | output_shape = list(input.shape[:-2]) + list(output_shape) 429 | return _new_empty_tensor(input, output_shape) 430 | else: 431 | return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) 432 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 | import argparse 3 | import datetime 4 | import json 5 | import random 6 | import time 7 | from pathlib import Path 8 | 9 | import numpy as np 10 | import torch 11 | from torch.utils.data import DataLoader, DistributedSampler 12 | 13 | import datasets 14 | import util.misc as utils 15 | from datasets import build_dataset, get_coco_api_from_dataset 16 | from engine import evaluate, train_one_epoch 17 | from models import build_model 18 | 19 | from torch.nn.parameter import Parameter 20 | 21 | import tqdm 22 | 23 | from getpass import getuser 24 | from socket import gethostname 25 | 26 | # this is a fake commit 27 | def get_args_parser(): 28 | parser = argparse.ArgumentParser('Set transformer detector', add_help=False) 29 | parser.add_argument('--lr', default=1e-4, type=float) 30 | parser.add_argument('--lr_backbone', default=1e-5, type=float) 31 | parser.add_argument('--batch_size', default=2, type=int) 32 | parser.add_argument('--weight_decay', default=1e-4, type=float) 33 | parser.add_argument('--epochs', default=300, type=int) 34 | parser.add_argument('--lr_drop', default=200, type=int) 35 | parser.add_argument('--clip_max_norm', default=0.1, type=float, 36 | help='gradient clipping max norm') 37 | 38 | # Model parameters 39 | parser.add_argument('--frozen_weights', type=str, default=None, 40 | help="Path to the pretrained model. If set, only the mask head will be trained") 41 | # * Backbone 42 | parser.add_argument('--backbone', default='resnet50', type=str, 43 | help="Name of the convolutional backbone to use") 44 | parser.add_argument('--dilation', action='store_true', 45 | help="If true, we replace stride with dilation in the last convolutional block (DC5)") 46 | parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), 47 | help="Type of positional embedding to use on top of the image features") 48 | 49 | # * Transformer 50 | parser.add_argument('--enc_layers', default=6, type=int, 51 | help="Number of encoding layers in the transformer") 52 | parser.add_argument('--dec_layers', default=6, type=int, 53 | help="Number of decoding layers in the transformer") 54 | parser.add_argument('--dim_feedforward', default=2048, type=int, 55 | help="Intermediate size of the feedforward layers in the transformer blocks") 56 | parser.add_argument('--hidden_dim', default=256, type=int, 57 | help="Size of the embeddings (dimension of the transformer)") 58 | parser.add_argument('--dropout', default=0.1, type=float, 59 | help="Dropout applied in the transformer") 60 | parser.add_argument('--nheads', default=8, type=int, 61 | help="Number of attention heads inside the transformer's attentions") 62 | parser.add_argument('--num_queries', default=100, type=int, 63 | help="Number of query slots") 64 | parser.add_argument('--pre_norm', action='store_true') 65 | 66 | # * Segmentation 67 | parser.add_argument('--masks', action='store_true', 68 | help="Train segmentation head if the flag is provided") 69 | 70 | # Loss 71 | parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', 72 | help="Disables auxiliary decoding losses (loss at each layer)") 73 | # * Matcher 74 | parser.add_argument('--set_cost_class', default=1, type=float, 75 | help="Class coefficient in the matching cost") 76 | parser.add_argument('--set_cost_bbox', default=5, type=float, 77 | help="L1 box coefficient in the matching cost") 78 | parser.add_argument('--set_cost_giou', default=2, type=float, 79 | help="giou box coefficient in the matching cost") 80 | # * 
Loss coefficients 81 | parser.add_argument('--mask_loss_coef', default=1, type=float) 82 | parser.add_argument('--dice_loss_coef', default=1, type=float) 83 | parser.add_argument('--bbox_loss_coef', default=5, type=float) 84 | parser.add_argument('--giou_loss_coef', default=2, type=float) 85 | parser.add_argument('--eos_coef', default=0.1, type=float, 86 | help="Relative classification weight of the no-object class") 87 | 88 | # dataset parameters 89 | parser.add_argument('--train_image_set', default='train')## add for train on sampled set, train_sampled_PER_CAT_THR_500, ... 90 | parser.add_argument('--dataset_file', default='coco') 91 | parser.add_argument('--coco_path', type=str) 92 | parser.add_argument('--coco_panoptic_path', type=str) 93 | parser.add_argument('--remove_difficult', action='store_true') 94 | 95 | parser.add_argument('--output_dir', default='', 96 | help='path where to save, empty for no saving') 97 | parser.add_argument('--device', default='cuda', 98 | help='device to use for training / testing') 99 | parser.add_argument('--seed', default=42, type=int) 100 | parser.add_argument('--resume', default='', help='resume from checkpoint') 101 | parser.add_argument('--start_epoch', default=0, type=int, metavar='N', 102 | help='start epoch') 103 | parser.add_argument('--eval', action='store_true') 104 | parser.add_argument('--num_workers', default=2, type=int) 105 | 106 | # distributed training parameters 107 | parser.add_argument('--world_size', default=1, type=int, 108 | help='number of distributed processes') 109 | parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') 110 | parser.add_argument('--sample_reg_loss', default=1e-4, type=float, 111 | help="sample_reg_loss") 112 | parser.add_argument('--sample_topk_ratio', default=1/3., type=float) 113 | parser.add_argument('--score_pred_net', type=str, default='2layer-fc-256') 114 | parser.add_argument('--kproj_net', type=str, default='1layer-fc') 115 | parser.add_argument('--unsample_abstract_number', default=0, type=int, 116 | help='unsample_abstract_number') 117 | parser.add_argument('--pos_embed_kproj', action='store_true', 118 | help="add pos embeding for predicting unsampled aggregation attention") 119 | parser.add_argument('--sampler_lr_drop_epoch', default=1e5, type=int, 120 | help='default is not drop') 121 | parser.add_argument('--reshape_param_group', action='store_true', 122 | help="reshape_param_group of loaded state_dict to match with the 3 group setting") 123 | parser.add_argument('--notload_lr_scheduler', action='store_true', 124 | help="notload_lr_scheduler") 125 | parser.add_argument('--sample_ratio_lower_bound', default=1/3., type=float) 126 | parser.add_argument('--sample_ratio_higher_bound', default=0.8, type=float) 127 | 128 | return parser 129 | 130 | def get_host_info(): 131 | return '{}@{}'.format(getuser(), gethostname()) 132 | 133 | def main(args): 134 | utils.init_distributed_mode(args) 135 | print("git:\n {}\n".format(utils.get_sha())) 136 | 137 | if args.frozen_weights is not None: 138 | assert args.masks, "Frozen training is meant for segmentation only" 139 | print(args) 140 | print(get_host_info()) 141 | device = torch.device(args.device) 142 | 143 | # fix the seed for reproducibility 144 | seed = args.seed + utils.get_rank() 145 | torch.manual_seed(seed) 146 | np.random.seed(seed) 147 | random.seed(seed) 148 | if args.unsample_abstract_number==0:# unsample_abstract_number not set 149 | if args.dilation: 150 | args.unsample_abstract_number=100 151 | 
else: 152 | args.unsample_abstract_number = 30 153 | 154 | model, criterion, postprocessors = build_model(args) 155 | model.to(device) 156 | criterion.weight_dict['sample_reg_loss'] = args.sample_reg_loss 157 | 158 | model_without_ddp = model 159 | if args.distributed: 160 | model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) 161 | model_without_ddp = model.module 162 | n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) 163 | print('number of params:', n_parameters) 164 | 165 | param_dicts = [ 166 | {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]}, 167 | { 168 | "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], 169 | "lr": args.lr_backbone, 170 | }, 171 | ] 172 | optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, 173 | weight_decay=args.weight_decay) 174 | lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) 175 | 176 | # dataset_train = build_dataset(image_set='train', args=args) 177 | dataset_train = build_dataset(image_set=args.train_image_set, args=args) 178 | dataset_val = build_dataset(image_set='val', args=args) 179 | # dataset_train.ids = dataset_train.ids[:100] 180 | # dataset_val.ids = dataset_val.ids[:100] 181 | 182 | if args.distributed: 183 | sampler_train = DistributedSampler(dataset_train) 184 | sampler_val = DistributedSampler(dataset_val, shuffle=False) 185 | else: 186 | sampler_train = torch.utils.data.RandomSampler(dataset_train) 187 | sampler_val = torch.utils.data.SequentialSampler(dataset_val) 188 | 189 | batch_sampler_train = torch.utils.data.BatchSampler( 190 | sampler_train, args.batch_size, drop_last=True) 191 | 192 | data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, 193 | collate_fn=utils.collate_fn, num_workers=args.num_workers) 194 | data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, 195 | drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) 196 | 197 | if args.dataset_file == "coco_panoptic": 198 | # We also evaluate AP during panoptic training, on original coco DS 199 | coco_val = datasets.coco.build("val", args) 200 | base_ds = get_coco_api_from_dataset(coco_val) 201 | else: 202 | base_ds = get_coco_api_from_dataset(dataset_val) 203 | 204 | if args.frozen_weights is not None: 205 | checkpoint = torch.load(args.frozen_weights, map_location='cpu') 206 | model_without_ddp.detr.load_state_dict(checkpoint['model']) 207 | 208 | output_dir = Path(args.output_dir) 209 | if args.resume: 210 | if args.resume.startswith('https'): 211 | checkpoint = torch.hub.load_state_dict_from_url( 212 | args.resume, map_location='cpu', check_hash=True) 213 | elif not args.resume.endswith('pth'): 214 | checkpoint = torch.load(args.output_dir+'/checkpoint.pth', map_location='cpu') 215 | else: 216 | checkpoint = torch.load(args.resume, map_location='cpu') 217 | 218 | def load_my_state_dict(module, state_dict): 219 | 220 | own_state = module.state_dict() 221 | for name, param in state_dict.items(): 222 | if name not in own_state: 223 | continue 224 | if isinstance(param, Parameter): 225 | # backwards compatibility for serialized parameters 226 | param = param.data 227 | own_state[name].copy_(param) 228 | 229 | 230 | # model_without_ddp.load_state_dict(checkpoint['model']) 231 | load_my_state_dict(model_without_ddp, checkpoint['model']) 232 | if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint 
and 'epoch' in checkpoint:
233 |             try:
234 |                 optimizer.load_state_dict(checkpoint['optimizer'])
235 |                 if not args.notload_lr_scheduler:
236 |                     lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
237 |                 args.start_epoch = checkpoint['epoch'] + 1
238 |             except Exception:
239 |                 print('skipping optimizer and lr_scheduler loading; assuming initialization from a trained model rather than resuming training')
240 | 
241 | 
242 |     if args.eval:
243 |         test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
244 |                                               data_loader_val, base_ds, device, args.output_dir, args.sample_topk_ratio)
245 |         if args.output_dir:
246 |             utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
247 |         return
248 | 
249 |     print("Start training")
250 |     start_time = time.time()
251 |     best_ap = 0.
252 |     lr_scheduler.step(epoch=args.start_epoch)
253 |     for epoch in tqdm.tqdm(range(args.start_epoch, args.epochs)):
254 |         if args.distributed:
255 |             sampler_train.set_epoch(epoch)
256 |         lr_scheduler.step(epoch=epoch)
257 |         if epoch >= args.sampler_lr_drop_epoch:
258 |             optimizer.param_groups[0]['lr'] *= 0.1
259 |         train_stats = train_one_epoch(
260 |             model, criterion, data_loader_train, optimizer, device, epoch, args.sample_ratio_lower_bound, args.sample_ratio_higher_bound,
261 |             args.clip_max_norm)
262 | 
263 |         test_stats_all_sample_ratio = []
264 |         sample_ratios = [0.333, 0.5, 0.65, 0.8]
265 |         for sample_ratio in sample_ratios:
266 |             test_stats, coco_evaluator = evaluate(
267 |                 model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir, sample_ratio
268 |             )
269 |             test_stats_all_sample_ratio.append(test_stats)
270 |         log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
271 |                      **{f'test_ratio_{sample_ratios[0]}_{k}': v for k, v in test_stats_all_sample_ratio[0].items()},
272 |                      **{f'test_ratio_{sample_ratios[1]}_{k}': v for k, v in test_stats_all_sample_ratio[1].items()},
273 |                      **{f'test_ratio_{sample_ratios[2]}_{k}': v for k, v in test_stats_all_sample_ratio[2].items()},
274 |                      **{f'test_ratio_{sample_ratios[3]}_{k}': v for k, v in test_stats_all_sample_ratio[3].items()},
275 |                      'epoch': epoch,
276 |                      'n_parameters': n_parameters,
277 |                      'lrs': [optimizer.param_groups[i]['lr'] for i in range(len(optimizer.param_groups))]}
278 | 
279 |         if args.output_dir:
280 |             checkpoint_paths = [output_dir / 'checkpoint.pth']
281 |             # extra checkpoint before LR drop and every 100 epochs
282 |             if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
283 |                 checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
284 |             if test_stats_all_sample_ratio[2]['coco_eval_bbox'][0] > best_ap:
285 |                 best_ap = test_stats_all_sample_ratio[2]['coco_eval_bbox'][0]
286 |                 checkpoint_paths.append(output_dir / 'checkpoint_best.pth')
287 |             for checkpoint_path in checkpoint_paths:
288 |                 utils.save_on_master({
289 |                     'model': model_without_ddp.state_dict(),
290 |                     'optimizer': optimizer.state_dict(),
291 |                     'lr_scheduler': lr_scheduler.state_dict(),
292 |                     'epoch': epoch,
293 |                     'args': args,
294 |                 }, checkpoint_path)
295 | 
296 |         if args.output_dir and utils.is_main_process():
297 |             with (output_dir / "log.txt").open("a") as f:
298 |                 f.write(json.dumps(log_stats) + "\n")
299 | 
300 |             # for evaluation logs
301 |             if coco_evaluator is not None:
302 |                 (output_dir / 'eval').mkdir(exist_ok=True)
303 |                 if "bbox" in coco_evaluator.coco_eval:
304 |                     filenames = ['latest.pth']
305 |                     if epoch % 50 == 0:
306 |                         filenames.append(f'{epoch:03}.pth')
307 |                     for name in filenames:
308 |                         torch.save(coco_evaluator.coco_eval["bbox"].eval,
309 | 
output_dir / "eval" / name) 310 | 311 | total_time = time.time() - start_time 312 | total_time_str = str(datetime.timedelta(seconds=int(total_time))) 313 | print('Training time {}'.format(total_time_str)) 314 | 315 | 316 | if __name__ == '__main__': 317 | parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) 318 | args = parser.parse_args() 319 | if args.output_dir: 320 | Path(args.output_dir).mkdir(parents=True, exist_ok=True) 321 | main(args) 322 | -------------------------------------------------------------------------------- /models/segmentation.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | This file provides the definition of the convolutional heads used to predict masks, as well as the losses 4 | """ 5 | import io 6 | from collections import defaultdict 7 | from typing import List, Optional 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.functional as F 12 | from torch import Tensor 13 | from PIL import Image 14 | 15 | import util.box_ops as box_ops 16 | from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list 17 | 18 | try: 19 | from panopticapi.utils import id2rgb, rgb2id 20 | except ImportError: 21 | pass 22 | 23 | 24 | class DETRsegm(nn.Module): 25 | def __init__(self, detr, freeze_detr=False): 26 | super().__init__() 27 | self.detr = detr 28 | 29 | if freeze_detr: 30 | for p in self.parameters(): 31 | p.requires_grad_(False) 32 | 33 | hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead 34 | self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) 35 | self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) 36 | 37 | def forward(self, samples: NestedTensor): 38 | if not isinstance(samples, NestedTensor): 39 | samples = nested_tensor_from_tensor_list(samples) 40 | features, pos = self.detr.backbone(samples) 41 | 42 | bs = features[-1].tensors.shape[0] 43 | 44 | src, mask = features[-1].decompose() 45 | assert mask is not None 46 | src_proj = self.detr.input_proj(src) 47 | hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) 48 | 49 | outputs_class = self.detr.class_embed(hs) 50 | outputs_coord = self.detr.bbox_embed(hs).sigmoid() 51 | out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} 52 | if self.detr.aux_loss: 53 | out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) 54 | 55 | # FIXME h_boxes takes the last one computed, keep this in mind 56 | bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) 57 | 58 | seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) 59 | outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) 60 | 61 | out["pred_masks"] = outputs_seg_masks 62 | return out 63 | 64 | 65 | def _expand(tensor, length: int): 66 | return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) 67 | 68 | 69 | class MaskHeadSmallConv(nn.Module): 70 | """ 71 | Simple convolutional head, using group norm. 
72 | Upsampling is done using a FPN approach 73 | """ 74 | 75 | def __init__(self, dim, fpn_dims, context_dim): 76 | super().__init__() 77 | 78 | inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] 79 | self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) 80 | self.gn1 = torch.nn.GroupNorm(8, dim) 81 | self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) 82 | self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) 83 | self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) 84 | self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) 85 | self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) 86 | self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) 87 | self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) 88 | self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) 89 | self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) 90 | 91 | self.dim = dim 92 | 93 | self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) 94 | self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) 95 | self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) 96 | 97 | for m in self.modules(): 98 | if isinstance(m, nn.Conv2d): 99 | nn.init.kaiming_uniform_(m.weight, a=1) 100 | nn.init.constant_(m.bias, 0) 101 | 102 | def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): 103 | x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) 104 | 105 | x = self.lay1(x) 106 | x = self.gn1(x) 107 | x = F.relu(x) 108 | x = self.lay2(x) 109 | x = self.gn2(x) 110 | x = F.relu(x) 111 | 112 | cur_fpn = self.adapter1(fpns[0]) 113 | if cur_fpn.size(0) != x.size(0): 114 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 115 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 116 | x = self.lay3(x) 117 | x = self.gn3(x) 118 | x = F.relu(x) 119 | 120 | cur_fpn = self.adapter2(fpns[1]) 121 | if cur_fpn.size(0) != x.size(0): 122 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 123 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 124 | x = self.lay4(x) 125 | x = self.gn4(x) 126 | x = F.relu(x) 127 | 128 | cur_fpn = self.adapter3(fpns[2]) 129 | if cur_fpn.size(0) != x.size(0): 130 | cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) 131 | x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") 132 | x = self.lay5(x) 133 | x = self.gn5(x) 134 | x = F.relu(x) 135 | 136 | x = self.out_lay(x) 137 | return x 138 | 139 | 140 | class MHAttentionMap(nn.Module): 141 | """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" 142 | 143 | def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): 144 | super().__init__() 145 | self.num_heads = num_heads 146 | self.hidden_dim = hidden_dim 147 | self.dropout = nn.Dropout(dropout) 148 | 149 | self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) 150 | self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) 151 | 152 | nn.init.zeros_(self.k_linear.bias) 153 | nn.init.zeros_(self.q_linear.bias) 154 | nn.init.xavier_uniform_(self.k_linear.weight) 155 | nn.init.xavier_uniform_(self.q_linear.weight) 156 | self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 157 | 158 | def forward(self, q, k, mask: Optional[Tensor] = None): 159 | q = self.q_linear(q) 160 | k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) 161 | qh = q.view(q.shape[0], q.shape[1], 
self.num_heads, self.hidden_dim // self.num_heads) 162 | kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) 163 | weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) 164 | 165 | if mask is not None: 166 | weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) 167 | weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights) 168 | weights = self.dropout(weights) 169 | return weights 170 | 171 | 172 | def dice_loss(inputs, targets, num_boxes): 173 | """ 174 | Compute the DICE loss, similar to generalized IOU for masks 175 | Args: 176 | inputs: A float tensor of arbitrary shape. 177 | The predictions for each example. 178 | targets: A float tensor with the same shape as inputs. Stores the binary 179 | classification label for each element in inputs 180 | (0 for the negative class and 1 for the positive class). 181 | """ 182 | inputs = inputs.sigmoid() 183 | inputs = inputs.flatten(1) 184 | numerator = 2 * (inputs * targets).sum(1) 185 | denominator = inputs.sum(-1) + targets.sum(-1) 186 | loss = 1 - (numerator + 1) / (denominator + 1) 187 | return loss.sum() / num_boxes 188 | 189 | 190 | def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): 191 | """ 192 | Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. 193 | Args: 194 | inputs: A float tensor of arbitrary shape. 195 | The predictions for each example. 196 | targets: A float tensor with the same shape as inputs. Stores the binary 197 | classification label for each element in inputs 198 | (0 for the negative class and 1 for the positive class). 199 | alpha: (optional) Weighting factor in range (0,1) to balance 200 | positive vs negative examples. Default: 0.25; set to -1 to disable weighting. 201 | gamma: Exponent of the modulating factor (1 - p_t) to 202 | balance easy vs hard examples.
203 | Returns: 204 | Loss tensor 205 | """ 206 | prob = inputs.sigmoid() 207 | ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") 208 | p_t = prob * targets + (1 - prob) * (1 - targets) 209 | loss = ce_loss * ((1 - p_t) ** gamma) 210 | 211 | if alpha >= 0: 212 | alpha_t = alpha * targets + (1 - alpha) * (1 - targets) 213 | loss = alpha_t * loss 214 | 215 | return loss.mean(1).sum() / num_boxes 216 | 217 | 218 | class PostProcessSegm(nn.Module): 219 | def __init__(self, threshold=0.5): 220 | super().__init__() 221 | self.threshold = threshold 222 | 223 | @torch.no_grad() 224 | def forward(self, results, outputs, orig_target_sizes, max_target_sizes): 225 | assert len(orig_target_sizes) == len(max_target_sizes) 226 | max_h, max_w = max_target_sizes.max(0)[0].tolist() 227 | outputs_masks = outputs["pred_masks"].squeeze(2) 228 | outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) 229 | outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() 230 | 231 | for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): 232 | img_h, img_w = t[0], t[1] 233 | results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) 234 | results[i]["masks"] = F.interpolate( 235 | results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" 236 | ).byte() 237 | 238 | return results 239 | 240 | 241 | class PostProcessPanoptic(nn.Module): 242 | """This class converts the output of the model to the final panoptic result, in the format expected by the 243 | coco panoptic API """ 244 | 245 | def __init__(self, is_thing_map, threshold=0.85): 246 | """ 247 | Parameters: 248 | is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether 249 | the class is a thing (True) or a stuff (False) class 250 | threshold: confidence threshold: segments with confidence lower than this will be deleted 251 | """ 252 | super().__init__() 253 | self.threshold = threshold 254 | self.is_thing_map = is_thing_map 255 | 256 | def forward(self, outputs, processed_sizes, target_sizes=None): 257 | """ This function computes the panoptic prediction from the model's predictions. 258 | Parameters: 259 | outputs: This is a dict coming directly from the model. See the model doc for the content. 260 | processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the 261 | model, ie the size after data augmentation but before batching. 262 | target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size 263 | of each prediction.
If left to None, it will default to the processed_sizes 264 | """ 265 | if target_sizes is None: 266 | target_sizes = processed_sizes 267 | assert len(processed_sizes) == len(target_sizes) 268 | out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] 269 | assert len(out_logits) == len(raw_masks) == len(target_sizes) 270 | preds = [] 271 | 272 | def to_tuple(tup): 273 | if isinstance(tup, tuple): 274 | return tup 275 | return tuple(tup.cpu().tolist()) 276 | 277 | for cur_logits, cur_masks, cur_boxes, size, target_size in zip( 278 | out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes 279 | ): 280 | # we filter out empty queries and detections below the threshold 281 | scores, labels = cur_logits.softmax(-1).max(-1) 282 | keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) 283 | cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) 284 | cur_scores = cur_scores[keep] 285 | cur_classes = cur_classes[keep] 286 | cur_masks = cur_masks[keep] 287 | cur_masks = interpolate(cur_masks[None], to_tuple(size), mode="bilinear").squeeze(0) 288 | cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) 289 | 290 | h, w = cur_masks.shape[-2:] 291 | assert len(cur_boxes) == len(cur_classes) 292 | 293 | # It may be that we have several predicted masks for the same stuff class. 294 | # In the following, we track the mask ids for each stuff class (they are merged later on) 295 | cur_masks = cur_masks.flatten(1) 296 | stuff_equiv_classes = defaultdict(lambda: []) 297 | for k, label in enumerate(cur_classes): 298 | if not self.is_thing_map[label.item()]: 299 | stuff_equiv_classes[label.item()].append(k) 300 | 301 | def get_ids_area(masks, scores, dedup=False): 302 | # This helper function creates the final panoptic segmentation image 303 | # It also returns the area of the masks that appear on the image 304 | 305 | m_id = masks.transpose(0, 1).softmax(-1) 306 | 307 | if m_id.shape[-1] == 0: 308 | # We didn't detect any mask :( 309 | m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) 310 | else: 311 | m_id = m_id.argmax(-1).view(h, w) 312 | 313 | if dedup: 314 | # Merge the masks corresponding to the same stuff class 315 | for equiv in stuff_equiv_classes.values(): 316 | if len(equiv) > 1: 317 | for eq_id in equiv: 318 | m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) 319 | 320 | final_h, final_w = to_tuple(target_size) 321 | 322 | seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) 323 | seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) 324 | 325 | np_seg_img = ( 326 | torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() 327 | ) 328 | m_id = torch.from_numpy(rgb2id(np_seg_img)) 329 | 330 | area = [] 331 | for i in range(len(scores)): 332 | area.append(m_id.eq(i).sum().item()) 333 | return area, seg_img 334 | 335 | area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) 336 | if cur_classes.numel() > 0: 337 | # We now filter out empty masks, as long as we find some 338 | while True: 339 | filtered_small = torch.as_tensor( 340 | [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device 341 | ) 342 | if filtered_small.any().item(): 343 | cur_scores = cur_scores[~filtered_small] 344 | cur_classes = cur_classes[~filtered_small] 345 | cur_masks = cur_masks[~filtered_small] 346 | area, seg_img = get_ids_area(cur_masks, cur_scores) 347 | else: 348 | break 349 | 350 | else: 351 | cur_classes =
torch.ones(1, dtype=torch.long, device=cur_classes.device) 352 | 353 | segments_info = [] 354 | for i, a in enumerate(area): 355 | cat = cur_classes[i].item() 356 | segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) 357 | del cur_classes 358 | 359 | with io.BytesIO() as out: 360 | seg_img.save(out, format="PNG") 361 | predictions = {"png_string": out.getvalue(), "segments_info": segments_info} 362 | preds.append(predictions) 363 | return preds 364 | -------------------------------------------------------------------------------- /models/detr.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 | """ 3 | DETR model and criterion classes. 4 | """ 5 | import torch 6 | import torch.nn.functional as F 7 | from torch import nn 8 | 9 | from util import box_ops 10 | from util.misc import (NestedTensor, nested_tensor_from_tensor_list, 11 | accuracy, get_world_size, interpolate, 12 | is_dist_avail_and_initialized) 13 | 14 | from .backbone import build_backbone 15 | from .matcher import build_matcher 16 | from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, 17 | dice_loss, sigmoid_focal_loss) 18 | from .transformer import build_transformer 19 | 20 | 21 | class DETR(nn.Module): 22 | """ This is the DETR module that performs object detection """ 23 | def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False): 24 | """ Initializes the model. 25 | Parameters: 26 | backbone: torch module of the backbone to be used. See backbone.py 27 | transformer: torch module of the transformer architecture. See transformer.py 28 | num_classes: number of object classes 29 | num_queries: number of object queries, ie detection slots. This is the maximal number of objects 30 | DETR can detect in a single image. For COCO, we recommend 100 queries. 31 | aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 32 | """ 33 | super().__init__() 34 | self.num_queries = num_queries 35 | self.transformer = transformer 36 | hidden_dim = transformer.d_model 37 | self.class_embed = nn.Linear(hidden_dim, num_classes + 1) 38 | self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) 39 | self.query_embed = nn.Embedding(num_queries, hidden_dim) 40 | self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) 41 | self.backbone = backbone 42 | self.aux_loss = aux_loss 43 | 44 | def forward(self, samples: NestedTensor, sample_ratio=None): 45 | """ The forward expects a NestedTensor, which consists of: 46 | - samples.tensor: batched images, of shape [batch_size x 3 x H x W] 47 | - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels 48 | 49 | It returns a dict with the following elements: 50 | - "pred_logits": the classification logits (including no-object) for all queries. 51 | Shape= [batch_size x num_queries x (num_classes + 1)] 52 | - "pred_boxes": The normalized box coordinates for all queries, represented as 53 | (center_x, center_y, width, height). These values are normalized in [0, 1], 54 | relative to the size of each individual image (disregarding possible padding). 55 | See PostProcess for information on how to retrieve the unnormalized bounding box. 56 | - "aux_outputs": Optional, only returned when auxiliary losses are activated. It is a list of 57 | dictionaries containing the two above keys for each decoder layer.
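            Example (editor's illustrative sketch, not part of the original source; assumes a built `model`
            in eval mode and `images`, a list of 3xHxW float tensors):
                out = model(images)
                probs = out['pred_logits'].softmax(-1)[..., :-1]  # per-query class probabilities, no-object column dropped
                boxes = out['pred_boxes']                         # normalized (center_x, center_y, width, height)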
58 | """ 59 | if not isinstance(samples, NestedTensor): 60 | samples = nested_tensor_from_tensor_list(samples) 61 | features, pos = self.backbone(samples) 62 | 63 | src, mask = features[-1].decompose() 64 | assert mask is not None 65 | hs, sample_reg_loss = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1],sample_ratio) 66 | 67 | outputs_class = self.class_embed(hs) 68 | outputs_coord = self.bbox_embed(hs).sigmoid() 69 | out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} 70 | if self.aux_loss: 71 | out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) 72 | out['sample_reg_loss'] = sample_reg_loss 73 | return out 74 | 75 | @torch.jit.unused 76 | def _set_aux_loss(self, outputs_class, outputs_coord): 77 | # this is a workaround to make torchscript happy, as torchscript 78 | # doesn't support dictionary with non-homogeneous values, such 79 | # as a dict having both a Tensor and a list. 80 | return [{'pred_logits': a, 'pred_boxes': b} 81 | for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] 82 | 83 | 84 | class SetCriterion(nn.Module): 85 | """ This class computes the loss for DETR. 86 | The process happens in two steps: 87 | 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 88 | 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) 89 | """ 90 | def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): 91 | """ Create the criterion. 92 | Parameters: 93 | num_classes: number of object categories, omitting the special no-object category 94 | matcher: module able to compute a matching between targets and proposals 95 | weight_dict: dict containing as key the names of the losses and as values their relative weight. 96 | eos_coef: relative classification weight applied to the no-object category 97 | losses: list of all the losses to be applied. See get_loss for list of available losses. 
98 | """ 99 | super().__init__() 100 | self.num_classes = num_classes 101 | self.matcher = matcher 102 | self.weight_dict = weight_dict 103 | self.eos_coef = eos_coef 104 | self.losses = losses 105 | empty_weight = torch.ones(self.num_classes + 1) 106 | empty_weight[-1] = self.eos_coef 107 | self.register_buffer('empty_weight', empty_weight) 108 | 109 | def loss_labels(self, outputs, targets, indices, num_boxes, log=True): 110 | """Classification loss (NLL) 111 | targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] 112 | """ 113 | assert 'pred_logits' in outputs 114 | src_logits = outputs['pred_logits'] 115 | 116 | idx = self._get_src_permutation_idx(indices) 117 | target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) 118 | target_classes = torch.full(src_logits.shape[:2], self.num_classes, 119 | dtype=torch.int64, device=src_logits.device) 120 | target_classes[idx] = target_classes_o 121 | 122 | loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) 123 | losses = {'loss_ce': loss_ce} 124 | 125 | if log: 126 | # TODO this should probably be a separate loss, not hacked in this one here 127 | losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] 128 | return losses 129 | 130 | @torch.no_grad() 131 | def loss_cardinality(self, outputs, targets, indices, num_boxes): 132 | """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes 133 | This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients 134 | """ 135 | pred_logits = outputs['pred_logits'] 136 | device = pred_logits.device 137 | tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) 138 | # Count the number of predictions that are NOT "no-object" (which is the last class) 139 | card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) 140 | card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) 141 | losses = {'cardinality_error': card_err} 142 | return losses 143 | 144 | def loss_boxes(self, outputs, targets, indices, num_boxes): 145 | """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss 146 | targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] 147 | The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 148 | """ 149 | assert 'pred_boxes' in outputs 150 | idx = self._get_src_permutation_idx(indices) 151 | src_boxes = outputs['pred_boxes'][idx] 152 | target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) 153 | 154 | loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') 155 | 156 | losses = {} 157 | losses['loss_bbox'] = loss_bbox.sum() / num_boxes 158 | 159 | loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( 160 | box_ops.box_cxcywh_to_xyxy(src_boxes), 161 | box_ops.box_cxcywh_to_xyxy(target_boxes))) 162 | losses['loss_giou'] = loss_giou.sum() / num_boxes 163 | return losses 164 | 165 | def loss_masks(self, outputs, targets, indices, num_boxes): 166 | """Compute the losses related to the masks: the focal loss and the dice loss. 
167 | targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] 168 | """ 169 | assert "pred_masks" in outputs 170 | 171 | src_idx = self._get_src_permutation_idx(indices) 172 | tgt_idx = self._get_tgt_permutation_idx(indices) 173 | 174 | src_masks = outputs["pred_masks"] 175 | 176 | # TODO use valid to mask invalid areas due to padding in loss 177 | target_masks, valid = nested_tensor_from_tensor_list([t["masks"] for t in targets]).decompose() 178 | target_masks = target_masks.to(src_masks) 179 | 180 | src_masks = src_masks[src_idx] 181 | # upsample predictions to the target size 182 | src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], 183 | mode="bilinear", align_corners=False) 184 | src_masks = src_masks[:, 0].flatten(1) 185 | 186 | target_masks = target_masks[tgt_idx].flatten(1) 187 | 188 | losses = { 189 | "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), 190 | "loss_dice": dice_loss(src_masks, target_masks, num_boxes), 191 | } 192 | return losses 193 | 194 | def _get_src_permutation_idx(self, indices): 195 | # permute predictions following indices 196 | batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) 197 | src_idx = torch.cat([src for (src, _) in indices]) 198 | return batch_idx, src_idx 199 | 200 | def _get_tgt_permutation_idx(self, indices): 201 | # permute targets following indices 202 | batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) 203 | tgt_idx = torch.cat([tgt for (_, tgt) in indices]) 204 | return batch_idx, tgt_idx 205 | 206 | def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): 207 | loss_map = { 208 | 'labels': self.loss_labels, 209 | 'cardinality': self.loss_cardinality, 210 | 'boxes': self.loss_boxes, 211 | 'masks': self.loss_masks 212 | } 213 | assert loss in loss_map, f'do you really want to compute {loss} loss?' 214 | return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) 215 | 216 | def forward(self, outputs, targets): 217 | """ This performs the loss computation. 218 | Parameters: 219 | outputs: dict of tensors, see the output specification of the model for the format 220 | targets: list of dicts, such that len(targets) == batch_size. 221 | The expected keys in each dict depend on the losses applied, see each loss' doc 222 | """ 223 | outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} 224 | 225 | # Retrieve the matching between the outputs of the last layer and the targets 226 | indices = self.matcher(outputs_without_aux, targets) 227 | 228 | # Compute the average number of target boxes across all nodes, for normalization purposes 229 | num_boxes = sum(len(t["labels"]) for t in targets) 230 | num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) 231 | if is_dist_avail_and_initialized(): 232 | torch.distributed.all_reduce(num_boxes) 233 | num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() 234 | 235 | # Compute all the requested losses 236 | losses = {} 237 | for loss in self.losses: 238 | losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) 239 | 240 | # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
241 | if 'aux_outputs' in outputs: 242 | for i, aux_outputs in enumerate(outputs['aux_outputs']): 243 | indices = self.matcher(aux_outputs, targets) 244 | for loss in self.losses: 245 | if loss == 'masks': 246 | # Intermediate mask losses are too costly to compute, so we ignore them. 247 | continue 248 | kwargs = {} 249 | if loss == 'labels': 250 | # Logging is enabled only for the last layer 251 | kwargs = {'log': False} 252 | l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) 253 | l_dict = {k + f'_{i}': v for k, v in l_dict.items()} 254 | losses.update(l_dict) 255 | 256 | return losses 257 | 258 | 259 | class PostProcess(nn.Module): 260 | """ This module converts the model's output into the format expected by the coco api""" 261 | @torch.no_grad() 262 | def forward(self, outputs, target_sizes): 263 | """ Perform the computation 264 | Parameters: 265 | outputs: raw outputs of the model 266 | target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch 267 | For evaluation, this must be the original image size (before any data augmentation) 268 | For visualization, this should be the image size after data augmentation, but before padding 269 | """ 270 | out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] 271 | 272 | assert len(out_logits) == len(target_sizes) 273 | assert target_sizes.shape[1] == 2 274 | 275 | prob = F.softmax(out_logits, -1) 276 | scores, labels = prob[..., :-1].max(-1) 277 | 278 | # convert to [x0, y0, x1, y1] format 279 | boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) 280 | # and from relative [0, 1] to absolute [0, height] coordinates 281 | img_h, img_w = target_sizes.unbind(1) 282 | scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) 283 | boxes = boxes * scale_fct[:, None, :] 284 | 285 | results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] 286 | 287 | return results 288 | 289 | 290 | class MLP(nn.Module): 291 | """ Very simple multi-layer perceptron (also called FFN)""" 292 | 293 | def __init__(self, input_dim, hidden_dim, output_dim, num_layers): 294 | super().__init__() 295 | self.num_layers = num_layers 296 | h = [hidden_dim] * (num_layers - 1) 297 | self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) 298 | 299 | def forward(self, x): 300 | for i, layer in enumerate(self.layers): 301 | x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) 302 | return x 303 | 304 | 305 | def build(args): 306 | num_classes = 20 if args.dataset_file != 'coco' else 91 307 | if args.dataset_file == "coco_panoptic": 308 | num_classes = 250 309 | device = torch.device(args.device) 310 | 311 | backbone = build_backbone(args) 312 | 313 | transformer = build_transformer(args) 314 | 315 | model = DETR( 316 | backbone, 317 | transformer, 318 | num_classes=num_classes, 319 | num_queries=args.num_queries, 320 | aux_loss=args.aux_loss, 321 | ) 322 | if args.masks: 323 | model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) 324 | matcher = build_matcher(args) 325 | weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef} 326 | weight_dict['loss_giou'] = args.giou_loss_coef 327 | if args.masks: 328 | weight_dict["loss_mask"] = args.mask_loss_coef 329 | weight_dict["loss_dice"] = args.dice_loss_coef 330 | # TODO this is a hack 331 | if args.aux_loss: 332 | aux_weight_dict = {} 333 | for i in range(args.dec_layers - 1): 334 | aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()}) 335
| weight_dict.update(aux_weight_dict) 336 | 337 | losses = ['labels', 'boxes', 'cardinality'] 338 | if args.masks: 339 | losses += ["masks"] 340 | criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, 341 | eos_coef=args.eos_coef, losses=losses) 342 | criterion.to(device) 343 | postprocessors = {'bbox': PostProcess()} 344 | if args.masks: 345 | postprocessors['segm'] = PostProcessSegm() 346 | if args.dataset_file == "coco_panoptic": 347 | is_thing_map = {i: i <= 90 for i in range(201)} 348 | postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85) 349 | 350 | return model, criterion, postprocessors 351 | --------------------------------------------------------------------------------
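Editor's usage sketches (illustrative additions, not files from the repository).

The first sketch sanity-checks the two mask losses defined in models/segmentation.py. Both functions take raw logits, binary targets of the same shape, and a normalizer num_boxes; the shapes and values below are made up for the demonstration.

    import torch
    from models.segmentation import dice_loss, sigmoid_focal_loss

    logits = torch.randn(4, 64 * 64)                          # 4 predicted masks, flattened to [num_masks, num_pixels]
    targets = torch.randint(0, 2, (4, 64 * 64)).float()       # binary ground-truth masks
    print(dice_loss(logits, targets, num_boxes=4))            # scalar tensor
    print(sigmoid_focal_loss(logits, targets, num_boxes=4))   # scalar tensor

The second sketch shows how build() and PostProcess fit together at inference time. It assumes `args` is the namespace produced by main.py's get_args_parser() (build() reads fields such as dataset_file, device, num_queries, aux_loss, masks and the loss coefficients). Note that in this PnP variant the transformer may additionally expect a sample_ratio; pass one to model(...) if your configuration requires it.

    import torch
    from models import build_model

    model, criterion, postprocessors = build_model(args)
    model.eval()
    images = [torch.randn(3, 800, 1200)]                  # one 3xHxW image; batching and padding happen internally
    out = model(images)                                   # dict with 'pred_logits', 'pred_boxes' (+ 'sample_reg_loss' here)
    target_sizes = torch.tensor([[800, 1200]])            # original (height, width) of each image
    results = postprocessors['bbox'](out, target_sizes)   # absolute [x0, y0, x1, y1] boxes with scores and labels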