├── lib
│   ├── __init__.py
│   ├── mmcv_custom
│   │   └── __init__.py
│   ├── _utils.py
│   ├── loss_functions.py
│   ├── segmentation.py
│   └── mask_predictor.py
├── refer
│   ├── external
│   │   ├── __init__.py
│   │   ├── README.md
│   │   ├── maskApi.h
│   │   ├── mask.py
│   │   ├── maskApi.c
│   │   └── _mask.pyx
│   ├── evaluation
│   │   ├── bleu
│   │   │   ├── __init__.py
│   │   │   ├── LICENSE
│   │   │   ├── bleu.py
│   │   │   └── bleu_scorer.py
│   │   ├── cider
│   │   │   ├── __init__.py
│   │   │   ├── cider.py
│   │   │   └── cider_scorer.py
│   │   ├── meteor
│   │   │   ├── __init__.py
│   │   │   └── meteor.py
│   │   ├── rouge
│   │   │   ├── __init__.py
│   │   │   └── rouge.py
│   │   ├── tokenizer
│   │   │   ├── __init__.py
│   │   │   ├── stanford-corenlp-3.4.1.jar
│   │   │   └── ptbtokenizer.py
│   │   ├── __init__.py
│   │   ├── readme.txt
│   │   └── refEvaluation.py
│   ├── .DS_Store
│   ├── Makefile
│   ├── setup.py
│   └── LICENSE
├── bert
│   ├── requirements.txt
│   ├── __init__.py
│   ├── CONTRIBUTING.md
│   ├── activations.py
│   ├── optimization_test.py
│   ├── sample_text.txt
│   ├── tokenization_test.py
│   ├── optimization.py
│   ├── configuration_bert.py
│   ├── modeling_test.py
│   ├── LICENSE
│   ├── multilingual.md
│   └── run_classifier_with_tfhub.py
├── .gitignore
├── test.sh
├── train.sh
├── args.py
├── README.md
├── transforms.py
├── test.py
├── data
│   ├── dataset.py
│   └── dataset_zom.py
└── utils.py
/lib/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/refer/external/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/bleu/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/cider/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/meteor/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tylin'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/rouge/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'vrama91'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/tokenizer/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'hfang'
2 |
--------------------------------------------------------------------------------
/refer/evaluation/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'licheng'
2 |
3 |
4 |
--------------------------------------------------------------------------------
/refer/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/toggle1995/RIS-DMMI/HEAD/refer/.DS_Store
--------------------------------------------------------------------------------
/refer/external/README.md:
--------------------------------------------------------------------------------
1 | The code inside this folder is copied from pycocotools: https://github.com/pdollar/coco
--------------------------------------------------------------------------------
/bert/requirements.txt:
--------------------------------------------------------------------------------
1 | tensorflow >= 1.11.0 # CPU Version of TensorFlow.
2 | # tensorflow-gpu >= 1.11.0 # GPU version of TensorFlow.
3 |
--------------------------------------------------------------------------------
/lib/mmcv_custom/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from .checkpoint import load_checkpoint
4 |
5 | __all__ = ['load_checkpoint']
6 |
--------------------------------------------------------------------------------
/refer/evaluation/tokenizer/stanford-corenlp-3.4.1.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/toggle1995/RIS-DMMI/HEAD/refer/evaluation/tokenizer/stanford-corenlp-3.4.1.jar
--------------------------------------------------------------------------------
/refer/Makefile:
--------------------------------------------------------------------------------
1 | all:
2 | # install pycocotools/mask locally
3 | # copy from https://github.com/pdollar/coco.git
4 | python setup.py build_ext --inplace
5 | rm -rf build
6 |
7 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode
2 | .idea
3 | lib/__pycache__/*
4 | *.pkl
5 | *.json
6 | models/*
7 | bert/pytorch_model.bin
8 | *.jpg
9 | *.pth
10 | *.png
11 | vis*
12 | *.zip
13 | *.csv
14 | *.pyc
15 | *.so
16 | *.p
--------------------------------------------------------------------------------
/test.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python test.py --model dmmi_swin --swin_type base --dataset ref-zom --splitBy final --split test --test_parameter /path_to/trained_parameter --workers 0 --ddp_trained_weights --window12 --img_size 480
--------------------------------------------------------------------------------
/refer/evaluation/readme.txt:
--------------------------------------------------------------------------------
1 | This folder contains the modified coco-caption evaluation code, which is downloaded from https://github.com/tylin/coco-caption.git,
2 | and refEvaluation.py, which is called by the refer algorithm.
3 |
4 | More specifically, this folder contains:
5 | 1. bleu/
6 | 2. cider/
7 | 3. meteor/
8 | 4. rouge/
9 | 5. tokenizer/
10 | 6. __init__.py
11 | 7. refEvaluation.py
12 |
--------------------------------------------------------------------------------
/train.sh:
--------------------------------------------------------------------------------
1 | CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --nproc_per_node 1 --master_port 29500 train.py --model dmmi_swin --dataset ref-zom --splitBy final --split test \
2 | --model_id 1234 --batch-size 2 --lr 0.00005 --wd 1e-2 --window12 --swin_type base --pretrained_backbone /path_to/swin_base_patch4_window12_384_22k.pth \
3 | --output_dir '/path_to/output_dir' --epochs 40 --img_size 480 2>&1 | tee /path_to/output_dir/output.log
--------------------------------------------------------------------------------
/bert/__init__.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 |
--------------------------------------------------------------------------------
/refer/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from Cython.Build import cythonize
3 | from distutils.extension import Extension
4 | import numpy as np
5 |
6 | ext_modules = [
7 | Extension(
8 | 'external._mask',
9 | sources=['external/maskApi.c', 'external/_mask.pyx'],
10 | include_dirs = [np.get_include(), 'external'],
11 | extra_compile_args=['-Wno-cpp', '-Wno-unused-function', '-std=c99'],
12 | )
13 | ]
14 |
15 | setup(
16 | name='external',
17 | packages=['external'],
18 | package_dir = {'external': 'external'},
19 | version='2.0',
20 | ext_modules=cythonize(ext_modules)
21 | )
22 |
--------------------------------------------------------------------------------
/refer/evaluation/bleu/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2015 Xinlei Chen, Hao Fang, Tsung-Yi Lin, and Ramakrishna Vedantam
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19 | THE SOFTWARE.
20 |
--------------------------------------------------------------------------------
/lib/_utils.py:
--------------------------------------------------------------------------------
1 | import pdb
2 | from torch import nn
3 | from torch.nn import functional as F
4 | from .loss_functions import Contrastive_Loss, Cosine_Sim_Loss
5 |
6 | class _DMMI_Framework(nn.Module):
7 | def __init__(self, backbone, classifier):
8 | super(_DMMI_Framework, self).__init__()
9 | self.backbone = backbone
10 | self.classifier = classifier
11 | self.cossim = Cosine_Sim_Loss()
12 | self.contrastive = Contrastive_Loss()
13 |
14 | def forward(self, x, l_feats, l_feats1, l_mask, target_flag=None, training_flag=True):
15 |
16 | input_shape = x.shape[-2:]
17 | l_1, features = self.backbone(x, l_feats, l_mask)
18 | x_c1, x_c2, x_c3, x_c4 = features
19 | de_feat, l_2, x = self.classifier(l_1, l_feats1, x_c4, x_c3, x_c2, x_c1)
20 |         seg_map = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=True)
21 |
22 |         if training_flag and target_flag is not None:
23 | loss_contrastive = self.contrastive(de_feat, l_1, target_flag)
24 | loss_cossim = self.cossim(l_1, l_2, l_mask, target_flag)
25 | else:
26 | loss_contrastive = 0
27 | loss_cossim = 0
28 |
29 |         return loss_contrastive, loss_cossim, seg_map
30 |
31 | class DMMI(_DMMI_Framework):
32 | pass
33 |
--------------------------------------------------------------------------------
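
A minimal sketch of the tensor contract that `_DMMI_Framework.forward` relies on, using stub modules in place of the real backbone and classifier; the batch/token/channel sizes and the channels-first language layout are illustrative assumptions, not values taken from this repo:

```
import torch
from torch import nn
from lib._utils import DMMI

B, T, C, H, W = 2, 20, 768, 480, 480  # assumed sizes, for illustration only

class StubBackbone(nn.Module):
    # mimics the backbone contract: fused language features l_1 plus a
    # 4-level visual feature pyramid (contents are random placeholders)
    def forward(self, x, l_feats, l_mask):
        return torch.randn(B, C, T), tuple(torch.randn(B, 8, 15, 15) for _ in range(4))

class StubClassifier(nn.Module):
    # mimics the classifier contract: decoded visual embedding, reconstructed
    # language features, and low-resolution mask logits
    def forward(self, l_1, l_feats1, x_c4, x_c3, x_c2, x_c1):
        return torch.randn(B, C), torch.randn(B, C, T), torch.randn(B, 1, H // 4, W // 4)

model = DMMI(StubBackbone(), StubClassifier())
x = torch.randn(B, 3, H, W)      # image batch
l_feats = torch.randn(B, C, T)   # BERT token features, channels first
l_mask = torch.ones(B, T, 1)     # 1 = valid token
# training_flag=False skips both auxiliary losses, so this runs on CPU
_, _, seg_map = model(x, l_feats, l_feats, l_mask, training_flag=False)
print(seg_map.shape)             # torch.Size([2, 1, 480, 480])
```
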
/bert/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | BERT needs to maintain permanent compatibility with the pre-trained model files,
4 | so we do not plan to make any major changes to this library (other than what was
5 | promised in the README). However, we can accept small patches related to
6 | re-factoring and documentation. To submit contributions, there are just a few
7 | small guidelines you need to follow.
8 |
9 | ## Contributor License Agreement
10 |
11 | Contributions to this project must be accompanied by a Contributor License
12 | Agreement. You (or your employer) retain the copyright to your contribution;
13 | this simply gives us permission to use and redistribute your contributions as
14 | part of the project. Head over to <https://cla.developers.google.com/> to see
15 | your current agreements on file or to sign a new one.
16 |
17 | You generally only need to submit a CLA once, so if you've already submitted one
18 | (even if it was for a different project), you probably don't need to do it
19 | again.
20 |
21 | ## Code reviews
22 |
23 | All submissions, including submissions by project members, require review. We
24 | use GitHub pull requests for this purpose. Consult
25 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
26 | information on using pull requests.
27 |
28 | ## Community Guidelines
29 |
30 | This project follows
31 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/).
32 |
--------------------------------------------------------------------------------
/refer/evaluation/bleu/bleu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # File Name : bleu.py
4 | #
5 | # Description : Wrapper for BLEU scorer.
6 | #
7 | # Creation Date : 06-01-2015
8 | # Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
9 | # Authors : Hao Fang and Tsung-Yi Lin
10 |
11 | from bleu_scorer import BleuScorer
12 |
13 |
14 | class Bleu:
15 | def __init__(self, n=4):
16 |         # default: compute BLEU score up to 4-grams
17 | self._n = n
18 | self._hypo_for_image = {}
19 | self.ref_for_image = {}
20 |
21 | def compute_score(self, gts, res):
22 |
23 | assert(gts.keys() == res.keys())
24 | imgIds = gts.keys()
25 |
26 | bleu_scorer = BleuScorer(n=self._n)
27 | for id in imgIds:
28 | hypo = res[id]
29 | ref = gts[id]
30 |
31 | # Sanity check.
32 | assert(type(hypo) is list)
33 | assert(len(hypo) == 1)
34 | assert(type(ref) is list)
35 | assert(len(ref) >= 1)
36 |
37 | bleu_scorer += (hypo[0], ref)
38 |
39 | #score, scores = bleu_scorer.compute_score(option='shortest')
40 | score, scores = bleu_scorer.compute_score(option='closest', verbose=1)
41 | #score, scores = bleu_scorer.compute_score(option='average', verbose=1)
42 |
43 | # return (bleu, bleu_info)
44 | return score, scores
45 |
46 | def method(self):
47 | return "Bleu"
48 |
--------------------------------------------------------------------------------
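
A hypothetical usage sketch for the wrapper above; `bleu_scorer.py` is not reproduced in this listing, and the import assumes `refer/evaluation/bleu` is on `sys.path`:

```
from bleu import Bleu

gts = {0: ['the left bottle', 'bottle on the left'],  # one or more references each
       1: ['a red umbrella']}
res = {0: ['left bottle'],                            # exactly one hypothesis each
       1: ['red umbrella']}

score, scores = Bleu(n=4).compute_score(gts, res)
print(score)  # corpus-level [Bleu_1, Bleu_2, Bleu_3, Bleu_4]
```
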
/bert/activations.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import math
3 |
4 | import torch
5 | import torch.nn.functional as F
6 |
7 |
8 | logger = logging.getLogger(__name__)
9 |
10 |
11 | def swish(x):
12 | return x * torch.sigmoid(x)
13 |
14 |
15 | def _gelu_python(x):
16 | """ Original Implementation of the gelu activation function in Google Bert repo when initially created.
17 | For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
18 | 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
19 | This is now written in C in torch.nn.functional
20 | Also see https://arxiv.org/abs/1606.08415
21 | """
22 | return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
23 |
24 |
25 | def gelu_new(x):
26 | """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
27 | Also see https://arxiv.org/abs/1606.08415
28 | """
29 | return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
30 |
31 |
32 | if torch.__version__ < "1.4.0":
33 | gelu = _gelu_python
34 | else:
35 | gelu = F.gelu
36 |
37 |
38 | def gelu_fast(x):
39 | return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
40 |
41 |
42 | ACT2FN = {
43 | "relu": F.relu,
44 | "swish": swish,
45 | "gelu": gelu,
46 | "tanh": torch.tanh,
47 | "gelu_new": gelu_new,
48 | "gelu_fast": gelu_fast,
49 | }
50 |
51 |
52 | def get_activation(activation_string):
53 | if activation_string in ACT2FN:
54 | return ACT2FN[activation_string]
55 | else:
56 | raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
57 |
--------------------------------------------------------------------------------
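
A short usage sketch for the `ACT2FN` lookup (assuming `bert/` is importable):

```
import torch
from activations import get_activation

act = get_activation("gelu_new")
print(act(torch.linspace(-3.0, 3.0, 5)))  # elementwise tanh-approximated GELU
# get_activation("elu") would raise KeyError, since "elu" is not in ACT2FN
```
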
/refer/evaluation/cider/cider.py:
--------------------------------------------------------------------------------
1 | # Filename: cider.py
2 | #
3 | # Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric
4 | # by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
5 | #
6 | # Creation Date: Sun Feb 8 14:16:54 2015
7 | #
8 | # Authors: Ramakrishna Vedantam and Tsung-Yi Lin
9 |
10 | from cider_scorer import CiderScorer
11 | import pdb
12 |
13 | class Cider:
14 | """
15 | Main Class to compute the CIDEr metric
16 |
17 | """
18 | def __init__(self, test=None, refs=None, n=4, sigma=6.0):
19 | # set cider to sum over 1 to 4-grams
20 | self._n = n
21 | # set the standard deviation parameter for gaussian penalty
22 | self._sigma = sigma
23 |
24 | def compute_score(self, gts, res):
25 | """
26 | Main function to compute CIDEr score
27 |         :param hypo_for_image (dict) : dictionary with key <image> and value <tokenized hypothesis / candidate sentence>
28 |                 ref_for_image (dict) : dictionary with key <image> and value <tokenized reference sentence>
29 | :return: cider (float) : computed CIDEr score for the corpus
30 | """
31 |
32 | assert(gts.keys() == res.keys())
33 | imgIds = gts.keys()
34 |
35 | cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)
36 |
37 | for id in imgIds:
38 | hypo = res[id]
39 | ref = gts[id]
40 |
41 | # Sanity check.
42 | assert(type(hypo) is list)
43 | assert(len(hypo) == 1)
44 | assert(type(ref) is list)
45 | assert(len(ref) > 0)
46 |
47 | cider_scorer += (hypo[0], ref)
48 |
49 | (score, scores) = cider_scorer.compute_score()
50 |
51 | return score, scores
52 |
53 | def method(self):
54 | return "CIDEr"
--------------------------------------------------------------------------------
/bert/optimization_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import optimization
20 | import tensorflow as tf
21 |
22 |
23 | class OptimizationTest(tf.test.TestCase):
24 |
25 | def test_adam(self):
26 | with self.test_session() as sess:
27 | w = tf.get_variable(
28 | "w",
29 | shape=[3],
30 | initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
31 | x = tf.constant([0.4, 0.2, -0.5])
32 | loss = tf.reduce_mean(tf.square(x - w))
33 | tvars = tf.trainable_variables()
34 | grads = tf.gradients(loss, tvars)
35 | global_step = tf.train.get_or_create_global_step()
36 | optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
37 | train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
38 | init_op = tf.group(tf.global_variables_initializer(),
39 | tf.local_variables_initializer())
40 | sess.run(init_op)
41 | for _ in range(100):
42 | sess.run(train_op)
43 | w_np = sess.run(w)
44 | self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
45 |
46 |
47 | if __name__ == "__main__":
48 | tf.test.main()
49 |
--------------------------------------------------------------------------------
/refer/external/maskApi.h:
--------------------------------------------------------------------------------
1 | /**************************************************************************
2 | * Microsoft COCO Toolbox. version 2.0
3 | * Data, paper, and tutorials available at: http://mscoco.org/
4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
5 | * Licensed under the Simplified BSD License [see coco/license.txt]
6 | **************************************************************************/
7 | #pragma once
8 |
9 | typedef unsigned int uint;
10 | typedef unsigned long siz;
11 | typedef unsigned char byte;
12 | typedef double* BB;
13 | typedef struct { siz h, w, m; uint *cnts; } RLE;
14 |
15 | /* Initialize/destroy RLE. */
16 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts );
17 | void rleFree( RLE *R );
18 |
19 | /* Initialize/destroy RLE array. */
20 | void rlesInit( RLE **R, siz n );
21 | void rlesFree( RLE **R, siz n );
22 |
23 | /* Encode binary masks using RLE. */
24 | void rleEncode( RLE *R, const byte *mask, siz h, siz w, siz n );
25 |
26 | /* Decode binary masks encoded via RLE. */
27 | void rleDecode( const RLE *R, byte *mask, siz n );
28 |
29 | /* Compute union or intersection of encoded masks. */
30 | void rleMerge( const RLE *R, RLE *M, siz n, int intersect );
31 |
32 | /* Compute area of encoded masks. */
33 | void rleArea( const RLE *R, siz n, uint *a );
34 |
35 | /* Compute intersection over union between masks. */
36 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o );
37 |
38 | /* Compute non-maximum suppression between bounding masks */
39 | void rleNms( RLE *dt, siz n, uint *keep, double thr );
40 |
41 | /* Compute intersection over union between bounding boxes. */
42 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o );
43 |
44 | /* Compute non-maximum suppression between bounding boxes */
45 | void bbNms( BB dt, siz n, uint *keep, double thr );
46 |
47 | /* Get bounding boxes surrounding encoded masks. */
48 | void rleToBbox( const RLE *R, BB bb, siz n );
49 |
50 | /* Convert bounding boxes to encoded masks. */
51 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n );
52 |
53 | /* Convert polygon to encoded mask. */
54 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w );
55 |
56 | /* Get compressed string representation of encoded mask. */
57 | char* rleToString( const RLE *R );
58 |
59 | /* Convert from compressed string representation of encoded mask. */
60 | void rleFrString( RLE *R, char *s, siz h, siz w );
61 |
--------------------------------------------------------------------------------
/lib/loss_functions.py:
--------------------------------------------------------------------------------
1 | import pdb
2 | import torch
3 | import torch.nn as nn
4 | import torch.nn.functional as F
5 | import numpy as np
6 |
7 |
8 | class Contrastive_Loss(nn.Module):
9 |     """Image-text contrastive loss (InfoNCE-style) with temperature scaling.
10 | 
11 |     """
12 | def __init__(self, temperature=0.05):
13 | super(Contrastive_Loss, self).__init__()
14 | self.temperature = temperature
15 | self.adpool = nn.AdaptiveAvgPool2d((1, 1))
16 | self.align_lan = nn.Sequential(
17 | nn.Conv1d(768, 768, kernel_size=1, stride=1),
18 | )
19 |
20 |
21 | def forward(self, vis_feature, lan_feature, target_flag):
22 |         """Symmetric image-text contrastive loss, masked by target_flag."""
23 | 
24 | vis_feature1 = F.normalize(vis_feature, dim=1)
25 | lan_feature1 = self.align_lan(lan_feature)
26 | lan_feature1 = self.adpool(lan_feature1.unsqueeze(3)).view(lan_feature.shape[0], lan_feature.shape[1])
27 | lan_feature1 = F.normalize(lan_feature1, dim=1)
28 |
29 | img_text_logits = torch.matmul(vis_feature1, lan_feature1.permute(1, 0)) / self.temperature
30 | text_img_logits = img_text_logits.permute(1, 0)
31 | labels = torch.arange(0, len(lan_feature)).cuda()
32 |         loss_a = nn.functional.cross_entropy(img_text_logits, labels, reduction='none')
33 |         loss_b = nn.functional.cross_entropy(text_img_logits, labels, reduction='none')
34 | loss_a = torch.mean(loss_a * target_flag)
35 | loss_b = torch.mean(loss_b * target_flag)
36 |
37 | loss_con = loss_a + loss_b
38 | return loss_con
39 |
40 | class Cosine_Sim_Loss(nn.Module):
41 |     """Cosine similarity loss between two sets of language features.
42 |     """
43 |
44 | def __init__(self):
45 | super(Cosine_Sim_Loss, self).__init__()
46 | self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
47 |
48 | def forward(self, lan1, lan2, mask_full, target_flag):
49 |         """1 - mean masked cosine similarity between lan1 and lan2."""
50 | 
51 | maskf1 = mask_full.permute(0, 2, 1)
52 | target_flag = target_flag.view(target_flag.shape[0], 1)
53 |
54 | lan1_1 = lan1 * maskf1
55 | lan2_1 = lan2 * maskf1
56 | lan1_1_clone = lan1_1.detach()
57 | score = self.cos(lan1_1_clone, lan2_1)
58 | score = score * target_flag
59 | score1 = torch.sum(score, dim=-1)
60 | length = torch.sum(maskf1, dim=-1).squeeze(-1)
61 | mean_score = score1 / length
62 | # pdb.set_trace()
63 |
64 | loss_cossim = 1 - torch.mean(mean_score)
65 | return loss_cossim
--------------------------------------------------------------------------------
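
A shape sketch for the two losses above; batch size 4, 20 tokens and 768 channels are illustrative values, and a GPU is assumed because `Contrastive_Loss` moves its labels to CUDA:

```
import torch
from lib.loss_functions import Contrastive_Loss, Cosine_Sim_Loss

B, C, T = 4, 768, 20                   # assumed sizes
vis = torch.randn(B, C).cuda()         # pooled visual feature per image
lan = torch.randn(B, C, T).cuda()      # token-level language features
flag = torch.ones(B).cuda()            # 1 = sample has a referred target

loss_con = Contrastive_Loss().cuda()(vis, lan, flag)

mask = torch.ones(B, T, 1).cuda()      # valid-token mask, shape (B, T, 1)
loss_cos = Cosine_Sim_Loss()(lan, torch.randn(B, C, T).cuda(), mask, flag)
print(loss_con.item(), loss_cos.item())
```
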
/refer/evaluation/tokenizer/ptbtokenizer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # File Name : ptbtokenizer.py
4 | #
5 | # Description : Do the PTB Tokenization and remove punctuations.
6 | #
7 | # Creation Date : 29-12-2014
8 | # Last Modified : Thu Mar 19 09:53:35 2015
9 | # Authors : Hao Fang and Tsung-Yi Lin
10 |
11 | import os
12 | import sys
13 | import subprocess
14 | import tempfile
15 | import itertools
16 |
17 | # path to the stanford corenlp jar
18 | STANFORD_CORENLP_3_4_1_JAR = 'stanford-corenlp-3.4.1.jar'
19 |
20 | # punctuations to be removed from the sentences
21 | PUNCTUATIONS = ["''", "'", "``", "`", "-LRB-", "-RRB-", "-LCB-", "-RCB-", \
22 | ".", "?", "!", ",", ":", "-", "--", "...", ";"]
23 |
24 | class PTBTokenizer:
25 | """Python wrapper of Stanford PTBTokenizer"""
26 |
27 | def tokenize(self, captions_for_image):
28 | cmd = ['java', '-cp', STANFORD_CORENLP_3_4_1_JAR, \
29 | 'edu.stanford.nlp.process.PTBTokenizer', \
30 | '-preserveLines', '-lowerCase']
31 |
32 | # ======================================================
33 | # prepare data for PTB Tokenizer
34 | # ======================================================
35 | final_tokenized_captions_for_image = {}
36 | image_id = [k for k, v in captions_for_image.items() for _ in range(len(v))]
37 | sentences = '\n'.join([c.replace('\n', ' ') for k, v in captions_for_image.items() for c in v])
38 |
39 | # ======================================================
40 | # save sentences to temporary file
41 | # ======================================================
42 | path_to_jar_dirname=os.path.dirname(os.path.abspath(__file__))
43 | tmp_file = tempfile.NamedTemporaryFile(delete=False, dir=path_to_jar_dirname)
44 |         tmp_file.write(sentences.encode('utf-8'))
45 | tmp_file.close()
46 |
47 | # ======================================================
48 | # tokenize sentence
49 | # ======================================================
50 | cmd.append(os.path.basename(tmp_file.name))
51 | p_tokenizer = subprocess.Popen(cmd, cwd=path_to_jar_dirname, \
52 | stdout=subprocess.PIPE)
53 |         token_lines = p_tokenizer.communicate()[0].decode('utf-8')  # sentences were already written to the temp file
54 | lines = token_lines.split('\n')
55 | # remove temp file
56 | os.remove(tmp_file.name)
57 |
58 | # ======================================================
59 | # create dictionary for tokenized captions
60 | # ======================================================
61 | for k, line in zip(image_id, lines):
62 | if not k in final_tokenized_captions_for_image:
63 | final_tokenized_captions_for_image[k] = []
64 | tokenized_caption = ' '.join([w for w in line.rstrip().split(' ') \
65 | if w not in PUNCTUATIONS])
66 | final_tokenized_captions_for_image[k].append(tokenized_caption)
67 |
68 | return final_tokenized_captions_for_image
69 |
--------------------------------------------------------------------------------
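
The expected input and output of the tokenizer, sketched below; running it requires Java plus the stanford-corenlp-3.4.1.jar next to ptbtokenizer.py, with `refer/evaluation/tokenizer` on `sys.path`, and the key 184321 is an arbitrary example id:

```
from ptbtokenizer import PTBTokenizer

captions = {184321: ["The man on the left.", "Left guy, in a red shirt!"]}
tokenized = PTBTokenizer().tokenize(captions)
# => {184321: ['the man on the left', 'left guy in a red shirt']}
# (lowercased via -lowerCase; punctuation dropped via PUNCTUATIONS)
```
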
/refer/evaluation/meteor/meteor.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # Python wrapper for METEOR implementation, by Xinlei Chen
4 | # Acknowledge Michael Denkowski for the generous discussion and help
5 |
6 | import os
7 | import sys
8 | import subprocess
9 | import threading
10 |
11 | # Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
12 | METEOR_JAR = 'meteor-1.5.jar'
13 | # print METEOR_JAR
14 |
15 | class Meteor:
16 |
17 | def __init__(self):
18 | self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, \
19 | '-', '-', '-stdio', '-l', 'en', '-norm']
20 | self.meteor_p = subprocess.Popen(self.meteor_cmd, \
21 | cwd=os.path.dirname(os.path.abspath(__file__)), \
22 | stdin=subprocess.PIPE, \
23 | stdout=subprocess.PIPE, \
24 |                 stderr=subprocess.PIPE, universal_newlines=True)
25 | # Used to guarantee thread safety
26 | self.lock = threading.Lock()
27 |
28 | def compute_score(self, gts, res):
29 | assert(gts.keys() == res.keys())
30 | imgIds = gts.keys()
31 | scores = []
32 |
33 | eval_line = 'EVAL'
34 | self.lock.acquire()
35 | for i in imgIds:
36 | assert(len(res[i]) == 1)
37 | stat = self._stat(res[i][0], gts[i])
38 | eval_line += ' ||| {}'.format(stat)
39 |
40 |         self.meteor_p.stdin.write('{}\n'.format(eval_line)); self.meteor_p.stdin.flush()
41 | for i in range(0,len(imgIds)):
42 | scores.append(float(self.meteor_p.stdout.readline().strip()))
43 | score = float(self.meteor_p.stdout.readline().strip())
44 | self.lock.release()
45 |
46 | return score, scores
47 |
48 | def method(self):
49 | return "METEOR"
50 |
51 | def _stat(self, hypothesis_str, reference_list):
52 | # SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
53 |         hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
54 | score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
55 |         self.meteor_p.stdin.write('{}\n'.format(score_line)); self.meteor_p.stdin.flush()
56 | return self.meteor_p.stdout.readline().strip()
57 |
58 | def _score(self, hypothesis_str, reference_list):
59 | self.lock.acquire()
60 | # SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
61 |         hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
62 | score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
63 |         self.meteor_p.stdin.write('{}\n'.format(score_line)); self.meteor_p.stdin.flush()
64 | stats = self.meteor_p.stdout.readline().strip()
65 | eval_line = 'EVAL ||| {}'.format(stats)
66 | # EVAL ||| stats
67 |         self.meteor_p.stdin.write('{}\n'.format(eval_line)); self.meteor_p.stdin.flush()
68 | score = float(self.meteor_p.stdout.readline().strip())
69 | self.lock.release()
70 | return score
71 |
72 | def __exit__(self):
73 | self.lock.acquire()
74 | self.meteor_p.stdin.close()
75 | self.meteor_p.wait()
76 | self.lock.release()
77 |
--------------------------------------------------------------------------------
/lib/segmentation.py:
--------------------------------------------------------------------------------
1 | from .mask_predictor import Decoder
2 | from .backbone import MultiModalSwinTransformer
3 | from .backbone_resnet import MultiModalResNet
4 | from ._utils import DMMI
5 |
6 | __all__ = ['dmmi_swin', 'dmmi_res']
7 |
8 | # DMMI based on swin-transformer
9 | def _segm_dmmi_swin(pretrained, args):
10 | # initialize the SwinTransformer backbone with the specified version
11 | if args.swin_type == 'tiny':
12 | embed_dim = 96
13 | depths = [2, 2, 6, 2]
14 | num_heads = [3, 6, 12, 24]
15 | elif args.swin_type == 'small':
16 | embed_dim = 96
17 | depths = [2, 2, 18, 2]
18 | num_heads = [3, 6, 12, 24]
19 | elif args.swin_type == 'base':
20 | embed_dim = 128
21 | depths = [2, 2, 18, 2]
22 | num_heads = [4, 8, 16, 32]
23 | elif args.swin_type == 'large':
24 | embed_dim = 192
25 | depths = [2, 2, 18, 2]
26 | num_heads = [6, 12, 24, 48]
27 | else:
28 |         assert False, 'Unsupported swin_type: {}'.format(args.swin_type)
29 | # args.window12 added for test.py because state_dict is loaded after model initialization
30 | if 'window12' in pretrained or args.window12:
31 | print('Window size 12!')
32 | window_size = 12
33 | else:
34 | window_size = 7
35 |
36 | if args.mha:
37 | mha = args.mha.split('-') # if non-empty, then ['a', 'b', 'c', 'd']
38 | mha = [int(a) for a in mha]
39 | else:
40 | mha = [1, 1, 1, 1]
41 |
42 | out_indices = (0, 1, 2, 3)
43 | backbone = MultiModalSwinTransformer(embed_dim=embed_dim, depths=depths, num_heads=num_heads,
44 | window_size=window_size,
45 | ape=False, drop_path_rate=0.3, patch_norm=True,
46 | out_indices=out_indices,
47 | use_checkpoint=False, num_heads_fusion=mha,
48 | fusion_drop=args.fusion_drop
49 | )
50 | if pretrained:
51 | print('Initializing Multi-modal Swin Transformer weights from ' + pretrained)
52 | backbone.init_weights(pretrained=pretrained)
53 | else:
54 | print('Randomly initialize Multi-modal Swin Transformer weights.')
55 | backbone.init_weights()
56 |
57 |
58 | model = DMMI(backbone, Decoder(8*embed_dim))
59 |
60 | return model
61 |
62 | def _load_model_dmmi_swin(pretrained, args):
63 | model = _segm_dmmi_swin(pretrained, args)
64 | return model
65 |
66 |
67 | def dmmi_swin(pretrained='', args=None):
68 | return _load_model_dmmi_swin(pretrained, args)
69 |
70 |
71 | #############################################
72 | # DMMI based on resnet
73 |
74 | def _segm_dmmi_res(pretrained, args):
75 | backbone = MultiModalResNet(pretrained)
76 |
77 | model = DMMI(backbone, Decoder(2048))
78 | return model
79 |
80 |
81 | def _load_model_dmmi_res(pretrained, args):
82 | model = _segm_dmmi_res(pretrained, args)
83 | return model
84 |
85 |
86 | def dmmi_res(pretrained='', args=None):
87 | return _load_model_dmmi_res(pretrained, args)
--------------------------------------------------------------------------------
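
A sketch of how the `dmmi_swin` factory reads its `args`; a `SimpleNamespace` stands in for the argparse namespace from args.py, and the backbone classes themselves are not shown in this listing:

```
from types import SimpleNamespace
from lib.segmentation import dmmi_swin

args = SimpleNamespace(swin_type='base',  # embed_dim=128, depths=[2, 2, 18, 2]
                       window12=True,     # window size 12 instead of the default 7
                       mha='',            # empty -> one fusion head per stage
                       fusion_drop=0.0)
# An empty `pretrained` path means random initialization; pass the path of a
# swin_base_*_window12_* checkpoint to load pre-trained backbone weights.
model = dmmi_swin(pretrained='', args=args)
```
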
/args.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 |
4 | def get_parser():
5 | parser = argparse.ArgumentParser(description='dmmi training and testing')
6 | parser.add_argument('--amsgrad', action='store_true',
7 | help='if true, set amsgrad to True in an Adam or AdamW optimizer.')
8 | parser.add_argument('-b', '--batch-size', default=8, type=int)
9 | parser.add_argument('--bert_tokenizer', default='bert-base-uncased', help='BERT tokenizer')
10 | parser.add_argument('--ck_bert', default='bert-base-uncased', help='pre-trained BERT weights')
11 | parser.add_argument('--dataset', default='refcoco', help='refcoco, refcoco+, or refcocog')
12 | parser.add_argument('--ddp_trained_weights', action='store_true',
13 |                         help='Only needs to be specified when testing: '
14 |                              'whether the weights to be loaded are from a DDP-trained model')
15 | parser.add_argument('--device', default='cuda:0', help='device') # only used when testing on a single machine
16 | parser.add_argument('--epochs', default=40, type=int, metavar='N', help='number of total epochs to run')
17 | parser.add_argument('--fusion_drop', default=0.0, type=float, help='dropout rate for PWAMs')
18 | parser.add_argument('--img_size', default=480, type=int, help='input image size')
19 | parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
20 | parser.add_argument('--lr', default=0.00005, type=float, help='the initial learning rate')
21 | parser.add_argument('--mha', default='', help='If specified, should be in the format of a-b-c-d, e.g., 4-4-4-4,'
22 | 'where a, b, c, and d refer to the numbers of heads in stage-1,'
23 | 'stage-2, stage-3, and stage-4 PWAMs')
24 | parser.add_argument('--model', default='dmmi', help='model: dmmi')
25 | parser.add_argument('--model_id', default='dmmi', help='name to identify the model')
26 |     parser.add_argument('--output_dir', default='./', help='path to save the parameters')
27 | parser.add_argument('--pin_mem', action='store_true',
28 | help='If true, pin memory when using the data loader.')
29 | parser.add_argument('--pretrained_backbone', default='',
30 | help='path to pre-trained Swin backbone weights')
31 | parser.add_argument('--print-freq', default=10, type=int, help='print frequency')
32 | parser.add_argument('--refer_data_root', default='./refer/data/', help='REFER dataset root directory')
33 | parser.add_argument('--resume', default='', help='resume from checkpoint')
34 | parser.add_argument('--test_parameter', default='', help='test from this parameter')
35 | parser.add_argument('--split', default='val', help='only used when testing')
36 | parser.add_argument('--splitBy', default='unc', help='change to umd or google when the dataset is G-Ref (RefCOCOg)')
37 | parser.add_argument('--swin_type', default='base',
38 | help='tiny, small, base, or large variants of the Swin Transformer')
39 | parser.add_argument('--wd', '--weight-decay', default=1e-2, type=float, metavar='W', help='weight decay',
40 | dest='weight_decay')
41 | parser.add_argument('--window12', action='store_true',
42 |                         help='only needs to be specified when testing; '
43 |                              'when training, the window size is inferred from the pre-trained weights file name '
44 |                              '(containing \'window12\'). Initialize Swin with window size 12 instead of the default 7.')
45 | parser.add_argument('-j', '--workers', default=8, type=int, metavar='N', help='number of data loading workers')
46 |
47 | return parser
48 |
49 |
50 | if __name__ == "__main__":
51 | parser = get_parser()
52 | args_dict = parser.parse_args()
53 |
--------------------------------------------------------------------------------
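
Parsing a command line like the one in test.sh, as a quick sketch:

```
from args import get_parser

argv = ['--model', 'dmmi_swin', '--swin_type', 'base',
        '--dataset', 'ref-zom', '--splitBy', 'final', '--split', 'test',
        '--window12', '--img_size', '480']
args = get_parser().parse_args(argv)
print(args.dataset, args.window12, args.img_size)  # ref-zom True 480
```
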
/refer/evaluation/rouge/rouge.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # File Name : rouge.py
4 | #
5 | # Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
6 | #
7 | # Creation Date : 2015-01-07 06:03
8 | # Author : Ramakrishna Vedantam
9 |
10 | import numpy as np
11 | import pdb
12 |
13 | def my_lcs(string, sub):
14 | """
15 | Calculates longest common subsequence for a pair of tokenized strings
16 | :param string : list of str : tokens from a string split using whitespace
17 | :param sub : list of str : shorter string, also split using whitespace
18 |     :returns: length (int): length of the longest common subsequence between the two strings
19 |
20 | Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
21 | """
22 | if(len(string)< len(sub)):
23 | sub, string = string, sub
24 |
25 | lengths = [[0 for i in range(0,len(sub)+1)] for j in range(0,len(string)+1)]
26 |
27 | for j in range(1,len(sub)+1):
28 | for i in range(1,len(string)+1):
29 | if(string[i-1] == sub[j-1]):
30 | lengths[i][j] = lengths[i-1][j-1] + 1
31 | else:
32 | lengths[i][j] = max(lengths[i-1][j] , lengths[i][j-1])
33 |
34 | return lengths[len(string)][len(sub)]
35 |
36 | class Rouge():
37 | '''
38 | Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
39 |
40 | '''
41 | def __init__(self):
42 |         # vrama91: updated the value below based on discussion with Hovy
43 | self.beta = 1.2
44 |
45 | def calc_score(self, candidate, refs):
46 | """
47 | Compute ROUGE-L score given one candidate and references for an image
48 | :param candidate: str : candidate sentence to be evaluated
49 | :param refs: list of str : COCO reference sentences for the particular image to be evaluated
50 |     :returns score: float (ROUGE-L score for the candidate evaluated against references)
51 | """
52 | assert(len(candidate)==1)
53 | assert(len(refs)>0)
54 | prec = []
55 | rec = []
56 |
57 | # split into tokens
58 | token_c = candidate[0].split(" ")
59 |
60 | for reference in refs:
61 | # split into tokens
62 | token_r = reference.split(" ")
63 | # compute the longest common subsequence
64 | lcs = my_lcs(token_r, token_c)
65 | prec.append(lcs/float(len(token_c)))
66 | rec.append(lcs/float(len(token_r)))
67 |
68 | prec_max = max(prec)
69 | rec_max = max(rec)
70 |
71 | if(prec_max!=0 and rec_max !=0):
72 | score = ((1 + self.beta**2)*prec_max*rec_max)/float(rec_max + self.beta**2*prec_max)
73 | else:
74 | score = 0.0
75 | return score
76 |
77 | def compute_score(self, gts, res):
78 | """
79 | Computes Rouge-L score given a set of reference and candidate sentences for the dataset
80 | Invoked by evaluate_captions.py
81 | :param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
82 | :param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
83 | :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
84 | """
85 | assert(gts.keys() == res.keys())
86 | imgIds = gts.keys()
87 |
88 | score = []
89 | for id in imgIds:
90 | hypo = res[id]
91 | ref = gts[id]
92 |
93 |             score.append(self.calc_score(hypo, ref))
94 | 
95 |             # Sanity check.
96 |             assert(type(hypo) is list)
97 |             assert(len(hypo) == 1)
98 |             assert(type(ref) is list)
99 |             assert(len(ref) > 0)
100 |
101 | average_score = np.mean(np.array(score))
102 | return average_score, np.array(score)
103 |
104 | def method(self):
105 | return "Rouge"
106 |
--------------------------------------------------------------------------------
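
A toy ROUGE-L computation with the class above (imported with this rouge directory on `sys.path`); inputs are whitespace-tokenized strings, and the 5-token LCS is worked out by hand:

```
from rouge import Rouge

hypo = {0: ['the cat sat on the mat']}
refs = {0: ['the cat is on the mat', 'a cat lies on a mat']}

# LCS('the cat sat on the mat', 'the cat is on the mat') is 5 tokens
# ('the cat on the mat'), so prec = rec = 5/6 and the F-score is 5/6.
avg, per_image = Rouge().compute_score(refs, hypo)
print(avg)  # ~0.833
```
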
/refer/external/mask.py:
--------------------------------------------------------------------------------
1 | __author__ = 'tsungyi'
2 |
3 | import external._mask as _mask
4 |
5 | # Interface for manipulating masks stored in RLE format.
6 | #
7 | # RLE is a simple yet efficient format for storing binary masks. RLE
8 | # first divides a vector (or vectorized image) into a series of piecewise
9 | # constant regions and then for each piece simply stores the length of
10 | # that piece. For example, given M=[0 0 1 1 1 0 1] the RLE counts would
11 | # be [2 3 1 1], or for M=[1 1 1 1 1 1 0] the counts would be [0 6 1]
12 | # (note that the odd counts are always the numbers of zeros). Instead of
13 | # storing the counts directly, additional compression is achieved with a
14 | # variable bitrate representation based on a common scheme called LEB128.
15 | #
16 | # Compression is greatest given large piecewise constant regions.
17 | # Specifically, the size of the RLE is proportional to the number of
18 | # *boundaries* in M (or for an image the number of boundaries in the y
19 | # direction). Assuming fairly simple shapes, the RLE representation is
20 | # O(sqrt(n)) where n is number of pixels in the object. Hence space usage
21 | # is substantially lower, especially for large simple objects (large n).
22 | #
23 | # Many common operations on masks can be computed directly using the RLE
24 | # (without need for decoding). This includes computations such as area,
25 | # union, intersection, etc. All of these operations are linear in the
26 | # size of the RLE, in other words they are O(sqrt(n)) where n is the area
27 | # of the object. Computing these operations on the original mask is O(n).
28 | # Thus, using the RLE can result in substantial computational savings.
29 | #
30 | # The following API functions are defined:
31 | # encode - Encode binary masks using RLE.
32 | # decode - Decode binary masks encoded via RLE.
33 | # merge - Compute union or intersection of encoded masks.
34 | # iou - Compute intersection over union between masks.
35 | # area - Compute area of encoded masks.
36 | # toBbox - Get bounding boxes surrounding encoded masks.
37 | # frPyObjects - Convert polygon, bbox, and uncompressed RLE to encoded RLE mask.
38 | #
39 | # Usage:
40 | # Rs = encode( masks )
41 | # masks = decode( Rs )
42 | # R = merge( Rs, intersect=false )
43 | # o = iou( dt, gt, iscrowd )
44 | # a = area( Rs )
45 | # bbs = toBbox( Rs )
46 | # Rs = frPyObjects( [pyObjects], h, w )
47 | #
48 | # In the API the following formats are used:
49 | # Rs - [dict] Run-length encoding of binary masks
50 | # R - dict Run-length encoding of binary mask
51 | # masks - [hxwxn] Binary mask(s) (must have type np.ndarray(dtype=uint8) in column-major order)
52 | # iscrowd - [nx1] list of np.ndarray. 1 indicates corresponding gt image has crowd region to ignore
53 | # bbs - [nx4] Bounding box(es) stored as [x y w h]
54 | # poly - Polygon stored as [[x1 y1 x2 y2...],[x1 y1 ...],...] (2D list)
55 | # dt,gt - May be either bounding boxes or encoded masks
56 | # Both poly and bbs are 0-indexed (bbox=[0 0 1 1] encloses first pixel).
57 | #
58 | # Finally, a note about the intersection over union (iou) computation.
59 | # The standard iou of a ground truth (gt) and detected (dt) object is
60 | # iou(gt,dt) = area(intersect(gt,dt)) / area(union(gt,dt))
61 | # For "crowd" regions, we use a modified criteria. If a gt object is
62 | # marked as "iscrowd", we allow a dt to match any subregion of the gt.
63 | # Choosing gt' in the crowd gt that best matches the dt can be done using
64 | # gt'=intersect(dt,gt). Since by definition union(gt',dt)=dt, computing
65 | # iou(gt,dt,iscrowd) = iou(gt',dt) = area(intersect(gt,dt)) / area(dt)
66 | # For crowd gt regions we use this modified criteria above for the iou.
67 | #
68 | # To compile run "python setup.py build_ext --inplace"
69 | # Please do not contact us for help with compiling.
70 | #
71 | # Microsoft COCO Toolbox. version 2.0
72 | # Data, paper, and tutorials available at: http://mscoco.org/
73 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
74 | # Licensed under the Simplified BSD License [see coco/license.txt]
75 |
76 | encode = _mask.encode
77 | decode = _mask.decode
78 | iou = _mask.iou
79 | merge = _mask.merge
80 | area = _mask.area
81 | toBbox = _mask.toBbox
82 | frPyObjects = _mask.frPyObjects
--------------------------------------------------------------------------------
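
An encode/decode round trip with the API above, assuming the Cython extension has been built (`python setup.py build_ext --inplace` in refer/) and the snippet is run from the refer/ directory:

```
import numpy as np
from external import mask as maskUtils

m = np.zeros((4, 4, 1), dtype=np.uint8, order='F')  # column-major, h x w x n
m[1:3, 1:3, 0] = 1                                  # 2x2 foreground square
rles = maskUtils.encode(m)        # [{'size': [4, 4], 'counts': ...}]
print(maskUtils.area(rles))       # [4]
print(maskUtils.toBbox(rles))     # [[1. 1. 2. 2.]] as [x y w h]
assert (maskUtils.decode(rles) == m).all()
```
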
/bert/sample_text.txt:
--------------------------------------------------------------------------------
1 | This text is included to make sure Unicode is handled properly: 力加勝北区ᴵᴺᵀᵃছজটডণত
2 | Text should be one-sentence-per-line, with empty lines between documents.
3 | This sample text is public domain and was randomly selected from Project Gutenberg.
4 |
5 | The rain had only ceased with the gray streaks of morning at Blazing Star, and the settlement awoke to a moral sense of cleanliness, and the finding of forgotten knives, tin cups, and smaller camp utensils, where the heavy showers had washed away the debris and dust heaps before the cabin doors.
6 | Indeed, it was recorded in Blazing Star that a fortunate early riser had once picked up on the highway a solid chunk of gold quartz which the rain had freed from its incumbering soil, and washed into immediate and glittering popularity.
7 | Possibly this may have been the reason why early risers in that locality, during the rainy season, adopted a thoughtful habit of body, and seldom lifted their eyes to the rifted or india-ink washed skies above them.
8 | "Cass" Beard had risen early that morning, but not with a view to discovery.
9 | A leak in his cabin roof,--quite consistent with his careless, improvident habits,--had roused him at 4 A. M., with a flooded "bunk" and wet blankets.
10 | The chips from his wood pile refused to kindle a fire to dry his bed-clothes, and he had recourse to a more provident neighbor's to supply the deficiency.
11 | This was nearly opposite.
12 | Mr. Cassius crossed the highway, and stopped suddenly.
13 | Something glittered in the nearest red pool before him.
14 | Gold, surely!
15 | But, wonderful to relate, not an irregular, shapeless fragment of crude ore, fresh from Nature's crucible, but a bit of jeweler's handicraft in the form of a plain gold ring.
16 | Looking at it more attentively, he saw that it bore the inscription, "May to Cass."
17 | Like most of his fellow gold-seekers, Cass was superstitious.
18 |
19 | The fountain of classic wisdom, Hypatia herself.
20 | As the ancient sage--the name is unimportant to a monk--pumped water nightly that he might study by day, so I, the guardian of cloaks and parasols, at the sacred doors of her lecture-room, imbibe celestial knowledge.
21 | From my youth I felt in me a soul above the matter-entangled herd.
22 | She revealed to me the glorious fact, that I am a spark of Divinity itself.
23 | A fallen star, I am, sir!' continued he, pensively, stroking his lean stomach--'a fallen star!--fallen, if the dignity of philosophy will allow of the simile, among the hogs of the lower world--indeed, even into the hog-bucket itself. Well, after all, I will show you the way to the Archbishop's.
24 | There is a philosophic pleasure in opening one's treasures to the modest young.
25 | Perhaps you will assist me by carrying this basket of fruit?' And the little man jumped up, put his basket on Philammon's head, and trotted off up a neighbouring street.
26 | Philammon followed, half contemptuous, half wondering at what this philosophy might be, which could feed the self-conceit of anything so abject as his ragged little apish guide;
27 | but the novel roar and whirl of the street, the perpetual stream of busy faces, the line of curricles, palanquins, laden asses, camels, elephants, which met and passed him, and squeezed him up steps and into doorways, as they threaded their way through the great Moon-gate into the ample street beyond, drove everything from his mind but wondering curiosity, and a vague, helpless dread of that great living wilderness, more terrible than any dead wilderness of sand which he had left behind.
28 | Already he longed for the repose, the silence of the Laura--for faces which knew him and smiled upon him; but it was too late to turn back now.
29 | His guide held on for more than a mile up the great main street, crossed in the centre of the city, at right angles, by one equally magnificent, at each end of which, miles away, appeared, dim and distant over the heads of the living stream of passengers, the yellow sand-hills of the desert;
30 | while at the end of the vista in front of them gleamed the blue harbour, through a network of countless masts.
31 | At last they reached the quay at the opposite end of the street;
32 | and there burst on Philammon's astonished eyes a vast semicircle of blue sea, ringed with palaces and towers.
33 | He stopped involuntarily; and his little guide stopped also, and looked askance at the young monk, to watch the effect which that grand panorama should produce on him.
34 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RIS-DMMI
2 | This repository provides the PyTorch implementation of DMMI, introduced in the following paper:
3 | __Beyond One-to-One: Rethinking the Referring Image Segmentation (ICCV 2023)__
4 |
5 | # News
6 | * 2023.10.03 - The final version of our dataset has been released. Please remember to download the latest version.
7 | * 2023.10.03 - We have released our code.
8 |
9 | # Dataset
10 | We collect a new comprehensive dataset, Ref-ZOM (**Z**ero/**O**ne/**M**any), which contains image-text pairs under one-to-zero, one-to-one and one-to-many conditions. As with RefCOCO, RefCOCO+ and G-Ref, all the images in Ref-ZOM are selected from the COCO dataset. Here, we provide the text, image and annotation information of Ref-ZOM, which should be used together with the COCO_trainval2014 images.
11 | Our dataset can be downloaded from:
12 | [[Baidu Cloud](https://pan.baidu.com/s/1CxPYGWEadHhcViTH2iI7jw?pwd=g7uu)] [[Google Drive](https://drive.google.com/drive/folders/1FaH6U5pywSf0Ufnn4lYIVaykYxqU2vrA?usp=sharing)]
13 | Remember to download the original COCO dataset from:
14 | [[COCO Download](https://cocodataset.org/#download)]
15 |
16 | # Code
17 |
18 | **Prepare**
19 | * Download COCO_train2014 and COCO_val2014, and merge the two datasets into a new folder "trainval2014". Then, at line 52 of `/refer/refer.py`, set `self.Image_DIR` to the path of this folder.
20 | * Download "Ref-ZOM(final).p" and rename it to "refs(final).p". Then put refs(final).p and instances.json into `/refer/data/ref-zom/`.
21 | * Prepare BERT in the same way as [LAVT](https://github.com/yz93/LAVT-RIS)
22 | * Prepare RefCOCO, RefCOCO+ and RefCOCOg in the same way as [LAVT](https://github.com/yz93/LAVT-RIS)
23 |
24 | **Train**
25 | * Remember to change `--output_dir` and `--pretrained_backbone` to your own paths.
26 | * Use `--model` to select the backbone: 'dmmi_swin' for Swin-Base and 'dmmi_res' for ResNet-50.
27 | * Use `--dataset`, `--splitBy` and `--split` to select the dataset as follows:
28 | ```
29 | # Refcoco
30 | --dataset refcoco, --splitBy unc, --split val
31 | # Refcoco+
32 | --dataset refcoco+, --splitBy unc, --split val
33 | # Refcocog(umd)
34 | --dataset refcocog, --splitBy umd, --split val
35 | # Refcocog(google)
36 | --dataset refcocog, --splitBy google, --split val
37 | # Ref-zom
38 | --dataset ref-zom, --splitBy final, --split test
39 | ```
40 | * Begin training!!
41 | ```
42 | sh train.sh
43 | ```
44 |
45 | **Test**
46 | * Remember to change `--test_parameter` to your own path. Meanwhile, set the `--model`, `--dataset`, `--splitBy` and `--split` properly.
47 | * Begin testing!!
48 | ```
49 | sh test.sh
50 | ```
51 |
52 | # Parameters
53 | **Refcocog(umd)**
54 | | Backbone | oIoU | mIoU | Google Drive |Baidu Cloud |
55 | | ------------- | ------------- | ------------- | ------------- | ------------- |
56 | | ResNet-101 | 59.02 | 62.59 | [Link](https://drive.google.com/file/d/1ziDIeioglD08QQyL-_yGFFlao3PtcJJS/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1uKJ-Wu5TtJhphXNOXo3mIA?pwd=6cgb) |
57 | | Swin-Base | 63.46 | 66.48 | [Link](https://drive.google.com/file/d/1uuGWSYLGYa_qMxTlnZxH6p9FMxQLOQfZ/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1eAT0NgkID4qXpoXMf2bjEg?pwd=bq7w) |
58 |
59 | **Ref-ZOM**
60 | | Backbone | oIoU | mIoU | Google Drive |Baidu Cloud |
61 | | ------------- | ------------- | ------------- | ------------- | ------------- |
62 | | Swin-Base | 68.77 | 68.25 | [Link](https://drive.google.com/file/d/1Ut_E-Fru0bCmjtaC2YhgOLZ7eJorOOpi/view?usp=drive_link) | [Link](https://pan.baidu.com/s/1T-u55rpbc4_CNEXmsA-OJg?pwd=hc6e) |
63 |
64 | # Acknowledgements
65 |
66 | We strongly appreciate the wonderful work of [LAVT](https://github.com/yz93/LAVT-RIS). Our code is partially built on this code base. If you think our work is helpful, we suggest you refer to [LAVT](https://github.com/yz93/LAVT-RIS) and cite it as well.
67 |
68 | # Citation
69 | If you find our work helpful and want to cite it, please use the following citation info.
70 | ```
71 | @InProceedings{Hu_2023_ICCV,
72 | author = {Hu, Yutao and Wang, Qixiong and Shao, Wenqi and Xie, Enze and Li, Zhenguo and Han, Jungong and Luo, Ping},
73 | title = {Beyond One-to-One: Rethinking the Referring Image Segmentation},
74 | booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
75 | month = {October},
76 | year = {2023},
77 | pages = {4067-4077}
78 | }
79 |
80 | ```
--------------------------------------------------------------------------------
/refer/evaluation/refEvaluation.py:
--------------------------------------------------------------------------------
1 | from tokenizer.ptbtokenizer import PTBTokenizer
2 | from bleu.bleu import Bleu
3 | from meteor.meteor import Meteor
4 | from rouge.rouge import Rouge
5 | from cider.cider import Cider
6 |
7 | """
8 | Input: refer and Res = [{ref_id, sent}]
9 |
10 | Things of interest
11 | evalRefs - list of ['ref_id', 'CIDEr', 'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'ROUGE_L', 'METEOR']
12 | eval - dict of {metric: score}
13 | refToEval - dict of {ref_id: ['ref_id', 'CIDEr', 'Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4', 'ROUGE_L', 'METEOR']}
14 | """
15 |
16 | class RefEvaluation:
17 | def __init__ (self, refer, Res):
18 | """
19 | :param refer: refer class of current dataset
20 | :param Res: [{'ref_id', 'sent'}]
21 | """
22 | self.evalRefs = []
23 | self.eval = {}
24 | self.refToEval = {}
25 | self.refer = refer
26 | self.Res = Res
27 |
28 | def evaluate(self):
29 |
30 | evalRefIds = [ann['ref_id'] for ann in self.Res]
31 |
32 | refToGts = {}
33 | for ref_id in evalRefIds:
34 | ref = self.refer.Refs[ref_id]
35 | gt_sents = [sent['sent'].encode('ascii', 'ignore').decode('ascii') for sent in ref['sentences']] # up to 3 expressions
36 | refToGts[ref_id] = gt_sents
37 | refToRes = {ann['ref_id']: [ann['sent']] for ann in self.Res}
38 |
39 |         print('tokenization...')
40 | tokenizer = PTBTokenizer()
41 | self.refToRes = tokenizer.tokenize(refToRes)
42 | self.refToGts = tokenizer.tokenize(refToGts)
43 |
44 | # =================================================
45 | # Set up scorers
46 | # =================================================
47 |         print('setting up scorers...')
48 | scorers = [
49 | (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
50 | (Meteor(),"METEOR"),
51 | (Rouge(), "ROUGE_L"),
52 | (Cider(), "CIDEr")
53 | ]
54 |
55 | # =================================================
56 | # Compute scores
57 | # =================================================
58 | for scorer, method in scorers:
59 |             print('computing %s score...' % scorer.method())
60 | score, scores = scorer.compute_score(self.refToGts, self.refToRes)
61 | if type(method) == list:
62 | for sc, scs, m in zip(score, scores, method):
63 | self.setEval(sc, m)
64 | self.setRefToEvalRefs(scs, self.refToGts.keys(), m)
65 |                     print("%s: %0.3f" % (m, sc))
66 | else:
67 | self.setEval(score, method)
68 | self.setRefToEvalRefs(scores, self.refToGts.keys(), method)
69 | print "%s: %0.3f"%(method, score)
70 | self.setEvalRefs()
71 |
72 | def setEval(self, score, method):
73 | self.eval[method] = score
74 |
75 | def setRefToEvalRefs(self, scores, refIds, method):
76 | for refId, score in zip(refIds, scores):
77 |             if refId not in self.refToEval:
78 | self.refToEval[refId] = {}
79 | self.refToEval[refId]["ref_id"] = refId
80 | self.refToEval[refId][method] = score
81 |
82 | def setEvalRefs(self):
83 |         self.evalRefs = [evalRef for refId, evalRef in self.refToEval.items()]
84 |
85 |
86 | if __name__ == '__main__':
87 |
88 | import os.path as osp
89 | import sys
90 | ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
91 | sys.path.insert(0, osp.join(ROOT_DIR, 'lib', 'datasets'))
92 | from refer import REFER
93 |
94 | # load refer of dataset
95 | dataset = 'refcoco'
96 | refer = REFER(dataset, splitBy = 'google')
97 |
98 | # mimic some Res
99 | val_refIds = refer.getRefIds(split='test')
100 | ref_id = 49767
101 | print "GD: %s" % refer.Refs[ref_id]['sentences']
102 | Res = [{'ref_id': ref_id, 'sent': 'left bottle'}]
103 |
104 | # evaluate some refer expressions
105 | refEval = RefEvaluation(refer, Res)
106 | refEval.evaluate()
107 |
108 | # print output evaluation scores
109 | for metric, score in refEval.eval.items():
110 |         print('%s: %.3f' % (metric, score))
111 |
112 |     # demo how to use evalRefs to retrieve low score results
113 |     # evals = [eva for eva in refEval.evalRefs if eva['CIDEr'] < 30]
114 |     # print('ground truth sents')
115 |     # refId = evals[0]['ref_id']
116 |     # print('refId: %s' % refId)
117 |     # print([sent['sent'] for sent in refer.Refs[refId]['sentences']])
118 |     #
119 |     # print('generated sent (CIDEr score %0.1f)' % evals[0]['CIDEr'])
120 |
121 |     # print(refEval.refToEval[8])
122 |
--------------------------------------------------------------------------------
/transforms.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from PIL import Image
3 | import random
4 |
5 | import torch
6 | from torchvision import transforms as T
7 | from torchvision.transforms import functional as F
8 |
9 | class transform(object):  # paired image/mask pipeline: resize, to-tensor, normalize (image only)
10 | def __init__(self, args):
11 | self.Resize = Resize(args.img_size, args.img_size)
12 | self.ToTensor = ToTensor()
13 | self.Norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
14 |
15 |
16 | def __call__(self, image, target):
17 | image, target = self.Resize(image, target)
18 | image, target = self.ToTensor(image, target)
19 | image, target = self.Norm(image, target)
20 |
21 | return image, target
22 |
23 |
24 |
25 | def pad_if_smaller(img, size, fill=0):
26 | min_size = min(img.size)
27 | if min_size < size:
28 | ow, oh = img.size
29 | padh = size - oh if oh < size else 0
30 | padw = size - ow if ow < size else 0
31 | img = F.pad(img, (0, 0, padw, padh), fill=fill)
32 | return img
33 |
34 |
35 | class Compose(object):
36 | def __init__(self, transforms):
37 | self.transforms = transforms
38 |
39 | def __call__(self, image, target):
40 | for t in self.transforms:
41 | image, target = t(image, target)
42 | return image, target
43 |
44 |
45 | class Resize(object):
46 | def __init__(self, h, w):
47 | self.h = h
48 | self.w = w
49 |
50 | def __call__(self, image, target):
51 | image = F.resize(image, (self.h, self.w))
52 | # If size is a sequence like (h, w), the output size will be matched to this.
53 | # If size is an int, the smaller edge of the image will be matched to this number maintaining the aspect ratio
54 | target = F.resize(target, (self.h, self.w), interpolation=Image.NEAREST)
55 | return image, target
56 |
57 |
58 | class RandomResize(object):
59 | def __init__(self, min_size, max_size=None):
60 | self.min_size = min_size
61 | if max_size is None:
62 | max_size = min_size
63 | self.max_size = max_size
64 |
65 | def __call__(self, image, target):
66 | size = random.randint(self.min_size, self.max_size) # Return a random integer N such that a <= N <= b. Alias for randrange(a, b+1)
67 | image = F.resize(image, size)
68 | # If size is a sequence like (h, w), the output size will be matched to this.
69 | # If size is an int, the smaller edge of the image will be matched to this number maintaining the aspect ratio
70 | target = F.resize(target, size, interpolation=Image.NEAREST)
71 | return image, target
72 |
73 |
74 | class RandomHorizontalFlip(object):
75 | def __init__(self, flip_prob):
76 | self.flip_prob = flip_prob
77 |
78 | def __call__(self, image, target):
79 | if random.random() < self.flip_prob:
80 | image = F.hflip(image)
81 | target = F.hflip(target)
82 | return image, target
83 |
84 |
85 | class RandomCrop(object):
86 | def __init__(self, size):
87 | self.size = size
88 |
89 | def __call__(self, image, target):
90 | image = pad_if_smaller(image, self.size)
91 | target = pad_if_smaller(target, self.size, fill=0)
92 | crop_params = T.RandomCrop.get_params(image, (self.size, self.size))
93 | image = F.crop(image, *crop_params)
94 | target = F.crop(target, *crop_params)
95 | # print(image.size, target.size)
96 | return image, target
97 |
98 |
99 | class CenterCrop(object):
100 | def __init__(self, size):
101 | self.size = size
102 |
103 | def __call__(self, image, target):
104 | image = F.center_crop(image, self.size)
105 | target = F.center_crop(target, self.size)
106 | return image, target
107 |
108 |
109 | class ToTensor(object):
110 | def __call__(self, image, target):
111 | image = F.to_tensor(image)
112 | target = torch.as_tensor(np.asarray(target).copy(), dtype=torch.int64)
113 | return image, target
114 |
115 |
116 | class RandomAffine(object):
117 | def __init__(self, angle, translate, scale, shear, resample=0, fillcolor=None):
118 | self.angle = angle
119 | self.translate = translate
120 | self.scale = scale
121 | self.shear = shear
122 | self.resample = resample
123 | self.fillcolor = fillcolor
124 |
125 | def __call__(self, image, target):
126 | affine_params = T.RandomAffine.get_params(self.angle, self.translate, self.scale, self.shear, image.size)
127 | image = F.affine(image, *affine_params)
128 | target = F.affine(target, *affine_params)
129 | return image, target
130 |
131 |
132 | class Normalize(object):
133 | def __init__(self, mean, std):
134 | self.mean = mean
135 | self.std = std
136 |
137 | def __call__(self, image, target):
138 | image = F.normalize(image, mean=self.mean, std=self.std)
139 | return image, target
140 |
141 |
--------------------------------------------------------------------------------
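A quick usage note on the transforms above: the image and mask are always transformed as a pair, and the mask is resized with NEAREST interpolation so label ids are never blended. Below is a minimal, hypothetical sketch of driving the pipeline; the 480 size and file paths are illustrative placeholders, not values read from this repository's configs.

```python
from PIL import Image
import transforms as T

# The same deterministic pipeline built by get_transform() in test.py:
# resize image and mask together, convert both to tensors, normalize the image only.
pipeline = T.Compose([
    T.Resize(480, 480),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

img = Image.open("example.jpg").convert("RGB")    # placeholder path
mask = Image.open("example_mask.png")             # placeholder single-channel "P" mask
img_t, mask_t = pipeline(img, mask)
# img_t: float32 tensor [3, 480, 480]; mask_t: int64 tensor [480, 480]
```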
/bert/tokenization_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import os
20 | import tempfile
21 | import tokenization
22 | import six
23 | import tensorflow as tf
24 |
25 |
26 | class TokenizationTest(tf.test.TestCase):
27 |
28 | def test_full_tokenizer(self):
29 | vocab_tokens = [
30 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
31 | "##ing", ","
32 | ]
33 | with tempfile.NamedTemporaryFile(delete=False) as vocab_writer:
34 | if six.PY2:
35 | vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
36 | else:
37 | vocab_writer.write("".join(
38 | [x + "\n" for x in vocab_tokens]).encode("utf-8"))
39 |
40 | vocab_file = vocab_writer.name
41 |
42 | tokenizer = tokenization.FullTokenizer(vocab_file)
43 | os.unlink(vocab_file)
44 |
45 | tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
46 | self.assertAllEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
47 |
48 | self.assertAllEqual(
49 | tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
50 |
51 | def test_chinese(self):
52 | tokenizer = tokenization.BasicTokenizer()
53 |
54 | self.assertAllEqual(
55 | tokenizer.tokenize(u"ah\u535A\u63A8zz"),
56 | [u"ah", u"\u535A", u"\u63A8", u"zz"])
57 |
58 | def test_basic_tokenizer_lower(self):
59 | tokenizer = tokenization.BasicTokenizer(do_lower_case=True)
60 |
61 | self.assertAllEqual(
62 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
63 | ["hello", "!", "how", "are", "you", "?"])
64 | self.assertAllEqual(tokenizer.tokenize(u"H\u00E9llo"), ["hello"])
65 |
66 | def test_basic_tokenizer_no_lower(self):
67 | tokenizer = tokenization.BasicTokenizer(do_lower_case=False)
68 |
69 | self.assertAllEqual(
70 | tokenizer.tokenize(u" \tHeLLo!how \n Are yoU? "),
71 | ["HeLLo", "!", "how", "Are", "yoU", "?"])
72 |
73 | def test_wordpiece_tokenizer(self):
74 | vocab_tokens = [
75 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
76 | "##ing"
77 | ]
78 |
79 | vocab = {}
80 | for (i, token) in enumerate(vocab_tokens):
81 | vocab[token] = i
82 | tokenizer = tokenization.WordpieceTokenizer(vocab=vocab)
83 |
84 | self.assertAllEqual(tokenizer.tokenize(""), [])
85 |
86 | self.assertAllEqual(
87 | tokenizer.tokenize("unwanted running"),
88 | ["un", "##want", "##ed", "runn", "##ing"])
89 |
90 | self.assertAllEqual(
91 | tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
92 |
93 | def test_convert_tokens_to_ids(self):
94 | vocab_tokens = [
95 | "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
96 | "##ing"
97 | ]
98 |
99 | vocab = {}
100 | for (i, token) in enumerate(vocab_tokens):
101 | vocab[token] = i
102 |
103 | self.assertAllEqual(
104 | tokenization.convert_tokens_to_ids(
105 | vocab, ["un", "##want", "##ed", "runn", "##ing"]), [7, 4, 5, 8, 9])
106 |
107 | def test_is_whitespace(self):
108 | self.assertTrue(tokenization._is_whitespace(u" "))
109 | self.assertTrue(tokenization._is_whitespace(u"\t"))
110 | self.assertTrue(tokenization._is_whitespace(u"\r"))
111 | self.assertTrue(tokenization._is_whitespace(u"\n"))
112 | self.assertTrue(tokenization._is_whitespace(u"\u00A0"))
113 |
114 | self.assertFalse(tokenization._is_whitespace(u"A"))
115 | self.assertFalse(tokenization._is_whitespace(u"-"))
116 |
117 | def test_is_control(self):
118 | self.assertTrue(tokenization._is_control(u"\u0005"))
119 |
120 | self.assertFalse(tokenization._is_control(u"A"))
121 | self.assertFalse(tokenization._is_control(u" "))
122 | self.assertFalse(tokenization._is_control(u"\t"))
123 | self.assertFalse(tokenization._is_control(u"\r"))
124 | self.assertFalse(tokenization._is_control(u"\U0001F4A9"))
125 |
126 | def test_is_punctuation(self):
127 | self.assertTrue(tokenization._is_punctuation(u"-"))
128 | self.assertTrue(tokenization._is_punctuation(u"$"))
129 | self.assertTrue(tokenization._is_punctuation(u"`"))
130 | self.assertTrue(tokenization._is_punctuation(u"."))
131 |
132 | self.assertFalse(tokenization._is_punctuation(u"A"))
133 | self.assertFalse(tokenization._is_punctuation(u" "))
134 |
135 |
136 | if __name__ == "__main__":
137 | tf.test.main()
138 |
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import torch.utils.data
3 |
4 | from bert.modeling_bert import BertModel
5 | from data.dataset_zom import Referzom_Dataset
6 | from data.dataset import ReferDataset
7 |
8 | from lib import segmentation
9 | import transforms as T
10 | import utils
11 |
12 | import numpy as np
13 | import torch.nn.functional as F
14 | import pdb
15 |
16 |
17 | def get_dataset(image_set, transform, args, eval_mode):
18 | if args.dataset == 'ref-zom':
19 | ds = Referzom_Dataset(args,
20 | split=image_set,
21 | image_transforms=transform,
22 | target_transforms=None,
23 | eval_mode=eval_mode
24 | )
25 | else:
26 | ds = ReferDataset(args,
27 | split=image_set,
28 | image_transforms=transform,
29 | target_transforms=None,
30 | eval_mode=eval_mode
31 | )
32 | num_classes = 2
33 |
34 | return ds, num_classes
35 |
36 |
37 | def evaluate(model, data_loader, bert_model, device):
38 | model.eval()
39 | metric_logger = utils.MetricLogger(delimiter=" ")
40 |
41 | # evaluation variables
42 | cum_I, cum_U = 0, 0
43 | eval_seg_iou_list = [.5, .6, .7, .8, .9]
44 | seg_correct = np.zeros(len(eval_seg_iou_list), dtype=np.int32)
45 | seg_total = 0
46 | mean_IoU = []
47 | mean_acc = []
48 |
49 | header = 'Test:'
50 |
51 | with torch.no_grad():
52 | for data in metric_logger.log_every(data_loader, 100, header):
53 | image, target, source_type, sentences, sentences_masked, attentions = data
54 |
55 | image, target, sentences, attentions = image.to(device), target.to(device), \
56 | sentences.to(device), attentions.to(device)
57 | sentences = sentences.squeeze(1)
58 | attentions = attentions.squeeze(1)
59 | target = target.cpu().data.numpy()
60 | for j in range(sentences.size(-1)):
61 |
62 | last_hidden_states = bert_model(sentences[:, :, j], attention_mask=attentions[:, :, j])[0]
63 | embedding = last_hidden_states.permute(0, 2, 1)
64 | output = model(image, embedding, embedding, l_mask=attentions[:, :, j].unsqueeze(-1))[2]
65 |
66 | output = output.cpu()
67 | output_mask = output.argmax(1).data.numpy()
68 |
69 | if source_type[0] == 'zero':
70 | incorrect_num = np.sum(output_mask)
71 | if incorrect_num == 0:
72 | acc = 1
73 | else:
74 | acc = 0
75 | mean_acc.append(acc)
76 | else:
77 | I, U = computeIoU(output_mask, target)
78 |
79 | if U == 0:
80 | this_iou = 0.0
81 | else:
82 | this_iou = I*1.0/U
83 | mean_IoU.append(this_iou)
84 | cum_I += I
85 | cum_U += U
86 |
87 | for n_eval_iou in range(len(eval_seg_iou_list)):
88 | eval_seg_iou = eval_seg_iou_list[n_eval_iou]
89 | seg_correct[n_eval_iou] += (this_iou >= eval_seg_iou)
90 |
91 | seg_total += 1
92 |
93 | del image, target, sentences, attentions, output, output_mask
94 | if bert_model is not None:
95 | del last_hidden_states, embedding
96 |
97 | mean_IoU = np.array(mean_IoU)
98 | mIoU = np.mean(mean_IoU)
99 |
100 |
101 | mean_acc = np.array(mean_acc)
102 | mean_acc = np.mean(mean_acc)
103 |
104 | print('Final results:')
105 |
106 | results_str = ''
107 | for n_eval_iou in range(len(eval_seg_iou_list)):
108 | results_str += ' precision@%s = %.2f\n' % \
109 | (str(eval_seg_iou_list[n_eval_iou]), seg_correct[n_eval_iou] * 100. / seg_total)
110 | results_str += ' overall IoU = %.2f\n' % (cum_I * 100. / cum_U)
111 | results_str += ' mean IoU = %.2f\n' % (mIoU * 100.)
112 | print(results_str)
113 |     if args.dataset == 'ref-zom':  # relies on the module-level `args` parsed in __main__
114 |         print('Mean accuracy for one-to-zero samples is %.2f\n' % (mean_acc * 100))
115 |
116 | def get_transform(args):
117 | transforms = [T.Resize(args.img_size, args.img_size),
118 | T.ToTensor(),
119 | T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
120 | ]
121 |
122 | return T.Compose(transforms)
123 |
124 |
125 | def computeIoU(pred_seg, gd_seg):
126 | I = np.sum(np.logical_and(pred_seg, gd_seg))
127 | U = np.sum(np.logical_or(pred_seg, gd_seg))
128 |
129 | return I, U
130 |
131 |
132 | def main(args):
133 | device = torch.device(args.device)
134 | dataset_test, _ = get_dataset(args.split, get_transform(args=args), args, eval_mode=True)
135 | test_sampler = torch.utils.data.SequentialSampler(dataset_test)
136 | data_loader_test = torch.utils.data.DataLoader(dataset_test, batch_size=1,
137 | sampler=test_sampler, num_workers=args.workers)
138 | print(args.model)
139 |     single_model = segmentation.__dict__[args.model](pretrained='', args=args)
140 | checkpoint = torch.load(args.test_parameter, map_location='cpu')
141 | single_model.load_state_dict(checkpoint['model'])
142 | model = single_model.to(device)
143 |
144 | model_class = BertModel
145 | single_bert_model = model_class.from_pretrained(args.ck_bert)
146 | # work-around for a transformers bug; need to update to a newer version of transformers to remove these two lines
147 | if args.ddp_trained_weights:
148 | single_bert_model.pooler = None
149 | single_bert_model.load_state_dict(checkpoint['bert_model'])
150 | bert_model = single_bert_model.to(device)
151 |
152 |
153 | evaluate(model, data_loader_test, bert_model, device=device)
154 |
155 |
156 | if __name__ == "__main__":
157 | from args import get_parser
158 | parser = get_parser()
159 | args = parser.parse_args()
160 | print('Image size: {}'.format(str(args.img_size)))
161 | main(args)
--------------------------------------------------------------------------------
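For a concrete feel of the metrics computed in `evaluate()` above, the sketch below reproduces the `computeIoU` arithmetic on two toy binary masks; the arrays are invented for illustration.

```python
import numpy as np

def compute_iou(pred_seg, gd_seg):
    # same logic as computeIoU in test.py
    I = np.sum(np.logical_and(pred_seg, gd_seg))
    U = np.sum(np.logical_or(pred_seg, gd_seg))
    return I, U

pred = np.array([[1, 1, 0, 0],
                 [1, 1, 0, 0]])
gt   = np.array([[1, 1, 1, 0],
                 [1, 1, 0, 0]])

I, U = compute_iou(pred, gt)   # I = 4, U = 5
print("IoU = %.2f" % (I / U))  # 0.80: counts toward precision@0.5 through precision@0.8
```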
/lib/mask_predictor.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import nn
3 | from torch.nn import functional as F
4 | import pdb
5 |
6 | class CrossLayerFuse(nn.Module):
7 | def __init__(self, in_dims1, in_dims2, out_dims):
8 | super(CrossLayerFuse, self).__init__()
9 |
10 | self.linear = nn.Linear(in_dims1 + in_dims2, out_dims)
11 | self.adpool = nn.AdaptiveAvgPool2d((1, 1))
12 |
13 | def forward(self, defea, x):
14 | x_pre = defea
15 | x = self.adpool(x).view(x.shape[0], x.shape[1])
16 | x1 = torch.cat([x_pre, x], dim=1)
17 | x1 = self.linear(x1)
18 |
19 | return x1
20 |
21 | class Transformer_Fusion(nn.Module):
22 | def __init__(self, dim=768, nhead=8, num_layers=1):
23 | super(Transformer_Fusion, self).__init__()
24 |
25 | self.decoder_layer = nn.TransformerDecoderLayer(d_model=dim, nhead=nhead)
26 | self.transformer_model = nn.TransformerDecoder(self.decoder_layer, num_layers=num_layers)
27 |
28 | def forward(self, vis, lan_full):
29 |         HH, WW = vis.shape[2], vis.shape[3]  # spatial size of the visual feature map
30 | vis = vis.view(vis.shape[0], vis.shape[1], -1)
31 | vis = vis.permute(2, 0, 1)
32 | lan = lan_full.permute(2, 0, 1)
33 | vis = self.transformer_model(vis, lan)
34 | vis = vis.permute(1, 2, 0)
35 |         vis = vis.view(vis.shape[0], vis.shape[1], HH, WW)
36 |
37 | return vis
38 |
39 |
40 | class Language_Transformer(nn.Module):
41 | def __init__(self, hidden_size, lan_size):
42 | super(Language_Transformer, self).__init__()
43 |
44 | self.decoder_layer = nn.TransformerDecoderLayer(d_model=768, nhead=8)
45 | self.transformer_model = nn.TransformerDecoder(self.decoder_layer, num_layers=1)
46 | self.conv1 = nn.Conv2d(hidden_size, lan_size, 3, padding=1, bias=False)
47 | self.bn1 = nn.BatchNorm2d(lan_size)
48 | self.relu1 = nn.ReLU()
49 |
50 | def forward(self, vis, lan):
51 |
52 | vis = self.conv1(vis)
53 | vis = self.bn1(vis)
54 | vis = self.relu1(vis)
55 | vis = vis.view(vis.shape[0], vis.shape[1], -1)
56 | vis = vis.permute(2, 0, 1)
57 | lan = lan.permute(2, 0, 1)
58 | out = self.transformer_model(lan, vis)
59 | out = out.permute(1, 2, 0)
60 |
61 | return out
62 |
63 |
64 | class Decoder(nn.Module):
65 | def __init__(self, c4_dims, factor=2):
66 | super(Decoder, self).__init__()
67 |
68 | lan_size = 768
69 | hidden_size = lan_size
70 | c4_size = c4_dims
71 | c3_size = c4_dims//(factor**1)
72 | c2_size = c4_dims//(factor**2)
73 | c1_size = c4_dims//(factor**3)
74 |
75 | self.adpool = nn.AdaptiveAvgPool2d((1, 1))
76 |
77 | self.conv1_4 = nn.Conv2d(c4_size+c3_size, hidden_size, 3, padding=1, bias=False)
78 | self.bn1_4 = nn.BatchNorm2d(hidden_size)
79 | self.relu1_4 = nn.ReLU()
80 | self.conv2_4 = nn.Conv2d(hidden_size, hidden_size, 3, padding=1, bias=False)
81 | self.bn2_4 = nn.BatchNorm2d(hidden_size)
82 | self.relu2_4 = nn.ReLU()
83 |
84 | self.transformer_fusion1 = Transformer_Fusion(dim=768, nhead=8, num_layers=1)
85 |
86 | self.conv1_3 = nn.Conv2d(hidden_size + c2_size, hidden_size, 3, padding=1, bias=False)
87 | self.bn1_3 = nn.BatchNorm2d(hidden_size)
88 | self.relu1_3 = nn.ReLU()
89 | self.conv2_3 = nn.Conv2d(hidden_size, hidden_size, 3, padding=1, bias=False)
90 | self.bn2_3 = nn.BatchNorm2d(hidden_size)
91 | self.relu2_3 = nn.ReLU()
92 | self.crossfuse1 = CrossLayerFuse(hidden_size, hidden_size, lan_size)
93 | self.transformer_fusion2 = Transformer_Fusion(dim=768, nhead=8, num_layers=1)
94 |
95 |
96 | self.conv1_2 = nn.Conv2d(hidden_size + c1_size, hidden_size, 3, padding=1, bias=False)
97 | self.bn1_2 = nn.BatchNorm2d(hidden_size)
98 | self.relu1_2 = nn.ReLU()
99 | self.conv2_2 = nn.Conv2d(hidden_size, hidden_size, 3, padding=1, bias=False)
100 | self.bn2_2 = nn.BatchNorm2d(hidden_size)
101 | self.relu2_2 = nn.ReLU()
102 |
103 | self.conv1_1 = nn.Conv2d(hidden_size, 2, 1)
104 | self.lan_func = Language_Transformer(hidden_size, lan_size=768)
105 | self.crossfuse2 = CrossLayerFuse(lan_size, hidden_size, lan_size)
106 |
107 |
108 | def forward(self, lan_full, lan, x_c4, x_c3, x_c2, x_c1):
109 | # fuse Y4 and Y3
110 | if x_c4.size(-2) < x_c3.size(-2) or x_c4.size(-1) < x_c3.size(-1):
111 | x_c4 = F.interpolate(input=x_c4, size=(x_c3.size(-2), x_c3.size(-1)), mode='bilinear', align_corners=True)
112 | x = torch.cat([x_c4, x_c3], dim=1)
113 | x = self.conv1_4(x)
114 | x = self.bn1_4(x)
115 | x = self.relu1_4(x)
116 | x = self.conv2_4(x)
117 | x = self.bn2_4(x)
118 |         x = self.relu2_4(x)  # [B, 768, 30, 30] for a 480x480 input
119 | de_feat = self.adpool(x).view(x.shape[0], x.shape[1])
120 |
121 |
122 |
123 | x = self.transformer_fusion1(x, lan_full)
124 |
125 | # fuse top-down features and Y2 features and pre1
126 | if x.size(-2) < x_c2.size(-2) or x.size(-1) < x_c2.size(-1):
127 | x = F.interpolate(input=x, size=(x_c2.size(-2), x_c2.size(-1)), mode='bilinear', align_corners=True)
128 | x = torch.cat([x, x_c2], dim=1)
129 | x = self.conv1_3(x)
130 | x = self.bn1_3(x)
131 | x = self.relu1_3(x)
132 | x = self.conv2_3(x)
133 | x = self.bn2_3(x)
134 |         x = self.relu2_3(x)  # [B, 768, 60, 60] for a 480x480 input
135 |
136 | new_lan = self.lan_func(x, lan)
137 | de_feat = self.crossfuse1(de_feat, x)
138 |
139 | x = self.transformer_fusion2(x, lan_full)
140 |
141 | # fuse top-down features and Y1 features
142 | if x.size(-2) < x_c1.size(-2) or x.size(-1) < x_c1.size(-1):
143 | x = F.interpolate(input=x, size=(x_c1.size(-2), x_c1.size(-1)), mode='bilinear', align_corners=True)
144 | x = torch.cat([x, x_c1], dim=1)
145 | x = self.conv1_2(x)
146 | x = self.bn1_2(x)
147 | x = self.relu1_2(x)
148 | x = self.conv2_2(x)
149 | x = self.bn2_2(x)
150 |         x = self.relu2_2(x)  # [B, 768, 120, 120] for a 480x480 input
151 | de_feat = self.crossfuse2(de_feat, x)
152 |
153 | return de_feat, new_lan, self.conv1_1(x)
154 |
--------------------------------------------------------------------------------
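To make the decoder's tensor contract explicit, here is a minimal forward-pass sketch with dummy inputs. The channel widths assume `c4_dims=1024` with `factor=2` (a Swin-Base-like feature pyramid) and a 480x480 input; these numbers are assumptions for illustration, not values read from this repository's configs.

```python
import torch
from lib.mask_predictor import Decoder

B, T = 2, 20                                 # batch size and language tokens (assumed)
decoder = Decoder(c4_dims=1024, factor=2).eval()

lan_full = torch.randn(B, 768, T)            # token-level language features
lan      = torch.randn(B, 768, T)            # language features for Language_Transformer
x_c4 = torch.randn(B, 1024, 15, 15)          # coarsest visual features (stride 32)
x_c3 = torch.randn(B,  512, 30, 30)          # stride 16
x_c2 = torch.randn(B,  256, 60, 60)          # stride 8
x_c1 = torch.randn(B,  128, 120, 120)        # stride 4

with torch.no_grad():
    de_feat, new_lan, logits = decoder(lan_full, lan, x_c4, x_c3, x_c2, x_c1)

print(de_feat.shape, new_lan.shape, logits.shape)
# torch.Size([2, 768]) torch.Size([2, 768, 20]) torch.Size([2, 2, 120, 120])
```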
/data/dataset.py:
--------------------------------------------------------------------------------
1 | import os
2 | import torch.utils.data as data
3 | import torch
4 | import numpy as np
5 | from PIL import Image
6 | import pdb
7 | import copy
8 | from random import choice
9 | from bert.tokenization_bert import BertTokenizer
10 | from textblob import TextBlob
11 |
12 | from refer.refer import REFER
13 |
14 | from args import get_parser
15 |
16 | # Dataset configuration initialization
17 | parser = get_parser()
18 | args = parser.parse_args()
19 |
20 |
21 | class ReferDataset(data.Dataset):
22 |
23 | def __init__(self,
24 | args,
25 | image_transforms=None,
26 | target_transforms=None,
27 | split='train',
28 | eval_mode=False):
29 |
30 | self.classes = []
31 | self.image_transforms = image_transforms
32 | self.target_transform = target_transforms
33 | self.split = split
34 | self.refer = REFER(args.refer_data_root, args.dataset, args.splitBy)
35 | self.dataset_type = args.dataset
36 | self.max_tokens = 20
37 | ref_ids = self.refer.getRefIds(split=self.split)
38 | self.img_ids = self.refer.getImgIds()
39 |
40 | all_imgs = self.refer.Imgs
41 | self.imgs = list(all_imgs[i] for i in self.img_ids)
42 | self.ref_ids = ref_ids
43 |
44 |
45 | self.input_ids = []
46 | self.input_ids_masked = []
47 | self.attention_masks = []
48 | self.tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer)
49 |
50 | self.eval_mode = eval_mode
51 |
52 | for r in ref_ids:
53 | ref = self.refer.Refs[r]
54 |
55 | sentences_for_ref = []
56 | sentences_for_ref_masked = []
57 | attentions_for_ref = []
58 |
59 | for i, (el, sent_id) in enumerate(zip(ref['sentences'], ref['sent_ids'])):
60 | sentence_raw = el['raw']
61 | attention_mask = [0] * self.max_tokens
62 | padded_input_ids = [0] * self.max_tokens
63 | padded_input_ids_masked = [0] * self.max_tokens
64 |
65 | blob = TextBlob(sentence_raw.lower())
66 | chara_list = blob.tags
67 | mask_ops = []
68 | mask_ops1 = []
69 | for word_i, (word_now, chara) in enumerate(chara_list):
70 |                     if chara in ('NN', 'NNS') and word_i < self.max_tokens - 1:  # nouns that fit within the token budget
71 | mask_ops.append(word_i)
72 | mask_ops1.append(word_now)
73 | mask_ops2 = self.get_adjacent_word(mask_ops)
74 |
75 |
76 | input_ids = self.tokenizer.encode(text=sentence_raw, add_special_tokens=True)
77 |
78 | # truncation of tokens
79 | input_ids = input_ids[:self.max_tokens]
80 |
81 | padded_input_ids[:len(input_ids)] = input_ids
82 | attention_mask[:len(input_ids)] = [1]*len(input_ids)
83 | if len(mask_ops) == 0:
84 |                     # nothing to mask in this sentence; keep the original token ids
85 | input_ids_masked = input_ids
86 | else:
87 | could_mask = choice(mask_ops2)
88 | input_ids_masked = copy.deepcopy(input_ids)
89 |                         for idx in could_mask:  # `idx` avoids shadowing the sentence index `i`
90 |                             input_ids_masked[idx + 1] = 0  # +1 offsets past the [CLS] token added by the tokenizer
91 | padded_input_ids_masked[:len(input_ids_masked)] = input_ids_masked
92 |
93 |
94 | sentences_for_ref.append(torch.tensor(padded_input_ids).unsqueeze(0))
95 | sentences_for_ref_masked.append(torch.tensor(padded_input_ids_masked).unsqueeze(0))
96 | attentions_for_ref.append(torch.tensor(attention_mask).unsqueeze(0))
97 |
98 | self.input_ids.append(sentences_for_ref)
99 | self.input_ids_masked.append(sentences_for_ref_masked)
100 | self.attention_masks.append(attentions_for_ref)
101 |
102 |
103 | def get_classes(self):
104 | return self.classes
105 |
106 | def __len__(self):
107 | return len(self.ref_ids)
108 |
109 | def get_adjacent_word(self, mask_list):
110 | output_mask_list = []
111 | length = len(mask_list)
112 | i = 0
113 | while i < length:
114 | begin_pos = i
115 | while i+1 < length and mask_list[i+1] == mask_list[i] + 1:
116 | i += 1
117 | end_pos = i+1
118 | output_mask_list.append(mask_list[begin_pos:end_pos])
119 | i = end_pos
120 |
121 | return output_mask_list
122 |
123 | def __getitem__(self, index):
124 | this_ref_id = self.ref_ids[index]
125 |
126 | this_img_id = self.refer.getImgIds(this_ref_id)
127 | this_img = self.refer.Imgs[this_img_id[0]]
128 |
129 | img = Image.open(os.path.join(self.refer.IMAGE_DIR, this_img['file_name'])).convert("RGB")
130 |
131 | ref = self.refer.loadRefs(this_ref_id)
132 |         if self.dataset_type == 'ref-zom':
133 | source_type = ref[0]['source']
134 | else:
135 | source_type = 'one'
136 |
137 | ref_mask = np.array(self.refer.getMask(ref[0])['mask'])
138 |
139 | annot = np.zeros(ref_mask.shape)
140 | annot[ref_mask == 1] = 1
141 | annot = Image.fromarray(annot.astype(np.uint8), mode="P")
142 |
143 |
144 | if self.image_transforms is not None:
145 |             # the same paired transform is applied for the train, val, and test splits
146 |             img, target = self.image_transforms(img, annot)
152 |
153 | if self.eval_mode:
154 | embedding = []
155 | embedding_masked = []
156 | att = []
157 | for s in range(len(self.input_ids[index])):
158 | e = self.input_ids[index][s]
159 | a = self.attention_masks[index][s]
160 | embedding.append(e.unsqueeze(-1))
161 |                 embedding_masked.append(e.unsqueeze(-1))  # no masking at eval time; masked ids mirror the originals
162 | att.append(a.unsqueeze(-1))
163 |
164 | tensor_embeddings = torch.cat(embedding, dim=-1)
165 | tensor_embeddings_masked = torch.cat(embedding_masked, dim=-1)
166 | attention_mask = torch.cat(att, dim=-1)
167 | else:
168 | choice_sent = np.random.choice(len(self.input_ids[index]))
169 | tensor_embeddings = self.input_ids[index][choice_sent]
170 | tensor_embeddings_masked = self.input_ids_masked[index][choice_sent]
171 | attention_mask = self.attention_masks[index][choice_sent]
172 |
173 | return img, target, source_type, tensor_embeddings, tensor_embeddings_masked, attention_mask
--------------------------------------------------------------------------------
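The noun-masking step above leans on `get_adjacent_word`, which groups consecutive word indices into contiguous runs so that a multi-word noun phrase is masked as one unit. A self-contained sketch of the grouping, with toy indices:

```python
def get_adjacent_word(mask_list):
    # groups consecutive indices into runs, mirroring ReferDataset.get_adjacent_word
    output_mask_list = []
    length = len(mask_list)
    i = 0
    while i < length:
        begin_pos = i
        while i + 1 < length and mask_list[i + 1] == mask_list[i] + 1:
            i += 1
        end_pos = i + 1
        output_mask_list.append(mask_list[begin_pos:end_pos])
        i = end_pos
    return output_mask_list

print(get_adjacent_word([1, 2, 5, 6, 7, 10]))  # [[1, 2], [5, 6, 7], [10]]
# one run is then chosen via random.choice and its token ids are zeroed
# (shifted by +1 to skip [CLS]) to produce the masked sentence
```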
/bert/optimization.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """Functions and classes related to optimization (weight updates)."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import re
22 | import tensorflow as tf
23 |
24 |
25 | def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
26 | """Creates an optimizer training op."""
27 | global_step = tf.train.get_or_create_global_step()
28 |
29 | learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
30 |
31 | # Implements linear decay of the learning rate.
32 | learning_rate = tf.train.polynomial_decay(
33 | learning_rate,
34 | global_step,
35 | num_train_steps,
36 | end_learning_rate=0.0,
37 | power=1.0,
38 | cycle=False)
39 |
40 | # Implements linear warmup. I.e., if global_step < num_warmup_steps, the
41 | # learning rate will be `global_step/num_warmup_steps * init_lr`.
42 | if num_warmup_steps:
43 | global_steps_int = tf.cast(global_step, tf.int32)
44 | warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
45 |
46 | global_steps_float = tf.cast(global_steps_int, tf.float32)
47 | warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
48 |
49 | warmup_percent_done = global_steps_float / warmup_steps_float
50 | warmup_learning_rate = init_lr * warmup_percent_done
51 |
52 | is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
53 | learning_rate = (
54 | (1.0 - is_warmup) * learning_rate + is_warmup * warmup_learning_rate)
55 |
56 | # It is recommended that you use this optimizer for fine tuning, since this
57 | # is how the model was trained (note that the Adam m/v variables are NOT
58 | # loaded from init_checkpoint.)
59 | optimizer = AdamWeightDecayOptimizer(
60 | learning_rate=learning_rate,
61 | weight_decay_rate=0.01,
62 | beta_1=0.9,
63 | beta_2=0.999,
64 | epsilon=1e-6,
65 | exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
66 |
67 | if use_tpu:
68 | optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
69 |
70 | tvars = tf.trainable_variables()
71 | grads = tf.gradients(loss, tvars)
72 |
73 | # This is how the model was pre-trained.
74 | (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
75 |
76 | train_op = optimizer.apply_gradients(
77 | zip(grads, tvars), global_step=global_step)
78 |
79 | # Normally the global step update is done inside of `apply_gradients`.
80 | # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
81 | # a different optimizer, you should probably take this line out.
82 | new_global_step = global_step + 1
83 | train_op = tf.group(train_op, [global_step.assign(new_global_step)])
84 | return train_op
85 |
86 |
87 | class AdamWeightDecayOptimizer(tf.train.Optimizer):
88 | """A basic Adam optimizer that includes "correct" L2 weight decay."""
89 |
90 | def __init__(self,
91 | learning_rate,
92 | weight_decay_rate=0.0,
93 | beta_1=0.9,
94 | beta_2=0.999,
95 | epsilon=1e-6,
96 | exclude_from_weight_decay=None,
97 | name="AdamWeightDecayOptimizer"):
98 | """Constructs a AdamWeightDecayOptimizer."""
99 | super(AdamWeightDecayOptimizer, self).__init__(False, name)
100 |
101 | self.learning_rate = learning_rate
102 | self.weight_decay_rate = weight_decay_rate
103 | self.beta_1 = beta_1
104 | self.beta_2 = beta_2
105 | self.epsilon = epsilon
106 | self.exclude_from_weight_decay = exclude_from_weight_decay
107 |
108 | def apply_gradients(self, grads_and_vars, global_step=None, name=None):
109 | """See base class."""
110 | assignments = []
111 | for (grad, param) in grads_and_vars:
112 | if grad is None or param is None:
113 | continue
114 |
115 | param_name = self._get_variable_name(param.name)
116 |
117 | m = tf.get_variable(
118 | name=param_name + "/adam_m",
119 | shape=param.shape.as_list(),
120 | dtype=tf.float32,
121 | trainable=False,
122 | initializer=tf.zeros_initializer())
123 | v = tf.get_variable(
124 | name=param_name + "/adam_v",
125 | shape=param.shape.as_list(),
126 | dtype=tf.float32,
127 | trainable=False,
128 | initializer=tf.zeros_initializer())
129 |
130 | # Standard Adam update.
131 | next_m = (
132 | tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))
133 | next_v = (
134 | tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,
135 | tf.square(grad)))
136 |
137 | update = next_m / (tf.sqrt(next_v) + self.epsilon)
138 |
139 | # Just adding the square of the weights to the loss function is *not*
140 | # the correct way of using L2 regularization/weight decay with Adam,
141 | # since that will interact with the m and v parameters in strange ways.
142 | #
143 |       # Instead we want to decay the weights in a manner that doesn't interact
144 | # with the m/v parameters. This is equivalent to adding the square
145 | # of the weights to the loss with plain (non-momentum) SGD.
146 | if self._do_use_weight_decay(param_name):
147 | update += self.weight_decay_rate * param
148 |
149 | update_with_lr = self.learning_rate * update
150 |
151 | next_param = param - update_with_lr
152 |
153 | assignments.extend(
154 | [param.assign(next_param),
155 | m.assign(next_m),
156 | v.assign(next_v)])
157 | return tf.group(*assignments, name=name)
158 |
159 | def _do_use_weight_decay(self, param_name):
160 | """Whether to use L2 weight decay for `param_name`."""
161 | if not self.weight_decay_rate:
162 | return False
163 | if self.exclude_from_weight_decay:
164 | for r in self.exclude_from_weight_decay:
165 | if re.search(r, param_name) is not None:
166 | return False
167 | return True
168 |
169 | def _get_variable_name(self, param_name):
170 | """Get the variable name from the tensor name."""
171 | m = re.match("^(.*):\\d+$", param_name)
172 | if m is not None:
173 | param_name = m.group(1)
174 | return param_name
175 |
--------------------------------------------------------------------------------
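The schedule above is linear warmup followed by linear (power=1.0 polynomial) decay to zero. A plain-Python sketch of the same arithmetic, with illustrative step counts and base rate rather than values from this repo:

```python
def bert_lr(step, init_lr=1e-4, num_train_steps=10000, num_warmup_steps=1000):
    # warmup: lr ramps linearly from 0 to init_lr over the first num_warmup_steps
    if step < num_warmup_steps:
        return init_lr * step / num_warmup_steps
    # polynomial decay with power=1.0 and end_learning_rate=0.0 == linear decay
    return init_lr * (1.0 - min(step, num_train_steps) / num_train_steps)

for s in (0, 500, 1000, 5000, 10000):
    print(s, "%.6f" % bert_lr(s))
# 0 0.000000 | 500 0.000050 | 1000 0.000090 | 5000 0.000050 | 10000 0.000000
```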
/utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from collections import defaultdict, deque
3 | import datetime
4 | import time
5 | import torch
6 | import torch.distributed as dist
7 |
8 | import errno
9 | import os
10 |
11 | import sys
12 |
13 |
14 |
15 | class SmoothedValue(object):
16 | """Track a series of values and provide access to smoothed values over a
17 | window or the global series average.
18 | """
19 |
20 | def __init__(self, window_size=100, fmt=None):
21 | if fmt is None:
22 | fmt = "{median:.4f} ({global_avg:.4f})"
23 | self.deque = deque(maxlen=window_size)
24 | self.total = 0.0
25 | self.count = 0
26 | self.fmt = fmt
27 |
28 | def update(self, value, n=1):
29 | self.deque.append(value)
30 | self.count += n
31 | self.total += value * n
32 |
33 | def synchronize_between_processes(self):
34 | """
35 | Warning: does not synchronize the deque!
36 | """
37 | if not is_dist_avail_and_initialized():
38 | return
39 | t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
40 | dist.barrier()
41 | dist.all_reduce(t)
42 | t = t.tolist()
43 | self.count = int(t[0])
44 | self.total = t[1]
45 |
46 | @property
47 | def median(self):
48 | d = torch.tensor(list(self.deque))
49 | return d.median().item()
50 |
51 | @property
52 | def avg(self):
53 | d = torch.tensor(list(self.deque), dtype=torch.float32)
54 | return d.mean().item()
55 |
56 | @property
57 | def global_avg(self):
58 | return self.total / self.count
59 |
60 | @property
61 | def max(self):
62 | return max(self.deque)
63 |
64 | @property
65 | def value(self):
66 | return self.deque[-1]
67 |
68 | def __str__(self):
69 | return self.fmt.format(
70 | median=self.median,
71 | avg=self.avg,
72 | global_avg=self.global_avg,
73 | max=self.max,
74 | value=self.value)
75 |
76 |
77 | class MetricLogger(object):
78 | def __init__(self, delimiter="\t"):
79 | self.meters = defaultdict(SmoothedValue)
80 | self.delimiter = delimiter
81 |
82 | def update(self, **kwargs):
83 | for k, v in kwargs.items():
84 | if isinstance(v, torch.Tensor):
85 | v = v.item()
86 | assert isinstance(v, (float, int))
87 | self.meters[k].update(v)
88 |
89 | def __getattr__(self, attr):
90 | if attr in self.meters:
91 | return self.meters[attr]
92 | if attr in self.__dict__:
93 | return self.__dict__[attr]
94 | raise AttributeError("'{}' object has no attribute '{}'".format(
95 | type(self).__name__, attr))
96 |
97 | def __str__(self):
98 | loss_str = []
99 | for name, meter in self.meters.items():
100 | loss_str.append(
101 | "{}: {}".format(name, str(meter))
102 | )
103 | return self.delimiter.join(loss_str)
104 |
105 | def synchronize_between_processes(self):
106 | for meter in self.meters.values():
107 | meter.synchronize_between_processes()
108 |
109 | def add_meter(self, name, meter):
110 | self.meters[name] = meter
111 |
112 | def log_every(self, iterable, print_freq, header=None):
113 | i = 0
114 | if not header:
115 | header = ''
116 | start_time = time.time()
117 | end = time.time()
118 | iter_time = SmoothedValue(fmt='{avg:.4f}')
119 | data_time = SmoothedValue(fmt='{avg:.4f}')
120 | space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
121 | log_msg = self.delimiter.join([
122 | header,
123 | '[{0' + space_fmt + '}/{1}]',
124 | 'eta: {eta}',
125 | '{meters}',
126 | 'time: {time}',
127 | 'data: {data}',
128 | 'max mem: {memory:.0f}'
129 | ])
130 | MB = 1024.0 * 1024.0
131 | for obj in iterable:
132 | data_time.update(time.time() - end)
133 | yield obj
134 | iter_time.update(time.time() - end)
135 | if i % print_freq == 0:
136 | eta_seconds = iter_time.global_avg * (len(iterable) - i)
137 | eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
138 | print(log_msg.format(
139 | i, len(iterable), eta=eta_string,
140 | meters=str(self),
141 | time=str(iter_time), data=str(data_time),
142 | memory=torch.cuda.max_memory_allocated() / MB))
143 | sys.stdout.flush()
144 |
145 | i += 1
146 | end = time.time()
147 | total_time = time.time() - start_time
148 | total_time_str = str(datetime.timedelta(seconds=int(total_time)))
149 | print('{} Total time: {}'.format(header, total_time_str))
150 |
151 |
152 | def mkdir(path):
153 | try:
154 | os.makedirs(path)
155 | except OSError as e:
156 | if e.errno != errno.EEXIST:
157 | raise
158 |
159 |
160 | def setup_for_distributed(is_master):
161 | """
162 | This function disables printing when not in master process
163 | """
164 | import builtins as __builtin__
165 | builtin_print = __builtin__.print
166 |
167 | def print(*args, **kwargs):
168 | force = kwargs.pop('force', False)
169 | if is_master or force:
170 | builtin_print(*args, **kwargs)
171 |
172 | __builtin__.print = print
173 |
174 |
175 | def is_dist_avail_and_initialized():
176 | if not dist.is_available():
177 | return False
178 | if not dist.is_initialized():
179 | return False
180 | return True
181 |
182 |
183 | def get_world_size():
184 | if not is_dist_avail_and_initialized():
185 | return 1
186 | return dist.get_world_size()
187 |
188 |
189 | def get_rank():
190 | if not is_dist_avail_and_initialized():
191 | return 0
192 | return dist.get_rank()
193 |
194 |
195 | def is_main_process():
196 | return get_rank() == 0
197 |
198 |
199 | def save_on_master(*args, **kwargs):
200 | if is_main_process():
201 | torch.save(*args, **kwargs)
202 |
203 |
204 | def init_distributed_mode(args):
205 | if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
206 | rank = int(os.environ["RANK"])
207 | world_size = int(os.environ['WORLD_SIZE'])
208 | print(f"RANK and WORLD_SIZE in environment: {rank}/{world_size}")
209 | else:
210 | rank = -1
211 |         world_size = -1  # without RANK/WORLD_SIZE env vars the init below will fail; use a distributed launcher
212 |
213 | torch.cuda.set_device(args.local_rank)
214 | torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
215 | torch.distributed.barrier()
216 | setup_for_distributed(is_main_process())
217 |
218 | if args.output_dir:
219 | mkdir(args.output_dir)
220 |
--------------------------------------------------------------------------------
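Usage note: `SmoothedValue` reports a windowed median/average for readability, while `total`/`count` track the entire series for the global average. A minimal sketch, assuming utils.py is importable from the repo root:

```python
from utils import SmoothedValue

meter = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
for v in (1.0, 2.0, 3.0, 4.0):
    meter.update(v)

print(meter)       # "3.00 (2.50)": median over the last 3 values, average over all 4
print(meter.max)   # 4.0, the max within the current window
```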
/refer/evaluation/cider/cider_scorer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Tsung-Yi Lin
3 | # Ramakrishna Vedantam
4 |
5 | import copy
6 | from collections import defaultdict
7 | import numpy as np
8 | import pdb
9 | import math
10 |
11 | def precook(s, n=4, out=False):
12 | """
13 | Takes a string as input and returns an object that can be given to
14 | either cook_refs or cook_test. This is optional: cook_refs and cook_test
15 | can take string arguments as well.
16 | :param s: string : sentence to be converted into ngrams
17 | :param n: int : number of ngrams for which representation is calculated
18 |     :return: term frequency vector for occurring ngrams
19 | """
20 | words = s.split()
21 | counts = defaultdict(int)
22 |     for k in range(1, n + 1):
23 |         for i in range(len(words) - k + 1):
24 | ngram = tuple(words[i:i+k])
25 | counts[ngram] += 1
26 | return counts
27 |
28 | def cook_refs(refs, n=4): ## lhuang: oracle will call with "average"
29 | '''Takes a list of reference sentences for a single segment
30 | and returns an object that encapsulates everything that BLEU
31 | needs to know about them.
32 | :param refs: list of string : reference sentences for some image
33 | :param n: int : number of ngrams for which (ngram) representation is calculated
34 | :return: result (list of dict)
35 | '''
36 | return [precook(ref, n) for ref in refs]
37 |
38 | def cook_test(test, n=4):
39 | '''Takes a test sentence and returns an object that
40 | encapsulates everything that BLEU needs to know about it.
41 | :param test: list of string : hypothesis sentence for some image
42 | :param n: int : number of ngrams for which (ngram) representation is calculated
43 | :return: result (dict)
44 | '''
45 | return precook(test, n, True)
46 |
47 | class CiderScorer(object):
48 | """CIDEr scorer.
49 | """
50 |
51 | def copy(self):
52 | ''' copy the refs.'''
53 | new = CiderScorer(n=self.n)
54 | new.ctest = copy.copy(self.ctest)
55 | new.crefs = copy.copy(self.crefs)
56 | return new
57 |
58 | def __init__(self, test=None, refs=None, n=4, sigma=6.0):
59 | ''' singular instance '''
60 | self.n = n
61 | self.sigma = sigma
62 | self.crefs = []
63 | self.ctest = []
64 | self.document_frequency = defaultdict(float)
65 | self.cook_append(test, refs)
66 | self.ref_len = None
67 |
68 | def cook_append(self, test, refs):
69 | '''called by constructor and __iadd__ to avoid creating new instances.'''
70 |
71 | if refs is not None:
72 | self.crefs.append(cook_refs(refs))
73 | if test is not None:
74 | self.ctest.append(cook_test(test)) ## N.B.: -1
75 | else:
76 | self.ctest.append(None) # lens of crefs and ctest have to match
77 |
78 | def size(self):
79 | assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
80 | return len(self.crefs)
81 |
82 | def __iadd__(self, other):
83 | '''add an instance (e.g., from another sentence).'''
84 |
85 | if type(other) is tuple:
86 | ## avoid creating new CiderScorer instances
87 | self.cook_append(other[0], other[1])
88 | else:
89 | self.ctest.extend(other.ctest)
90 | self.crefs.extend(other.crefs)
91 |
92 | return self
93 | def compute_doc_freq(self):
94 | '''
95 | Compute term frequency for reference data.
96 | This will be used to compute idf (inverse document frequency later)
97 | The term frequency is stored in the object
98 | :return: None
99 | '''
100 | for refs in self.crefs:
101 | # refs, k ref captions of one image
102 |             for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):
103 | self.document_frequency[ngram] += 1
104 | # maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
105 |
106 | def compute_cider(self):
107 | def counts2vec(cnts):
108 | """
109 | Function maps counts of ngram to vector of tfidf weights.
110 | The function returns vec, an array of dictionary that store mapping of n-gram and tf-idf weights.
111 | The n-th entry of array denotes length of n-grams.
112 | :param cnts:
113 | :return: vec (array of dict), norm (array of float), length (int)
114 | """
115 | vec = [defaultdict(float) for _ in range(self.n)]
116 | length = 0
117 | norm = [0.0 for _ in range(self.n)]
118 |             for (ngram, term_freq) in cnts.items():
119 | # give word count 1 if it doesn't appear in reference corpus
120 | df = np.log(max(1.0, self.document_frequency[ngram]))
121 | # ngram index
122 | n = len(ngram)-1
123 | # tf (term_freq) * idf (precomputed idf) for n-grams
124 | vec[n][ngram] = float(term_freq)*(self.ref_len - df)
125 | # compute norm for the vector. the norm will be used for computing similarity
126 | norm[n] += pow(vec[n][ngram], 2)
127 |
128 | if n == 1:
129 | length += term_freq
130 | norm = [np.sqrt(n) for n in norm]
131 | return vec, norm, length
132 |
133 | def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
134 | '''
135 | Compute the cosine similarity of two vectors.
136 | :param vec_hyp: array of dictionary for vector corresponding to hypothesis
137 | :param vec_ref: array of dictionary for vector corresponding to reference
138 | :param norm_hyp: array of float for vector corresponding to hypothesis
139 | :param norm_ref: array of float for vector corresponding to reference
140 | :param length_hyp: int containing length of hypothesis
141 | :param length_ref: int containing length of reference
142 | :return: array of score for each n-grams cosine similarity
143 | '''
144 | delta = float(length_hyp - length_ref)
145 |             # measure cosine similarity
146 | val = np.array([0.0 for _ in range(self.n)])
147 | for n in range(self.n):
148 | # ngram
149 |                 for (ngram, count) in vec_hyp[n].items():
150 | # vrama91 : added clipping
151 | val[n] += min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram]
152 |
153 | if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
154 | val[n] /= (norm_hyp[n]*norm_ref[n])
155 |
156 | assert(not math.isnan(val[n]))
157 | # vrama91: added a length based gaussian penalty
158 | val[n] *= np.e**(-(delta**2)/(2*self.sigma**2))
159 | return val
160 |
161 | # compute log reference length
162 | self.ref_len = np.log(float(len(self.crefs)))
163 |
164 | scores = []
165 | for test, refs in zip(self.ctest, self.crefs):
166 | # compute vector for test captions
167 | vec, norm, length = counts2vec(test)
168 | # compute vector for ref captions
169 | score = np.array([0.0 for _ in range(self.n)])
170 | for ref in refs:
171 | vec_ref, norm_ref, length_ref = counts2vec(ref)
172 | score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
173 | # change by vrama91 - mean of ngram scores, instead of sum
174 | score_avg = np.mean(score)
175 | # divide by number of references
176 | score_avg /= len(refs)
177 | # multiply score by 10
178 | score_avg *= 10.0
179 | # append score of an image to the score list
180 | scores.append(score_avg)
181 | return scores
182 |
183 | def compute_score(self, option=None, verbose=0):
184 | # compute idf
185 | self.compute_doc_freq()
186 | # assert to check document frequency
187 | assert(len(self.ctest) >= max(self.document_frequency.values()))
188 | # compute cider score
189 | score = self.compute_cider()
190 | # debug
191 | # print score
192 | return np.mean(np.array(score)), np.array(score)
--------------------------------------------------------------------------------
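To see what `precook` produces, here is a toy call of the same n-gram counting under the Python 3 fixes above; the sentence is invented for illustration:

```python
from collections import defaultdict

def precook(s, n=2):
    # n-gram term-frequency counts, as in precook above (n=4 in CIDEr proper)
    words = s.split()
    counts = defaultdict(int)
    for k in range(1, n + 1):
        for i in range(len(words) - k + 1):
            counts[tuple(words[i:i + k])] += 1
    return counts

counts = precook("the cat sat on the mat")
print(counts[("the",)], counts[("the", "cat")])  # 2 1
# CIDEr tf-idf-weights these counts and averages cosine similarity per n-gram order
```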
/refer/external/maskApi.c:
--------------------------------------------------------------------------------
1 | /**************************************************************************
2 | * Microsoft COCO Toolbox. version 2.0
3 | * Data, paper, and tutorials available at: http://mscoco.org/
4 | * Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
5 | * Licensed under the Simplified BSD License [see coco/license.txt]
6 | **************************************************************************/
7 | #include "maskApi.h"
8 | #include <math.h>
9 | #include <stdlib.h>
10 |
11 | uint umin( uint a, uint b ) { return (a<b) ? a : b; }
12 | uint umax( uint a, uint b ) { return (a>b) ? a : b; }
13 |
14 | void rleInit( RLE *R, siz h, siz w, siz m, uint *cnts ) {
15 | R->h=h; R->w=w; R->m=m; R->cnts=(m==0)?0:malloc(sizeof(uint)*m);
16 |   siz j; if(cnts) for(j=0; j<m; j++) R->cnts[j]=cnts[j];
17 | }
18 |
19 | void rleFree( RLE *R ) {
20 | free(R->cnts); R->cnts=0;
21 | }
22 |
23 | void rlesInit( RLE **R, siz n ) {
24 | siz i; *R = (RLE*) malloc(sizeof(RLE)*n);
25 | for(i=0; i0 ) {
61 | c=umin(ca,cb); cc+=c; ct=0;
62 | ca-=c; if(!ca && a0) {
83 | crowd=iscrowd!=NULL && iscrowd[g];
84 | if(dt[d].h!=gt[g].h || dt[d].w!=gt[g].w) { o[g*m+d]=-1; continue; }
85 | siz ka, kb, a, b; uint c, ca, cb, ct, i, u; int va, vb;
86 | ca=dt[d].cnts[0]; ka=dt[d].m; va=vb=0;
87 | cb=gt[g].cnts[0]; kb=gt[g].m; a=b=1; i=u=0; ct=1;
88 | while( ct>0 ) {
89 | c=umin(ca,cb); if(va||vb) { u+=c; if(va&&vb) i+=c; } ct=0;
90 | ca-=c; if(!ca && athr) keep[j]=0;
105 | }
106 | }
107 | }
108 |
109 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o ) {
110 | double h, w, i, u, ga, da; siz g, d; int crowd;
111 | for( g=0; gthr) keep[j]=0;
129 | }
130 | }
131 | }
132 |
133 | void rleToBbox( const RLE *R, BB bb, siz n ) {
134 | siz i; for( i=0; id?1:c=dy && xs>xe) || (dxye);
173 | if(flip) { t=xs; xs=xe; xe=t; t=ys; ys=ye; ye=t; }
174 | s = dx>=dy ? (double)(ye-ys)/dx : (double)(xe-xs)/dy;
175 | if(dx>=dy) for( d=0; d<=dx; d++ ) {
176 | t=flip?dx-d:d; u[m]=t+xs; v[m]=(int)(ys+s*t+.5); m++;
177 | } else for( d=0; d<=dy; d++ ) {
178 | t=flip?dy-d:d; v[m]=t+ys; u[m]=(int)(xs+s*t+.5); m++;
179 | }
180 | }
181 | /* get points along y-boundary and downsample */
182 | free(x); free(y); k=m; m=0; double xd, yd;
183 | x=malloc(sizeof(int)*k); y=malloc(sizeof(int)*k);
184 | for( j=1; jw-1 ) continue;
187 | yd=(double)(v[j]h) yd=h; yd=ceil(yd);
189 | x[m]=(int) xd; y[m]=(int) yd; m++;
190 | }
191 | /* compute rle encoding given y-boundary points */
192 | k=m; a=malloc(sizeof(uint)*(k+1));
193 | for( j=0; j0) b[m++]=a[j++]; else {
199 | j++; if(jm, p=0; long x; int more;
206 | char *s=malloc(sizeof(char)*m*6);
207 | for( i=0; icnts[i]; if(i>2) x-=(long) R->cnts[i-2]; more=1;
209 | while( more ) {
210 | char c=x & 0x1f; x >>= 5; more=(c & 0x10) ? x!=-1 : x!=0;
211 | if(more) c |= 0x20; c+=48; s[p++]=c;
212 | }
213 | }
214 | s[p]=0; return s;
215 | }
216 |
217 | void rleFrString( RLE *R, char *s, siz h, siz w ) {
218 | siz m=0, p=0, k; long x; int more; uint *cnts;
219 | while( s[m] ) m++; cnts=malloc(sizeof(uint)*m); m=0;
220 | while( s[p] ) {
221 | x=0; k=0; more=1;
222 | while( more ) {
223 | char c=s[p]-48; x |= (c & 0x1f) << 5*k;
224 | more = c & 0x20; p++; k++;
225 | if(!more && (c & 0x10)) x |= -1 << 5*k;
226 | }
227 | if(m>2) x+=(long) cnts[m-2]; cnts[m++]=(uint) x;
228 | }
229 | rleInit(R,h,w,m,cnts); free(cnts);
230 | }
231 |
--------------------------------------------------------------------------------
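The RLE format above stores alternating run lengths of 0s and 1s over the mask flattened in column-major order. A small Python sketch of the decode step (mirroring `rleDecode`, with a made-up counts list):

```python
import numpy as np

def rle_decode(cnts, h, w):
    # counts alternate between runs of 0s and 1s, column-major (Fortran) order
    flat = np.zeros(h * w, dtype=np.uint8)
    pos, val = 0, 0
    for c in cnts:
        flat[pos:pos + c] = val
        pos += c
        val = 1 - val
    return flat.reshape((h, w), order="F")

print(rle_decode([2, 3, 4], h=3, w=3))
# [[0 1 0]
#  [0 1 0]
#  [1 0 0]]
```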
/bert/configuration_bert.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3 | # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 | """ BERT model configuration """
17 |
18 |
19 | import logging
20 |
21 | from .configuration_utils import PretrainedConfig
22 |
23 |
24 | logger = logging.getLogger(__name__)
25 |
26 | BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
27 | # "bert-base-uncased": "/mnt/petrelfs/huyutao.vendor/code/lavit/bert/config.json",
28 | "bert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-config.json",
29 | "bert-large-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-config.json",
30 | "bert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json",
31 | "bert-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-config.json",
32 | "bert-base-multilingual-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-config.json",
33 | "bert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-config.json",
34 | "bert-base-chinese": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-config.json",
35 | "bert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-config.json",
36 | "bert-large-uncased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-config.json",
37 | "bert-large-cased-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-config.json",
38 | "bert-large-uncased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-config.json",
39 | "bert-large-cased-whole-word-masking-finetuned-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-config.json",
40 | "bert-base-cased-finetuned-mrpc": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-config.json",
41 | "bert-base-german-dbmdz-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-config.json",
42 | "bert-base-german-dbmdz-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-config.json",
43 | "cl-tohoku/bert-base-japanese": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese/config.json",
44 | "cl-tohoku/bert-base-japanese-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking/config.json",
45 | "cl-tohoku/bert-base-japanese-char": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char/config.json",
46 | "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking/config.json",
47 | "TurkuNLP/bert-base-finnish-cased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/config.json",
48 | "TurkuNLP/bert-base-finnish-uncased-v1": "https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/config.json",
49 | "wietsedv/bert-base-dutch-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json",
50 | # See all BERT models at https://huggingface.co/models?filter=bert
51 | }
52 |
53 |
54 | class BertConfig(PretrainedConfig):
55 | r"""
56 | This is the configuration class to store the configuration of a :class:`~transformers.BertModel`.
57 |     It is used to instantiate a BERT model according to the specified arguments, defining the model
58 |     architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
59 |     the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.
60 |
61 | Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
62 | to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
63 | for more information.
64 |
65 |
66 | Args:
67 | vocab_size (:obj:`int`, optional, defaults to 30522):
68 |             Vocabulary size of the BERT model. Defines the number of different tokens that
69 | can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.
70 | hidden_size (:obj:`int`, optional, defaults to 768):
71 | Dimensionality of the encoder layers and the pooler layer.
72 | num_hidden_layers (:obj:`int`, optional, defaults to 12):
73 | Number of hidden layers in the Transformer encoder.
74 | num_attention_heads (:obj:`int`, optional, defaults to 12):
75 | Number of attention heads for each attention layer in the Transformer encoder.
76 | intermediate_size (:obj:`int`, optional, defaults to 3072):
77 | Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
78 | hidden_act (:obj:`str` or :obj:`function`, optional, defaults to "gelu"):
79 | The non-linear activation function (function or string) in the encoder and pooler.
80 | If string, "gelu", "relu", "swish" and "gelu_new" are supported.
81 | hidden_dropout_prob (:obj:`float`, optional, defaults to 0.1):
82 |             The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
83 | attention_probs_dropout_prob (:obj:`float`, optional, defaults to 0.1):
84 | The dropout ratio for the attention probabilities.
85 | max_position_embeddings (:obj:`int`, optional, defaults to 512):
86 | The maximum sequence length that this model might ever be used with.
87 | Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
88 | type_vocab_size (:obj:`int`, optional, defaults to 2):
89 | The vocabulary size of the `token_type_ids` passed into :class:`~transformers.BertModel`.
90 | initializer_range (:obj:`float`, optional, defaults to 0.02):
91 | The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
92 | layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
93 | The epsilon used by the layer normalization layers.
94 | gradient_checkpointing (:obj:`bool`, optional, defaults to False):
95 |             If True, use gradient checkpointing to save memory at the expense of a slower backward pass.
96 |
97 | Example::
98 |
99 | >>> from transformers import BertModel, BertConfig
100 |
101 | >>> # Initializing a BERT bert-base-uncased style configuration
102 | >>> configuration = BertConfig()
103 |
104 | >>> # Initializing a model from the bert-base-uncased style configuration
105 | >>> model = BertModel(configuration)
106 |
107 | >>> # Accessing the model configuration
108 | >>> configuration = model.config
109 | """
110 | model_type = "bert"
111 |
112 | def __init__(
113 | self,
114 | vocab_size=30522,
115 | hidden_size=768,
116 | num_hidden_layers=12,
117 | num_attention_heads=12,
118 | intermediate_size=3072,
119 | hidden_act="gelu",
120 | hidden_dropout_prob=0.1,
121 | attention_probs_dropout_prob=0.1,
122 | max_position_embeddings=512,
123 | type_vocab_size=2,
124 | initializer_range=0.02,
125 | layer_norm_eps=1e-12,
126 | pad_token_id=0,
127 | gradient_checkpointing=False,
128 | **kwargs
129 | ):
130 | super().__init__(pad_token_id=pad_token_id, **kwargs)
131 |
132 | self.vocab_size = vocab_size
133 | self.hidden_size = hidden_size
134 | self.num_hidden_layers = num_hidden_layers
135 | self.num_attention_heads = num_attention_heads
136 | self.hidden_act = hidden_act
137 | self.intermediate_size = intermediate_size
138 | self.hidden_dropout_prob = hidden_dropout_prob
139 | self.attention_probs_dropout_prob = attention_probs_dropout_prob
140 | self.max_position_embeddings = max_position_embeddings
141 | self.type_vocab_size = type_vocab_size
142 | self.initializer_range = initializer_range
143 | self.layer_norm_eps = layer_norm_eps
144 | self.gradient_checkpointing = gradient_checkpointing
145 |
--------------------------------------------------------------------------------
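Before moving on to the dataset code, a minimal sanity-check sketch for the configuration class above. Only the constructor arguments shown in this file are used; the import path is an assumption (it presumes the repository root is on `PYTHONPATH`):

```python
from bert.configuration_bert import BertConfig  # assumed import path (repo root on PYTHONPATH)

# Build a deliberately small configuration; arguments not passed keep the
# defaults shown in __init__ above.
config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=8)

assert config.hidden_size % config.num_attention_heads == 0  # 32-dim attention heads
print(config.model_type)               # "bert"
print(config.max_position_embeddings)  # 512 (default)
```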
/data/dataset_zom.py:
--------------------------------------------------------------------------------
1 | import os
2 | import copy
3 | import random
4 | from random import choice
5 | from collections import defaultdict
6 |
7 | import numpy as np
8 | import torch
9 | import torch.utils.data as data
10 | import torch.distributed as dist
11 | from torch.utils.data.distributed import DistributedSampler
12 | from PIL import Image
13 | from textblob import TextBlob
14 |
15 | from bert.tokenization_bert import BertTokenizer
16 | from refer.refer import REFER
17 | from args import get_parser
18 |
19 |
20 |
21 |
22 |
23 |
24 | # Dataset configuration initialization
25 | parser = get_parser()
26 | args = parser.parse_args()
27 |
28 |
29 | class Referzom_Dataset(data.Dataset):
30 |
31 | def __init__(self,
32 | args,
33 | image_transforms=None,
34 | target_transforms=None,
35 | split='train',
36 | eval_mode=False):
37 |
38 | self.classes = []
39 | self.image_transforms = image_transforms
40 | self.target_transform = target_transforms
41 | self.split = split
42 | self.refer = REFER(args.refer_data_root, args.dataset, args.splitBy)
43 | self.dataset_type = args.dataset
44 | self.max_tokens = 20
45 | ref_ids = self.refer.getRefIds(split=self.split)
46 | self.img_ids = self.refer.getImgIds()
47 |
48 | all_imgs = self.refer.Imgs
49 | self.imgs = list(all_imgs[i] for i in self.img_ids)
50 | self.ref_ids = ref_ids
51 |
52 | self.input_ids = []
53 | self.input_ids_masked = []
54 | self.attention_masks = []
55 | self.tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer)
56 |
57 | self.eval_mode = eval_mode
58 |
59 | self.zero_sent_id_list = []
60 | self.one_sent_id_list = []
61 | self.all_sent_id_list = []
62 | self.sent_2_refid = {}
63 | for r in ref_ids:
64 | ref = self.refer.loadRefs(r)
65 |
66 | source_type = ref[0]['source']
67 |
68 | for sent_dict in ref[0]['sentences']:
69 | sent_id = sent_dict['sent_id']
70 |
71 | self.sent_2_refid[sent_id] = r
72 | self.all_sent_id_list.append(sent_id)
73 | if source_type=='zero':
74 | self.zero_sent_id_list.append(sent_id)
75 | else:
76 | self.one_sent_id_list.append(sent_id)
77 |
78 | for r in ref_ids:
79 | ref = self.refer.Refs[r]
80 |
81 | sentences_for_ref = []
82 | sentences_for_ref_masked = []
83 | attentions_for_ref = []
84 |
85 | for i, el in enumerate(ref['sentences']):
86 | sentence_raw = el['raw']
87 | attention_mask = [0] * self.max_tokens
88 | padded_input_ids = [0] * self.max_tokens
89 | padded_input_ids_masked = [0] * self.max_tokens
90 |
91 | blob = TextBlob(sentence_raw.lower())
92 | chara_list = blob.tags
93 | mask_ops = []
94 | mask_ops1 = []
95 | for word_i, (word_now, chara) in enumerate(chara_list):
96 |                     if (chara == 'NN' or chara == 'NNS') and word_i < 19 and word_now.lower():  # nouns whose position still fits the 20-token budget
97 | mask_ops.append(word_i)
98 | mask_ops1.append(word_now)
99 | mask_ops2 = self.get_adjacent_word(mask_ops)
100 |
101 |
102 | input_ids = self.tokenizer.encode(text=sentence_raw, add_special_tokens=True)
103 |
104 | # truncation of tokens
105 | input_ids = input_ids[:self.max_tokens]
106 |
107 | padded_input_ids[:len(input_ids)] = input_ids
108 | attention_mask[:len(input_ids)] = [1]*len(input_ids)
109 | if len(mask_ops) == 0:
110 |                     # no nouns found: fall back to the unmasked ids
111 | input_ids_masked = input_ids
112 | else:
113 | could_mask = choice(mask_ops2)
114 | input_ids_masked = copy.deepcopy(input_ids)
115 |                     for pos in could_mask:
116 |                         input_ids_masked[pos + 1] = 0  # +1 skips the leading [CLS] token
117 | padded_input_ids_masked[:len(input_ids_masked)] = input_ids_masked
118 |
119 | sentences_for_ref.append(torch.tensor(padded_input_ids).unsqueeze(0))
120 | sentences_for_ref_masked.append(torch.tensor(padded_input_ids_masked).unsqueeze(0))
121 | attentions_for_ref.append(torch.tensor(attention_mask).unsqueeze(0))
122 |
123 | self.input_ids.extend(sentences_for_ref)
124 | self.input_ids_masked.extend(sentences_for_ref_masked)
125 | self.attention_masks.extend(attentions_for_ref)
126 |
127 |
128 | def get_classes(self):
129 | return self.classes
130 |
131 | def __len__(self):
132 | return len(self.all_sent_id_list)
133 |
134 | def get_adjacent_word(self, mask_list):
135 | output_mask_list = []
136 | length = len(mask_list)
137 | i = 0
138 | while i < length:
139 | begin_pos = i
140 | while i+1 < length and mask_list[i+1] == mask_list[i] + 1:
141 | i += 1
142 | end_pos = i+1
143 | output_mask_list.append(mask_list[begin_pos:end_pos])
144 | i = end_pos
145 |
146 | return output_mask_list
147 |
148 | def __getitem__(self, index):
149 |
150 | sent_id = self.all_sent_id_list[index]
151 | this_ref_id = self.sent_2_refid[sent_id]
152 |
153 | this_img_id = self.refer.getImgIds(this_ref_id)
154 | this_img = self.refer.Imgs[this_img_id[0]]
155 |
156 | img = Image.open(os.path.join(self.refer.IMAGE_DIR, this_img['file_name'])).convert("RGB")
157 |
158 | ref = self.refer.loadRefs(this_ref_id)
159 | if self.dataset_type == 'ref-zom':
160 | source_type = ref[0]['source']
161 | else:
162 | source_type = 'not_zero'
163 |
164 | ref_mask = np.array(self.refer.getMask(ref[0])['mask'])
165 |
166 | annot = np.zeros(ref_mask.shape)
167 | annot[ref_mask == 1] = 1
168 | annot = Image.fromarray(annot.astype(np.uint8), mode="P")
169 |
170 |
171 |         if self.image_transforms is not None:
172 |             # the same transform pipeline is applied for every split
173 |             img, target = self.image_transforms(img, annot)
174 |
175 |
176 |
177 |
178 |
179 |
180 | if self.eval_mode:
181 | embedding = []
182 | embedding_masked = []
183 | att = []
184 | for s in range(len(self.input_ids[index])):
185 | e = self.input_ids[index][s]
186 |                 # masking is train-only; the unmasked ids are reused for the masked tensor below
187 | a = self.attention_masks[index][s]
188 | embedding.append(e.unsqueeze(-1))
189 | embedding_masked.append(e.unsqueeze(-1))
190 | att.append(a.unsqueeze(-1))
191 |
192 | tensor_embeddings = torch.cat(embedding, dim=-1)
193 | tensor_embeddings_masked = torch.cat(embedding_masked, dim=-1)
194 | attention_mask = torch.cat(att, dim=-1)
195 | else:
196 | choice_sent = np.random.choice(len(self.input_ids[index]))
197 | tensor_embeddings = self.input_ids[index][choice_sent]
198 | tensor_embeddings_masked = self.input_ids_masked[index][choice_sent]
199 | attention_mask = self.attention_masks[index][choice_sent]
200 |
201 | return img, target, source_type, tensor_embeddings, tensor_embeddings_masked, attention_mask
202 |
203 |
204 |
205 |
206 | class Refzom_DistributedSampler(DistributedSampler):
207 | def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
208 | super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
209 | self.one_id_list = dataset.one_sent_id_list
210 |
211 | self.zero_id_list = dataset.zero_sent_id_list
212 | self.sent_ids_list = dataset.all_sent_id_list
213 |         if self.shuffle:
214 | random.shuffle(self.one_id_list)
215 | random.shuffle(self.zero_id_list)
216 |
217 | self.sent_id = self.insert_evenly(self.zero_id_list,self.one_id_list)
218 | self.indices = self.get_positions(self.sent_ids_list, self.sent_id)
219 |
220 | def get_positions(self, list_a, list_b):
221 | position_dict = {value: index for index, value in enumerate(list_a)}
222 | positions = [position_dict[item] for item in list_b]
223 |
224 | return positions
225 |
226 | def insert_evenly(self, list_a, list_b):
227 | len_a = len(list_a)
228 | len_b = len(list_b)
229 | block_size = len_b // len_a
230 |
231 | result = []
232 | for i in range(len_a):
233 | start = i * block_size
234 | end = (i + 1) * block_size
235 | result.extend(list_b[start:end])
236 | result.append(list_a[i])
237 |
238 | remaining = list_b[(len_a * block_size):]
239 | result.extend(remaining)
240 |
241 | return result
242 |
243 | def __iter__(self):
244 |
245 |         indices_per_process = self.indices[self.rank::self.num_replicas]  # strided split; unlike DistributedSampler, no padding to equal length
246 | return iter(indices_per_process)
--------------------------------------------------------------------------------
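Two pieces of index bookkeeping in `dataset_zom.py` are easy to gloss over: `get_adjacent_word` groups consecutive noun positions into runs (so a multi-word noun phrase is masked as a unit), and `Refzom_DistributedSampler.insert_evenly` spreads the rarer zero-target sentences evenly among the one-target ones. A self-contained sketch of both behaviors, written as standalone replicas with toy inputs rather than imports from the file:

```python
def get_adjacent_word(mask_list):
    # Group strictly consecutive indices, e.g. [1, 2, 5] -> [[1, 2], [5]].
    output, i, n = [], 0, len(mask_list)
    while i < n:
        begin = i
        while i + 1 < n and mask_list[i + 1] == mask_list[i] + 1:
            i += 1
        output.append(mask_list[begin:i + 1])
        i += 1
    return output

def insert_evenly(list_a, list_b):
    # Append one element of list_a after every len(list_b)//len(list_a)
    # elements of list_b; any leftover of list_b goes at the end.
    block = len(list_b) // len(list_a)
    result = []
    for i, a in enumerate(list_a):
        result.extend(list_b[i * block:(i + 1) * block])
        result.append(a)
    result.extend(list_b[len(list_a) * block:])
    return result

assert get_adjacent_word([1, 2, 5]) == [[1, 2], [5]]
assert insert_evenly(['z1', 'z2'], ['o1', 'o2', 'o3', 'o4', 'o5']) == \
    ['o1', 'o2', 'z1', 'o3', 'o4', 'z2', 'o5']
```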
/refer/evaluation/bleu/bleu_scorer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # bleu_scorer.py
4 | # David Chiang
5 |
6 | # Copyright (c) 2004-2006 University of Maryland. All rights
7 | # reserved. Do not redistribute without permission from the
8 | # author. Not for commercial use.
9 |
10 | # Modified by:
11 | # Hao Fang
12 | # Tsung-Yi Lin
13 |
14 | '''Provides:
15 | cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
16 | cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
17 | '''
18 |
19 | import copy
20 | import sys, math, re
21 | from collections import defaultdict
22 |
23 | def precook(s, n=4, out=False):
24 | """Takes a string as input and returns an object that can be given to
25 | either cook_refs or cook_test. This is optional: cook_refs and cook_test
26 | can take string arguments as well."""
27 | words = s.split()
28 | counts = defaultdict(int)
29 |     for k in range(1,n+1):
30 |         for i in range(len(words)-k+1):
31 | ngram = tuple(words[i:i+k])
32 | counts[ngram] += 1
33 | return (len(words), counts)
34 |
35 | def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
36 | '''Takes a list of reference sentences for a single segment
37 | and returns an object that encapsulates everything that BLEU
38 | needs to know about them.'''
39 |
40 | reflen = []
41 | maxcounts = {}
42 | for ref in refs:
43 | rl, counts = precook(ref, n)
44 | reflen.append(rl)
45 |         for (ngram,count) in counts.items():
46 | maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
47 |
48 | # Calculate effective reference sentence length.
49 | if eff == "shortest":
50 | reflen = min(reflen)
51 | elif eff == "average":
52 | reflen = float(sum(reflen))/len(reflen)
53 |
54 |     ## lhuang: N.B.: leave reflen computation to the very end!!
55 |
56 | ## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
57 |
58 | return (reflen, maxcounts)
59 |
60 | def cook_test(test, reflen_refmaxcounts, eff=None, n=4):
61 | '''Takes a test sentence and returns an object that
62 | encapsulates everything that BLEU needs to know about it.'''
63 |     reflen, refmaxcounts = reflen_refmaxcounts  # tuple parameters are not valid Python 3 syntax
64 | testlen, counts = precook(test, n, True)
65 |
66 | result = {}
67 |
68 | # Calculate effective reference sentence length.
69 |
70 | if eff == "closest":
71 | result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1]
72 | else: ## i.e., "average" or "shortest" or None
73 | result["reflen"] = reflen
74 |
75 | result["testlen"] = testlen
76 |
77 | result["guess"] = [max(0,testlen-k+1) for k in xrange(1,n+1)]
78 |
79 | result['correct'] = [0]*n
80 |     for (ngram, count) in counts.items():
81 | result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
82 |
83 | return result
84 |
85 | class BleuScorer(object):
86 | """Bleu scorer.
87 | """
88 |
89 | __slots__ = "n", "crefs", "ctest", "_score", "_ratio", "_testlen", "_reflen", "special_reflen"
90 | # special_reflen is used in oracle (proportional effective ref len for a node).
91 |
92 | def copy(self):
93 | ''' copy the refs.'''
94 | new = BleuScorer(n=self.n)
95 | new.ctest = copy.copy(self.ctest)
96 | new.crefs = copy.copy(self.crefs)
97 | new._score = None
98 | return new
99 |
100 | def __init__(self, test=None, refs=None, n=4, special_reflen=None):
101 | ''' singular instance '''
102 |
103 | self.n = n
104 | self.crefs = []
105 | self.ctest = []
106 | self.cook_append(test, refs)
107 | self.special_reflen = special_reflen
108 |
109 | def cook_append(self, test, refs):
110 | '''called by constructor and __iadd__ to avoid creating new instances.'''
111 |
112 | if refs is not None:
113 | self.crefs.append(cook_refs(refs))
114 | if test is not None:
115 | cooked_test = cook_test(test, self.crefs[-1])
116 | self.ctest.append(cooked_test) ## N.B.: -1
117 | else:
118 | self.ctest.append(None) # lens of crefs and ctest have to match
119 |
120 | self._score = None ## need to recompute
121 |
122 | def ratio(self, option=None):
123 | self.compute_score(option=option)
124 | return self._ratio
125 |
126 | def score_ratio(self, option=None):
127 | '''return (bleu, len_ratio) pair'''
128 |         return (self.compute_score(option=option)[0], self.ratio(option=option))
129 |
130 | def score_ratio_str(self, option=None):
131 | return "%.4f (%.2f)" % self.score_ratio(option)
132 |
133 | def reflen(self, option=None):
134 | self.compute_score(option=option)
135 | return self._reflen
136 |
137 | def testlen(self, option=None):
138 | self.compute_score(option=option)
139 | return self._testlen
140 |
141 | def retest(self, new_test):
142 | if type(new_test) is str:
143 | new_test = [new_test]
144 | assert len(new_test) == len(self.crefs), new_test
145 | self.ctest = []
146 | for t, rs in zip(new_test, self.crefs):
147 | self.ctest.append(cook_test(t, rs))
148 | self._score = None
149 |
150 | return self
151 |
152 | def rescore(self, new_test):
153 | ''' replace test(s) with new test(s), and returns the new score.'''
154 |
155 | return self.retest(new_test).compute_score()
156 |
157 | def size(self):
158 | assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
159 | return len(self.crefs)
160 |
161 | def __iadd__(self, other):
162 | '''add an instance (e.g., from another sentence).'''
163 |
164 | if type(other) is tuple:
165 | ## avoid creating new BleuScorer instances
166 | self.cook_append(other[0], other[1])
167 | else:
168 | assert self.compatible(other), "incompatible BLEUs."
169 | self.ctest.extend(other.ctest)
170 | self.crefs.extend(other.crefs)
171 | self._score = None ## need to recompute
172 |
173 | return self
174 |
175 | def compatible(self, other):
176 | return isinstance(other, BleuScorer) and self.n == other.n
177 |
178 | def single_reflen(self, option="average"):
179 | return self._single_reflen(self.crefs[0][0], option)
180 |
181 | def _single_reflen(self, reflens, option=None, testlen=None):
182 |
183 | if option == "shortest":
184 | reflen = min(reflens)
185 | elif option == "average":
186 | reflen = float(sum(reflens))/len(reflens)
187 | elif option == "closest":
188 | reflen = min((abs(l-testlen), l) for l in reflens)[1]
189 | else:
190 | assert False, "unsupported reflen option %s" % option
191 |
192 | return reflen
193 |
194 | def recompute_score(self, option=None, verbose=0):
195 | self._score = None
196 | return self.compute_score(option, verbose)
197 |
198 | def compute_score(self, option=None, verbose=0):
199 | n = self.n
200 | small = 1e-9
201 | tiny = 1e-15 ## so that if guess is 0 still return 0
202 | bleu_list = [[] for _ in range(n)]
203 |
204 |         if self._score is not None:
205 |             return self._score, bleu_list  # cached result; bleu_list is empty on this path
206 |
207 | if option is None:
208 | option = "average" if len(self.crefs) == 1 else "closest"
209 |
210 | self._testlen = 0
211 | self._reflen = 0
212 | totalcomps = {'testlen':0, 'reflen':0, 'guess':[0]*n, 'correct':[0]*n}
213 |
214 | # for each sentence
215 | for comps in self.ctest:
216 | testlen = comps['testlen']
217 | self._testlen += testlen
218 |
219 | if self.special_reflen is None: ## need computation
220 | reflen = self._single_reflen(comps['reflen'], option, testlen)
221 | else:
222 | reflen = self.special_reflen
223 |
224 | self._reflen += reflen
225 |
226 | for key in ['guess','correct']:
227 |                 for k in range(n):
228 | totalcomps[key][k] += comps[key][k]
229 |
230 | # append per image bleu score
231 | bleu = 1.
232 |             for k in range(n):
233 | bleu *= (float(comps['correct'][k]) + tiny) \
234 | /(float(comps['guess'][k]) + small)
235 | bleu_list[k].append(bleu ** (1./(k+1)))
236 | ratio = (testlen + tiny) / (reflen + small) ## N.B.: avoid zero division
237 | if ratio < 1:
238 |                 for k in range(n):
239 | bleu_list[k][-1] *= math.exp(1 - 1/ratio)
240 |
241 | if verbose > 1:
242 |                 print(comps, reflen)
243 |
244 | totalcomps['reflen'] = self._reflen
245 | totalcomps['testlen'] = self._testlen
246 |
247 | bleus = []
248 | bleu = 1.
249 |         for k in range(n):
250 | bleu *= float(totalcomps['correct'][k] + tiny) \
251 | / (totalcomps['guess'][k] + small)
252 | bleus.append(bleu ** (1./(k+1)))
253 | ratio = (self._testlen + tiny) / (self._reflen + small) ## N.B.: avoid zero division
254 | if ratio < 1:
255 |             for k in range(n):
256 | bleus[k] *= math.exp(1 - 1/ratio)
257 |
258 | if verbose > 0:
259 |             print(totalcomps)
260 |             print("ratio:", ratio)
261 |         self._ratio = ratio  # cache the length ratio so ratio() has a value after scoring
262 | self._score = bleus
263 | return self._score, bleu_list
264 |
--------------------------------------------------------------------------------
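Usage of `BleuScorer` follows the accumulate-then-score pattern visible in `__iadd__` and `compute_score`. A minimal sketch; the import path is an assumption (it presumes the repository root is on `PYTHONPATH`):

```python
from refer.evaluation.bleu.bleu_scorer import BleuScorer  # assumed import path

bleu = BleuScorer(n=4)
# Each += takes a (hypothesis, list-of-references) pair for one segment.
bleu += ("the cat sat on the mat",
         ["a cat is sitting on the mat", "there is a cat on the mat"])
bleu += ("a dog runs", ["the dog is running"])

# Returns corpus-level [BLEU-1..BLEU-4] plus per-segment scores.
score, per_segment = bleu.compute_score(option="closest")
print(score)
```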
/bert/modeling_test.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | from __future__ import absolute_import
16 | from __future__ import division
17 | from __future__ import print_function
18 |
19 | import collections
20 | import json
21 | import random
22 | import re
23 |
24 | import modeling
25 | import six
26 | import tensorflow as tf
27 |
28 |
29 | class BertModelTest(tf.test.TestCase):
30 |
31 | class BertModelTester(object):
32 |
33 | def __init__(self,
34 | parent,
35 | batch_size=13,
36 | seq_length=7,
37 | is_training=True,
38 | use_input_mask=True,
39 | use_token_type_ids=True,
40 | vocab_size=99,
41 | hidden_size=32,
42 | num_hidden_layers=5,
43 | num_attention_heads=4,
44 | intermediate_size=37,
45 | hidden_act="gelu",
46 | hidden_dropout_prob=0.1,
47 | attention_probs_dropout_prob=0.1,
48 | max_position_embeddings=512,
49 | type_vocab_size=16,
50 | initializer_range=0.02,
51 | scope=None):
52 | self.parent = parent
53 | self.batch_size = batch_size
54 | self.seq_length = seq_length
55 | self.is_training = is_training
56 | self.use_input_mask = use_input_mask
57 | self.use_token_type_ids = use_token_type_ids
58 | self.vocab_size = vocab_size
59 | self.hidden_size = hidden_size
60 | self.num_hidden_layers = num_hidden_layers
61 | self.num_attention_heads = num_attention_heads
62 | self.intermediate_size = intermediate_size
63 | self.hidden_act = hidden_act
64 | self.hidden_dropout_prob = hidden_dropout_prob
65 | self.attention_probs_dropout_prob = attention_probs_dropout_prob
66 | self.max_position_embeddings = max_position_embeddings
67 | self.type_vocab_size = type_vocab_size
68 | self.initializer_range = initializer_range
69 | self.scope = scope
70 |
71 | def create_model(self):
72 | input_ids = BertModelTest.ids_tensor([self.batch_size, self.seq_length],
73 | self.vocab_size)
74 |
75 | input_mask = None
76 | if self.use_input_mask:
77 | input_mask = BertModelTest.ids_tensor(
78 | [self.batch_size, self.seq_length], vocab_size=2)
79 |
80 | token_type_ids = None
81 | if self.use_token_type_ids:
82 | token_type_ids = BertModelTest.ids_tensor(
83 | [self.batch_size, self.seq_length], self.type_vocab_size)
84 |
85 | config = modeling.BertConfig(
86 | vocab_size=self.vocab_size,
87 | hidden_size=self.hidden_size,
88 | num_hidden_layers=self.num_hidden_layers,
89 | num_attention_heads=self.num_attention_heads,
90 | intermediate_size=self.intermediate_size,
91 | hidden_act=self.hidden_act,
92 | hidden_dropout_prob=self.hidden_dropout_prob,
93 | attention_probs_dropout_prob=self.attention_probs_dropout_prob,
94 | max_position_embeddings=self.max_position_embeddings,
95 | type_vocab_size=self.type_vocab_size,
96 | initializer_range=self.initializer_range)
97 |
98 | model = modeling.BertModel(
99 | config=config,
100 | is_training=self.is_training,
101 | input_ids=input_ids,
102 | input_mask=input_mask,
103 | token_type_ids=token_type_ids,
104 | scope=self.scope)
105 |
106 | outputs = {
107 | "embedding_output": model.get_embedding_output(),
108 | "sequence_output": model.get_sequence_output(),
109 | "pooled_output": model.get_pooled_output(),
110 | "all_encoder_layers": model.get_all_encoder_layers(),
111 | }
112 | return outputs
113 |
114 | def check_output(self, result):
115 | self.parent.assertAllEqual(
116 | result["embedding_output"].shape,
117 | [self.batch_size, self.seq_length, self.hidden_size])
118 |
119 | self.parent.assertAllEqual(
120 | result["sequence_output"].shape,
121 | [self.batch_size, self.seq_length, self.hidden_size])
122 |
123 | self.parent.assertAllEqual(result["pooled_output"].shape,
124 | [self.batch_size, self.hidden_size])
125 |
126 | def test_default(self):
127 | self.run_tester(BertModelTest.BertModelTester(self))
128 |
129 | def test_config_to_json_string(self):
130 | config = modeling.BertConfig(vocab_size=99, hidden_size=37)
131 | obj = json.loads(config.to_json_string())
132 | self.assertEqual(obj["vocab_size"], 99)
133 | self.assertEqual(obj["hidden_size"], 37)
134 |
135 | def run_tester(self, tester):
136 | with self.test_session() as sess:
137 | ops = tester.create_model()
138 | init_op = tf.group(tf.global_variables_initializer(),
139 | tf.local_variables_initializer())
140 | sess.run(init_op)
141 | output_result = sess.run(ops)
142 | tester.check_output(output_result)
143 |
144 | self.assert_all_tensors_reachable(sess, [init_op, ops])
145 |
146 | @classmethod
147 | def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
148 | """Creates a random int32 tensor of the shape within the vocab size."""
149 | if rng is None:
150 | rng = random.Random()
151 |
152 | total_dims = 1
153 | for dim in shape:
154 | total_dims *= dim
155 |
156 | values = []
157 | for _ in range(total_dims):
158 | values.append(rng.randint(0, vocab_size - 1))
159 |
160 | return tf.constant(value=values, dtype=tf.int32, shape=shape, name=name)
161 |
162 | def assert_all_tensors_reachable(self, sess, outputs):
163 | """Checks that all the tensors in the graph are reachable from outputs."""
164 | graph = sess.graph
165 |
166 | ignore_strings = [
167 | "^.*/assert_less_equal/.*$",
168 | "^.*/dilation_rate$",
169 | "^.*/Tensordot/concat$",
170 | "^.*/Tensordot/concat/axis$",
171 | "^testing/.*$",
172 | ]
173 |
174 | ignore_regexes = [re.compile(x) for x in ignore_strings]
175 |
176 | unreachable = self.get_unreachable_ops(graph, outputs)
177 | filtered_unreachable = []
178 | for x in unreachable:
179 | do_ignore = False
180 | for r in ignore_regexes:
181 | m = r.match(x.name)
182 | if m is not None:
183 | do_ignore = True
184 | if do_ignore:
185 | continue
186 | filtered_unreachable.append(x)
187 | unreachable = filtered_unreachable
188 |
189 | self.assertEqual(
190 | len(unreachable), 0, "The following ops are unreachable: %s" %
191 | (" ".join([x.name for x in unreachable])))
192 |
193 | @classmethod
194 | def get_unreachable_ops(cls, graph, outputs):
195 | """Finds all of the tensors in graph that are unreachable from outputs."""
196 | outputs = cls.flatten_recursive(outputs)
197 | output_to_op = collections.defaultdict(list)
198 | op_to_all = collections.defaultdict(list)
199 | assign_out_to_in = collections.defaultdict(list)
200 |
201 | for op in graph.get_operations():
202 | for x in op.inputs:
203 | op_to_all[op.name].append(x.name)
204 | for y in op.outputs:
205 | output_to_op[y.name].append(op.name)
206 | op_to_all[op.name].append(y.name)
207 | if str(op.type) == "Assign":
208 | for y in op.outputs:
209 | for x in op.inputs:
210 | assign_out_to_in[y.name].append(x.name)
211 |
212 | assign_groups = collections.defaultdict(list)
213 | for out_name in assign_out_to_in.keys():
214 | name_group = assign_out_to_in[out_name]
215 | for n1 in name_group:
216 | assign_groups[n1].append(out_name)
217 | for n2 in name_group:
218 | if n1 != n2:
219 | assign_groups[n1].append(n2)
220 |
221 | seen_tensors = {}
222 | stack = [x.name for x in outputs]
223 | while stack:
224 | name = stack.pop()
225 | if name in seen_tensors:
226 | continue
227 | seen_tensors[name] = True
228 |
229 | if name in output_to_op:
230 | for op_name in output_to_op[name]:
231 | if op_name in op_to_all:
232 | for input_name in op_to_all[op_name]:
233 | if input_name not in stack:
234 | stack.append(input_name)
235 |
236 | expanded_names = []
237 | if name in assign_groups:
238 | for assign_name in assign_groups[name]:
239 | expanded_names.append(assign_name)
240 |
241 | for expanded_name in expanded_names:
242 | if expanded_name not in stack:
243 | stack.append(expanded_name)
244 |
245 | unreachable_ops = []
246 | for op in graph.get_operations():
247 | is_unreachable = False
248 | all_names = [x.name for x in op.inputs] + [x.name for x in op.outputs]
249 | for name in all_names:
250 | if name not in seen_tensors:
251 | is_unreachable = True
252 | if is_unreachable:
253 | unreachable_ops.append(op)
254 | return unreachable_ops
255 |
256 | @classmethod
257 | def flatten_recursive(cls, item):
258 | """Flattens (potentially nested) a tuple/dictionary/list to a list."""
259 | output = []
260 | if isinstance(item, list):
261 | output.extend(item)
262 | elif isinstance(item, tuple):
263 | output.extend(list(item))
264 | elif isinstance(item, dict):
265 | for (_, v) in six.iteritems(item):
266 | output.append(v)
267 | else:
268 | return [item]
269 |
270 | flat_output = []
271 | for x in output:
272 | flat_output.extend(cls.flatten_recursive(x))
273 | return flat_output
274 |
275 |
276 | if __name__ == "__main__":
277 | tf.test.main()
278 |
--------------------------------------------------------------------------------
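One subtlety in the test above: `assert_all_tensors_reachable` walks the graph starting from `flatten_recursive(outputs)`, so outputs can arrive in any container shape. A standalone replica of that flattening logic (illustrative only, not imported from the test file):

```python
def flatten_recursive(item):
    """Flatten nested lists/tuples/dicts into a flat list of leaves."""
    if isinstance(item, (list, tuple)):
        out = []
        for x in item:
            out.extend(flatten_recursive(x))
        return out
    if isinstance(item, dict):
        out = []
        for v in item.values():
            out.extend(flatten_recursive(v))
        return out
    return [item]

# Mirrors how [init_op, ops] is flattened before the reachability walk.
assert flatten_recursive({"a": [1, (2, 3)], "b": {"c": 4}}) == [1, 2, 3, 4]
```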
/refer/external/_mask.pyx:
--------------------------------------------------------------------------------
1 | # distutils: language = c
2 | # distutils: sources = external/maskApi.c
3 |
4 | #**************************************************************************
5 | # Microsoft COCO Toolbox. version 2.0
6 | # Data, paper, and tutorials available at: http://mscoco.org/
7 | # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
8 | # Licensed under the Simplified BSD License [see coco/license.txt]
9 | #**************************************************************************
10 |
11 | __author__ = 'tsungyi'
12 |
13 | # import both Python-level and C-level symbols of Numpy
14 | # the API uses Numpy to interface C and Python
15 | import numpy as np
16 | cimport numpy as np
17 | from libc.stdlib cimport malloc, free
18 |
19 | # initialize Numpy. must do.
20 | np.import_array()
21 |
22 | # import numpy C function
23 | # we use PyArray_ENABLEFLAGS to make Numpy ndarray responsible for memory management
24 | cdef extern from "numpy/arrayobject.h":
25 | void PyArray_ENABLEFLAGS(np.ndarray arr, int flags)
26 |
27 | # Declare the prototype of the C functions in MaskApi.h
28 | cdef extern from "maskApi.h":
29 | ctypedef unsigned int uint
30 | ctypedef unsigned long siz
31 | ctypedef unsigned char byte
32 | ctypedef double* BB
33 | ctypedef struct RLE:
34 | siz h,
35 | siz w,
36 | siz m,
37 | uint* cnts,
38 | void rlesInit( RLE **R, siz n )
39 | void rleEncode( RLE *R, const byte *M, siz h, siz w, siz n )
40 | void rleDecode( const RLE *R, byte *mask, siz n )
41 | void rleMerge( const RLE *R, RLE *M, siz n, bint intersect )
42 | void rleArea( const RLE *R, siz n, uint *a )
43 | void rleIou( RLE *dt, RLE *gt, siz m, siz n, byte *iscrowd, double *o )
44 | void bbIou( BB dt, BB gt, siz m, siz n, byte *iscrowd, double *o )
45 | void rleToBbox( const RLE *R, BB bb, siz n )
46 | void rleFrBbox( RLE *R, const BB bb, siz h, siz w, siz n )
47 | void rleFrPoly( RLE *R, const double *xy, siz k, siz h, siz w )
48 | char* rleToString( const RLE *R )
49 | void rleFrString( RLE *R, char *s, siz h, siz w )
50 |
51 | # python class to wrap RLE array in C
52 | # the class handles the memory allocation and deallocation
53 | cdef class RLEs:
54 | cdef RLE *_R
55 | cdef siz _n
56 |
57 | def __cinit__(self, siz n =0):
58 | rlesInit(&self._R, n)
59 | self._n = n
60 |
61 | # free the RLE array here
62 | def __dealloc__(self):
63 | if self._R is not NULL:
64 | for i in range(self._n):
65 | free(self._R[i].cnts)
66 | free(self._R)
67 | def __getattr__(self, key):
68 | if key == 'n':
69 | return self._n
70 | raise AttributeError(key)
71 |
72 | # python class to wrap Mask array in C
73 | # the class handles the memory allocation and deallocation
74 | cdef class Masks:
75 | cdef byte *_mask
76 | cdef siz _h
77 | cdef siz _w
78 | cdef siz _n
79 |
80 | def __cinit__(self, h, w, n):
81 | self._mask = malloc(h*w*n* sizeof(byte))
82 | self._h = h
83 | self._w = w
84 | self._n = n
85 | # def __dealloc__(self):
86 | # the memory management of _mask has been passed to np.ndarray
87 | # it doesn't need to be freed here
88 |
89 | # called when passing into np.array() and return an np.ndarray in column-major order
90 | def __array__(self):
91 | cdef np.npy_intp shape[1]
92 | shape[0] = self._h*self._w*self._n
93 | # Create a 1D array, and reshape it to fortran/Matlab column-major array
94 | ndarray = np.PyArray_SimpleNewFromData(1, shape, np.NPY_UINT8, self._mask).reshape((self._h, self._w, self._n), order='F')
95 | # The _mask allocated by Masks is now handled by ndarray
96 | PyArray_ENABLEFLAGS(ndarray, np.NPY_OWNDATA)
97 | return ndarray
98 |
99 | # internal conversion from Python RLEs object to compressed RLE format
100 | def _toString(RLEs Rs):
101 | cdef siz n = Rs.n
102 | cdef bytes py_string
103 | cdef char* c_string
104 | objs = []
105 | for i in range(n):
106 | c_string = rleToString( &Rs._R[i] )
107 | py_string = c_string
108 | objs.append({
109 | 'size': [Rs._R[i].h, Rs._R[i].w],
110 | 'counts': py_string
111 | })
112 | free(c_string)
113 | return objs
114 |
115 | # internal conversion from compressed RLE format to Python RLEs object
116 | def _frString(rleObjs):
117 | cdef siz n = len(rleObjs)
118 | Rs = RLEs(n)
119 | cdef bytes py_string
120 | cdef char* c_string
121 | for i, obj in enumerate(rleObjs):
122 | py_string = str(obj['counts'])
123 | c_string = py_string
124 | rleFrString( &Rs._R[i], c_string, obj['size'][0], obj['size'][1] )
125 | return Rs
126 |
127 | # encode mask to RLEs objects
128 | # list of RLE string can be generated by RLEs member function
129 | def encode(np.ndarray[np.uint8_t, ndim=3, mode='fortran'] mask):
130 | h, w, n = mask.shape[0], mask.shape[1], mask.shape[2]
131 | cdef RLEs Rs = RLEs(n)
132 | rleEncode(Rs._R,mask.data,h,w,n)
133 | objs = _toString(Rs)
134 | return objs
135 |
136 | # decode mask from compressed list of RLE string or RLEs object
137 | def decode(rleObjs):
138 | cdef RLEs Rs = _frString(rleObjs)
139 | h, w, n = Rs._R[0].h, Rs._R[0].w, Rs._n
140 | masks = Masks(h, w, n)
141 | rleDecode( Rs._R, masks._mask, n );
142 | return np.array(masks)
143 |
144 | def merge(rleObjs, bint intersect=0):
145 | cdef RLEs Rs = _frString(rleObjs)
146 | cdef RLEs R = RLEs(1)
147 | rleMerge(Rs._R, R._R, Rs._n, intersect)
148 | obj = _toString(R)[0]
149 | return obj
150 |
151 | def area(rleObjs):
152 | cdef RLEs Rs = _frString(rleObjs)
153 | cdef uint* _a = malloc(Rs._n* sizeof(uint))
154 | rleArea(Rs._R, Rs._n, _a)
155 | cdef np.npy_intp shape[1]
156 | shape[0] = Rs._n
157 | a = np.array((Rs._n, ), dtype=np.uint8)
158 | a = np.PyArray_SimpleNewFromData(1, shape, np.NPY_UINT32, _a)
159 | PyArray_ENABLEFLAGS(a, np.NPY_OWNDATA)
160 | return a
161 |
162 | # iou computation. support function overload (RLEs-RLEs and bbox-bbox).
163 | def iou( dt, gt, pyiscrowd ):
164 | def _preproc(objs):
165 | if len(objs) == 0:
166 | return objs
167 | if type(objs) == np.ndarray:
168 | if len(objs.shape) == 1:
169 |                 objs = objs.reshape((1, objs.shape[0]))  # a single box arrives as shape (4,); make it 1x4
170 | # check if it's Nx4 bbox
171 | if not len(objs.shape) == 2 or not objs.shape[1] == 4:
172 | raise Exception('numpy ndarray input is only for *bounding boxes* and should have Nx4 dimension')
173 | objs = objs.astype(np.double)
174 | elif type(objs) == list:
175 | # check if list is in box format and convert it to np.ndarray
176 | isbox = np.all(np.array([(len(obj)==4) and ((type(obj)==list) or (type(obj)==np.ndarray)) for obj in objs]))
177 | isrle = np.all(np.array([type(obj) == dict for obj in objs]))
178 | if isbox:
179 | objs = np.array(objs, dtype=np.double)
180 | if len(objs.shape) == 1:
181 | objs = objs.reshape((1,objs.shape[0]))
182 | elif isrle:
183 | objs = _frString(objs)
184 | else:
185 | raise Exception('list input can be bounding box (Nx4) or RLEs ([RLE])')
186 | else:
187 | raise Exception('unrecognized type. The following type: RLEs (rle), np.ndarray (box), and list (box) are supported.')
188 | return objs
189 | def _rleIou(RLEs dt, RLEs gt, np.ndarray[np.uint8_t, ndim=1] iscrowd, siz m, siz n, np.ndarray[np.double_t, ndim=1] _iou):
190 | rleIou( dt._R, gt._R, m, n, iscrowd.data, _iou.data )
191 | def _bbIou(np.ndarray[np.double_t, ndim=2] dt, np.ndarray[np.double_t, ndim=2] gt, np.ndarray[np.uint8_t, ndim=1] iscrowd, siz m, siz n, np.ndarray[np.double_t, ndim=1] _iou):
192 | bbIou( dt.data, gt.data, m, n, iscrowd.data, _iou.data )
193 | def _len(obj):
194 | cdef siz N = 0
195 | if type(obj) == RLEs:
196 | N = obj.n
197 | elif len(obj)==0:
198 | pass
199 | elif type(obj) == np.ndarray:
200 | N = obj.shape[0]
201 | return N
202 | # convert iscrowd to numpy array
203 | cdef np.ndarray[np.uint8_t, ndim=1] iscrowd = np.array(pyiscrowd, dtype=np.uint8)
204 | # simple type checking
205 | cdef siz m, n
206 | dt = _preproc(dt)
207 | gt = _preproc(gt)
208 | m = _len(dt)
209 | n = _len(gt)
210 | if m == 0 or n == 0:
211 | return []
212 | if not type(dt) == type(gt):
213 | raise Exception('The dt and gt should have the same data type, either RLEs, list or np.ndarray')
214 |
215 | # define local variables
216 | cdef double* _iou = 0
217 | cdef np.npy_intp shape[1]
218 | # check type and assign iou function
219 | if type(dt) == RLEs:
220 | _iouFun = _rleIou
221 | elif type(dt) == np.ndarray:
222 | _iouFun = _bbIou
223 | else:
224 | raise Exception('input data type not allowed.')
225 | _iou = malloc(m*n* sizeof(double))
226 | iou = np.zeros((m*n, ), dtype=np.double)
227 | shape[0] = m*n
228 | iou = np.PyArray_SimpleNewFromData(1, shape, np.NPY_DOUBLE, _iou)
229 | PyArray_ENABLEFLAGS(iou, np.NPY_OWNDATA)
230 | _iouFun(dt, gt, iscrowd, m, n, iou)
231 | return iou.reshape((m,n), order='F')
232 |
233 | def toBbox( rleObjs ):
234 | cdef RLEs Rs = _frString(rleObjs)
235 | cdef siz n = Rs.n
236 | cdef BB _bb = malloc(4*n* sizeof(double))
237 | rleToBbox( Rs._R, _bb, n )
238 | cdef np.npy_intp shape[1]
239 | shape[0] = 4*n
240 | bb = np.array((1,4*n), dtype=np.double)
241 | bb = np.PyArray_SimpleNewFromData(1, shape, np.NPY_DOUBLE, _bb).reshape((n, 4))
242 | PyArray_ENABLEFLAGS(bb, np.NPY_OWNDATA)
243 | return bb
244 |
245 | def frBbox(np.ndarray[np.double_t, ndim=2] bb, siz h, siz w ):
246 | cdef siz n = bb.shape[0]
247 | Rs = RLEs(n)
248 | rleFrBbox( Rs._R, bb.data, h, w, n )
249 | objs = _toString(Rs)
250 | return objs
251 |
252 | def frPoly( poly, siz h, siz w ):
253 | cdef np.ndarray[np.double_t, ndim=1] np_poly
254 | n = len(poly)
255 | Rs = RLEs(n)
256 | for i, p in enumerate(poly):
257 | np_poly = np.array(p, dtype=np.double, order='F')
258 | rleFrPoly( &Rs._R[i], np_poly.data, len(np_poly)/2, h, w )
259 | objs = _toString(Rs)
260 | return objs
261 |
262 | def frUncompressedRLE(ucRles, siz h, siz w):
263 | cdef np.ndarray[np.uint32_t, ndim=1] cnts
264 | cdef RLE R
265 | cdef uint *data
266 | n = len(ucRles)
267 | objs = []
268 | for i in range(n):
269 | Rs = RLEs(1)
270 | cnts = np.array(ucRles[i]['counts'], dtype=np.uint32)
271 | # time for malloc can be saved here but it's fine
272 | data = malloc(len(cnts)* sizeof(uint))
273 | for j in range(len(cnts)):
274 | data[j] = cnts[j]
275 | R = RLE(ucRles[i]['size'][0], ucRles[i]['size'][1], len(cnts), data)
276 | Rs._R[0] = R
277 | objs.append(_toString(Rs)[0])
278 | return objs
279 |
280 | def frPyObjects(pyobj, siz h, siz w):
281 | if type(pyobj) == np.ndarray:
282 | objs = frBbox(pyobj, h, w )
283 | elif type(pyobj) == list and len(pyobj[0]) == 4:
284 | objs = frBbox(pyobj, h, w )
285 | elif type(pyobj) == list and len(pyobj[0]) > 4:
286 | objs = frPoly(pyobj, h, w )
287 | elif type(pyobj) == list and type(pyobj[0]) == dict:
288 | objs = frUncompressedRLE(pyobj, h, w)
289 | else:
290 | raise Exception('input type is not supported.')
291 | return objs
292 |
--------------------------------------------------------------------------------
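The compiled extension exposes the usual pycocotools-style mask API. A hedged usage sketch, assuming the extension has been built in place (see the `Makefile`/`setup.py` under `refer/`) and is importable through the `mask.py` wrapper in this folder:

```python
import numpy as np
from refer.external import mask as maskUtils  # assumed import path after building

# encode() expects a Fortran-ordered uint8 array of shape (h, w, n).
m = np.zeros((4, 6, 1), dtype=np.uint8, order='F')
m[1:3, 2:5, 0] = 1                 # a 2x3 foreground rectangle

rle = maskUtils.encode(m)          # list with one compressed RLE dict
print(maskUtils.area(rle))         # [6]
print(maskUtils.toBbox(rle))       # [[2. 1. 3. 2.]]  (x, y, w, h)

assert (maskUtils.decode(rle) == m).all()
```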
/refer/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/bert/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/bert/multilingual.md:
--------------------------------------------------------------------------------
1 | ## Models
2 |
3 | There are two multilingual models currently available. We do not plan to release
4 | more single-language models, but we may release `BERT-Large` versions of these
5 | two in the future:
6 |
7 | * **[`BERT-Base, Multilingual Cased (New, recommended)`](https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip)**:
8 | 104 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
9 | * **[`BERT-Base, Multilingual Uncased (Orig, not recommended)`](https://storage.googleapis.com/bert_models/2018_11_03/multilingual_L-12_H-768_A-12.zip)**:
10 | 102 languages, 12-layer, 768-hidden, 12-heads, 110M parameters
11 | * **[`BERT-Base, Chinese`](https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip)**:
12 | Chinese Simplified and Traditional, 12-layer, 768-hidden, 12-heads, 110M
13 | parameters
14 |
15 | **The `Multilingual Cased (New)` model also fixes normalization issues in many
16 | languages, so it is recommended in languages with non-Latin alphabets (and is
17 | often better for most languages with Latin alphabets). When using this model,
18 | make sure to pass `--do_lower_case=false` to `run_pretraining.py` and other
19 | scripts.**
20 |
21 | See the [list of languages](#list-of-languages) that the Multilingual model
22 | supports. The Multilingual model does include Chinese (and English), but if your
23 | fine-tuning data is Chinese-only, then the Chinese model will likely produce
24 | better results.
25 |
26 | ## Results
27 |
28 | To evaluate these systems, we use the
29 | [XNLI dataset](https://github.com/facebookresearch/XNLI), which is a
30 | version of [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) where the
31 | dev and test sets have been translated (by humans) into 15 languages. Note that
32 | the training set was *machine* translated (we used the translations provided by
33 | XNLI, not Google NMT). For clarity, we only report on 6 languages below:
34 |
35 |
36 |
37 | | System | English | Chinese | Spanish | German | Arabic | Urdu |
38 | | --------------------------------- | -------- | -------- | -------- | -------- | -------- | -------- |
39 | | XNLI Baseline - Translate Train | 73.7 | 67.0 | 68.8 | 66.5 | 65.8 | 56.6 |
40 | | XNLI Baseline - Translate Test | 73.7 | 68.3 | 70.7 | 68.7 | 66.8 | 59.3 |
41 | | BERT - Translate Train Cased | **81.9** | **76.6** | **77.8** | **75.9** | **70.7** | 61.6 |
42 | | BERT - Translate Train Uncased | 81.4 | 74.2 | 77.3 | 75.2 | 70.5 | 61.7 |
43 | | BERT - Translate Test Uncased | 81.4 | 70.1 | 74.9 | 74.4 | 70.4 | **62.1** |
44 | | BERT - Zero Shot Uncased | 81.4 | 63.8 | 74.3 | 70.5 | 62.1 | 58.3 |
45 |
46 |
47 |
48 | The first two rows are baselines from the XNLI paper and the last three rows are
49 | our results with BERT.
50 |
51 | **Translate Train** means that the MultiNLI training set was machine translated
52 | from English into the foreign language. So training and evaluation were both
53 | done in the foreign language. Unfortunately, training was done on
54 | machine-translated data, so it is impossible to quantify how much of the lower
55 | accuracy (compared to English) is due to the quality of the machine translation
56 | vs. the quality of the pre-trained model.
57 |
58 | **Translate Test** means that the XNLI test set was machine translated from the
59 | foreign language into English. So training and evaluation were both done in
60 | English. However, test evaluation was done on machine-translated English, so the
61 | accuracy depends on the quality of the machine translation system.
62 |
63 | **Zero Shot** means that the Multilingual BERT system was fine-tuned on English
64 | MultiNLI, and then evaluated on the foreign language XNLI test. In this case,
65 | machine translation was not involved at all in either the pre-training or
66 | fine-tuning.
67 |
68 | Note that the English result is worse than the 84.2 MultiNLI baseline because
69 | this training used Multilingual BERT rather than English-only BERT. This implies
70 | that for high-resource languages, the Multilingual model is somewhat worse than
71 | a single-language model. However, it is not feasible for us to train and
72 | maintain dozens of single-language models. Therefore, if your goal is to maximize
73 | performance with a language other than English or Chinese, you might find it
74 | beneficial to run pre-training for additional steps starting from our
75 | Multilingual model on data from your language of interest.
76 |
77 | Here is a comparison of training Chinese models with the Multilingual
78 | `BERT-Base` and Chinese-only `BERT-Base`:
79 |
80 | System | Chinese
81 | ----------------------- | -------
82 | XNLI Baseline | 67.0
83 | BERT Multilingual Model | 74.2
84 | BERT Chinese-only Model | 77.2
85 |
86 | Similar to English, the single-language model does about 3 points better than the
87 | Multilingual model.
88 |
89 | ## Fine-tuning Example
90 |
91 | The multilingual model does **not** require any special consideration or API
92 | changes. We did update the implementation of `BasicTokenizer` in
93 | `tokenization.py` to support Chinese character tokenization, so please update if
94 | you forked it. However, we did not change the tokenization API.
95 |
96 | To test the new models, we did modify `run_classifier.py` to add support for the
97 | [XNLI dataset](https://github.com/facebookresearch/XNLI). This is a 15-language
98 | version of MultiNLI where the dev/test sets have been human-translated, and the
99 | training set has been machine-translated.
100 |
101 | To run the fine-tuning code, please download the
102 | [XNLI dev/test set](https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip) and the
103 | [XNLI machine-translated training set](https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip)
104 | and then unpack both .zip files into some directory `$XNLI_DIR`.
105 |
106 | To run fine-tuning on XNLI, use the command below. The language is hard-coded
107 | into `run_classifier.py` (Chinese by default), so please modify `XnliProcessor`
108 | if you want to run on another language.
109 |
110 | This is a large dataset, so training will take a few hours on a GPU
111 | (or about 30 minutes on a Cloud TPU). To run an experiment quickly for
112 | debugging, just set `num_train_epochs` to a small value like `0.1`.
113 |
114 | ```shell
115 | export BERT_BASE_DIR=/path/to/bert/chinese_L-12_H-768_A-12 # or multilingual_L-12_H-768_A-12
116 | export XNLI_DIR=/path/to/xnli
117 |
118 | python run_classifier.py \
119 | --task_name=XNLI \
120 | --do_train=true \
121 | --do_eval=true \
122 | --data_dir=$XNLI_DIR \
123 | --vocab_file=$BERT_BASE_DIR/vocab.txt \
124 | --bert_config_file=$BERT_BASE_DIR/bert_config.json \
125 | --init_checkpoint=$BERT_BASE_DIR/bert_model.ckpt \
126 | --max_seq_length=128 \
127 | --train_batch_size=32 \
128 | --learning_rate=5e-5 \
129 | --num_train_epochs=2.0 \
130 | --output_dir=/tmp/xnli_output/
131 | ```
132 |
133 | With the Chinese-only model, the results should look something like this:
134 |
135 | ```
136 | ***** Eval results *****
137 | eval_accuracy = 0.774116
138 | eval_loss = 0.83554
139 | global_step = 24543
140 | loss = 0.74603
141 | ```
142 |
143 | ## Details
144 |
145 | ### Data Source and Sampling
146 |
147 | The languages chosen were the
148 | [top 100 languages with the largest Wikipedias](https://meta.wikimedia.org/wiki/List_of_Wikipedias).
149 | The entire Wikipedia dump for each language (excluding user and talk pages) was
150 | taken as the training data.
151 |
152 | However, the size of the Wikipedia for a given language varies greatly, and
153 | therefore low-resource languages may be "under-represented" in terms of the
154 | neural network model (under the assumption that languages are "competing" for
155 | limited model capacity to some extent). At the same time, we also don't want
156 | to overfit the model by performing thousands of epochs over a tiny Wikipedia
157 | for a particular language.
158 |
159 | To balance these two factors, we performed exponentially smoothed weighting of
160 | the data during pre-training data creation (and WordPiece vocab creation). In
161 | other words, let's say that the probability of a language is *P(L)*, e.g.,
162 | *P(English) = 0.21* means that after concatenating all of the Wikipedias
163 | together, 21% of our data is English. We exponentiate each probability by some
164 | factor *S* and then re-normalize, and sample from that distribution. In our case
165 | we use *S=0.7*. So, high-resource languages like English will be under-sampled,
166 | and low-resource languages like Icelandic will be over-sampled. E.g., in the
167 | original distribution English would be sampled 1000x more than Icelandic, but
168 | after smoothing it's only sampled 100x more.
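
As a rough illustration (this is not the actual data-generation code, and the
corpus sizes below are made up; only the 1000:1 ratio matters), the smoothing
can be computed like this:

```python
# Hypothetical corpus sizes for two languages.
sizes = {"english": 1000.0, "icelandic": 1.0}

total = sum(sizes.values())
probs = {lang: n / total for lang, n in sizes.items()}       # original P(L)
smoothed = {lang: p ** 0.7 for lang, p in probs.items()}     # P(L)^S with S=0.7
norm = sum(smoothed.values())
sampling = {lang: p / norm for lang, p in smoothed.items()}  # re-normalize

# The English/Icelandic ratio drops from 1000x to 1000^0.7, roughly 126x.
print(sampling["english"] / sampling["icelandic"])
```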
169 |
170 | ### Tokenization
171 |
172 | For tokenization, we use a 110k shared WordPiece vocabulary. The word counts are
173 | weighted the same way as the data, so low-resource languages are upweighted by
174 | some factor. We intentionally do *not* use any marker to denote the input
175 | language (so that zero-shot training can work).
176 |
177 | Because Chinese (and Japanese Kanji and Korean Hanja) does not have whitespace
178 | characters, we add spaces around every character in the
179 | [CJK Unicode range](https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_\(Unicode_block\))
180 | before applying WordPiece. This means that Chinese is effectively
181 | character-tokenized. Note that the CJK Unicode block only includes
182 | Chinese-origin characters and does *not* include Hangul Korean or
183 | Katakana/Hiragana Japanese, which are tokenized with whitespace+WordPiece like
184 | all other languages.
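
A minimal sketch of that idea (the real logic lives in `BasicTokenizer` in
`tokenization.py` and covers several more Unicode blocks):

```python
def pad_cjk(text):
    """Adds spaces around main-block CJK ideographs before WordPiece."""
    out = []
    for ch in text:
        # 0x4E00-0x9FFF is the main CJK Unified Ideographs block.
        if 0x4E00 <= ord(ch) <= 0x9FFF:
            out.append(" " + ch + " ")
        else:
            out.append(ch)
    return "".join(out)

print(pad_cjk("BERT模型").split())  # ['BERT', '模', '型']
```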
185 |
186 | For all other languages, we apply the
187 | [same recipe as English](https://github.com/google-research/bert#tokenization):
188 | (a) lower casing+accent removal, (b) punctuation splitting, (c) whitespace
189 | tokenization. We understand that accent markers have substantial meaning in some
190 | languages, but felt that the benefits of reducing the effective vocabulary make
191 | up for this. Generally the strong contextual models of BERT should make up for
192 | any ambiguity introduced by stripping accent markers.
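
For the uncased recipe, lower casing plus accent removal can be sketched with
Unicode NFD normalization (the actual code is in `BasicTokenizer` in
`tokenization.py`):

```python
import unicodedata

def lower_and_strip_accents(text):
    """Lowercases, then drops the combining marks left by NFD decomposition."""
    text = unicodedata.normalize("NFD", text.lower())
    return "".join(ch for ch in text if unicodedata.category(ch) != "Mn")

print(lower_and_strip_accents("Šibenik café"))  # sibenik cafe
```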
193 |
194 | ### List of Languages
195 |
196 | The multilingual model supports the following languages. These languages were
197 | chosen because they are the top 100 languages with the largest Wikipedias:
198 |
199 | * Afrikaans
200 | * Albanian
201 | * Arabic
202 | * Aragonese
203 | * Armenian
204 | * Asturian
205 | * Azerbaijani
206 | * Bashkir
207 | * Basque
208 | * Bavarian
209 | * Belarusian
210 | * Bengali
211 | * Bishnupriya Manipuri
212 | * Bosnian
213 | * Breton
214 | * Bulgarian
215 | * Burmese
216 | * Catalan
217 | * Cebuano
218 | * Chechen
219 | * Chinese (Simplified)
220 | * Chinese (Traditional)
221 | * Chuvash
222 | * Croatian
223 | * Czech
224 | * Danish
225 | * Dutch
226 | * English
227 | * Estonian
228 | * Finnish
229 | * French
230 | * Galician
231 | * Georgian
232 | * German
233 | * Greek
234 | * Gujarati
235 | * Haitian
236 | * Hebrew
237 | * Hindi
238 | * Hungarian
239 | * Icelandic
240 | * Ido
241 | * Indonesian
242 | * Irish
243 | * Italian
244 | * Japanese
245 | * Javanese
246 | * Kannada
247 | * Kazakh
248 | * Kirghiz
249 | * Korean
250 | * Latin
251 | * Latvian
252 | * Lithuanian
253 | * Lombard
254 | * Low Saxon
255 | * Luxembourgish
256 | * Macedonian
257 | * Malagasy
258 | * Malay
259 | * Malayalam
260 | * Marathi
261 | * Minangkabau
262 | * Nepali
263 | * Newar
264 | * Norwegian (Bokmal)
265 | * Norwegian (Nynorsk)
266 | * Occitan
267 | * Persian (Farsi)
268 | * Piedmontese
269 | * Polish
270 | * Portuguese
271 | * Punjabi
272 | * Romanian
273 | * Russian
274 | * Scots
275 | * Serbian
276 | * Serbo-Croatian
277 | * Sicilian
278 | * Slovak
279 | * Slovenian
280 | * South Azerbaijani
281 | * Spanish
282 | * Sundanese
283 | * Swahili
284 | * Swedish
285 | * Tagalog
286 | * Tajik
287 | * Tamil
288 | * Tatar
289 | * Telugu
290 | * Turkish
291 | * Ukrainian
292 | * Urdu
293 | * Uzbek
294 | * Vietnamese
295 | * Volapük
296 | * Waray-Waray
297 | * Welsh
298 | * West Frisian
299 | * Western Punjabi
300 | * Yoruba
301 |
302 | The **Multilingual Cased (New)** release additionally contains **Thai** and
303 | **Mongolian**, which were not included in the original release.
304 |
--------------------------------------------------------------------------------
/bert/run_classifier_with_tfhub.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | # Copyright 2018 The Google AI Language Team Authors.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | """BERT finetuning runner with TF-Hub."""
16 |
17 | from __future__ import absolute_import
18 | from __future__ import division
19 | from __future__ import print_function
20 |
21 | import os
22 | import optimization
23 | import run_classifier
24 | import tokenization
25 | import tensorflow as tf
26 | import tensorflow_hub as hub
27 |
28 | flags = tf.flags
29 |
30 | FLAGS = flags.FLAGS
31 |
32 | flags.DEFINE_string(
33 | "bert_hub_module_handle", None,
34 | "Handle for the BERT TF-Hub module.")
35 |
36 |
37 | def create_model(is_training, input_ids, input_mask, segment_ids, labels,
38 | num_labels, bert_hub_module_handle):
39 | """Creates a classification model."""
40 | tags = set()
41 | if is_training:
42 | tags.add("train")
43 | bert_module = hub.Module(bert_hub_module_handle, tags=tags, trainable=True)
44 | bert_inputs = dict(
45 | input_ids=input_ids,
46 | input_mask=input_mask,
47 | segment_ids=segment_ids)
48 | bert_outputs = bert_module(
49 | inputs=bert_inputs,
50 | signature="tokens",
51 | as_dict=True)
52 |
53 | # In the demo, we are doing a simple classification task on the entire
54 | # segment.
55 | #
56 | # If you want to use the token-level output, use
57 | # bert_outputs["sequence_output"] instead.
58 | output_layer = bert_outputs["pooled_output"]
59 |
60 | hidden_size = output_layer.shape[-1].value
61 |
62 | output_weights = tf.get_variable(
63 | "output_weights", [num_labels, hidden_size],
64 | initializer=tf.truncated_normal_initializer(stddev=0.02))
65 |
66 | output_bias = tf.get_variable(
67 | "output_bias", [num_labels], initializer=tf.zeros_initializer())
68 |
69 | with tf.variable_scope("loss"):
70 | if is_training:
71 | # I.e., 0.1 dropout
72 | output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
73 |
74 | logits = tf.matmul(output_layer, output_weights, transpose_b=True)
75 | logits = tf.nn.bias_add(logits, output_bias)
76 | probabilities = tf.nn.softmax(logits, axis=-1)
77 | log_probs = tf.nn.log_softmax(logits, axis=-1)
78 |
79 | one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
80 |
81 | per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
82 | loss = tf.reduce_mean(per_example_loss)
83 |
84 | return (loss, per_example_loss, logits, probabilities)
85 |
86 |
87 | def model_fn_builder(num_labels, learning_rate, num_train_steps,
88 | num_warmup_steps, use_tpu, bert_hub_module_handle):
89 | """Returns `model_fn` closure for TPUEstimator."""
90 |
91 | def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
92 | """The `model_fn` for TPUEstimator."""
93 |
94 | tf.logging.info("*** Features ***")
95 | for name in sorted(features.keys()):
96 | tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
97 |
98 | input_ids = features["input_ids"]
99 | input_mask = features["input_mask"]
100 | segment_ids = features["segment_ids"]
101 | label_ids = features["label_ids"]
102 |
103 | is_training = (mode == tf.estimator.ModeKeys.TRAIN)
104 |
105 | (total_loss, per_example_loss, logits, probabilities) = create_model(
106 | is_training, input_ids, input_mask, segment_ids, label_ids, num_labels,
107 | bert_hub_module_handle)
108 |
109 | output_spec = None
110 | if mode == tf.estimator.ModeKeys.TRAIN:
111 | train_op = optimization.create_optimizer(
112 | total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
113 |
114 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
115 | mode=mode,
116 | loss=total_loss,
117 | train_op=train_op)
118 | elif mode == tf.estimator.ModeKeys.EVAL:
119 |
120 | def metric_fn(per_example_loss, label_ids, logits):
121 | predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
122 | accuracy = tf.metrics.accuracy(label_ids, predictions)
123 | loss = tf.metrics.mean(per_example_loss)
124 | return {
125 | "eval_accuracy": accuracy,
126 | "eval_loss": loss,
127 | }
128 |
129 | eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
130 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
131 | mode=mode,
132 | loss=total_loss,
133 | eval_metrics=eval_metrics)
134 | elif mode == tf.estimator.ModeKeys.PREDICT:
135 | output_spec = tf.contrib.tpu.TPUEstimatorSpec(
136 | mode=mode, predictions={"probabilities": probabilities})
137 | else:
138 | raise ValueError(
139 | "Only TRAIN, EVAL and PREDICT modes are supported: %s" % (mode))
140 |
141 | return output_spec
142 |
143 | return model_fn
144 |
145 |
146 | def create_tokenizer_from_hub_module(bert_hub_module_handle):
147 | """Get the vocab file and casing info from the Hub module."""
148 | with tf.Graph().as_default():
149 | bert_module = hub.Module(bert_hub_module_handle)
150 | tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
151 | with tf.Session() as sess:
152 | vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
153 | tokenization_info["do_lower_case"]])
154 | return tokenization.FullTokenizer(
155 | vocab_file=vocab_file, do_lower_case=do_lower_case)
156 |
157 |
158 | def main(_):
159 | tf.logging.set_verbosity(tf.logging.INFO)
160 |
161 | processors = {
162 | "cola": run_classifier.ColaProcessor,
163 | "mnli": run_classifier.MnliProcessor,
164 | "mrpc": run_classifier.MrpcProcessor,
165 | }
166 |
167 | if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
168 | raise ValueError("At least one of `do_train`, `do_eval` or `do_predict` must be True.")
169 |
170 | tf.gfile.MakeDirs(FLAGS.output_dir)
171 |
172 | task_name = FLAGS.task_name.lower()
173 |
174 | if task_name not in processors:
175 | raise ValueError("Task not found: %s" % (task_name))
176 |
177 | processor = processors[task_name]()
178 |
179 | label_list = processor.get_labels()
180 |
181 | tokenizer = create_tokenizer_from_hub_module(FLAGS.bert_hub_module_handle)
182 |
183 | tpu_cluster_resolver = None
184 | if FLAGS.use_tpu and FLAGS.tpu_name:
185 | tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
186 | FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
187 |
188 | is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
189 | run_config = tf.contrib.tpu.RunConfig(
190 | cluster=tpu_cluster_resolver,
191 | master=FLAGS.master,
192 | model_dir=FLAGS.output_dir,
193 | save_checkpoints_steps=FLAGS.save_checkpoints_steps,
194 | tpu_config=tf.contrib.tpu.TPUConfig(
195 | iterations_per_loop=FLAGS.iterations_per_loop,
196 | num_shards=FLAGS.num_tpu_cores,
197 | per_host_input_for_training=is_per_host))
198 |
199 | train_examples = None
200 | num_train_steps = None
201 | num_warmup_steps = None
202 | if FLAGS.do_train:
203 | train_examples = processor.get_train_examples(FLAGS.data_dir)
204 | num_train_steps = int(
205 | len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
206 | num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
207 |
208 | model_fn = model_fn_builder(
209 | num_labels=len(label_list),
210 | learning_rate=FLAGS.learning_rate,
211 | num_train_steps=num_train_steps,
212 | num_warmup_steps=num_warmup_steps,
213 | use_tpu=FLAGS.use_tpu,
214 | bert_hub_module_handle=FLAGS.bert_hub_module_handle)
215 |
216 | # If TPU is not available, this will fall back to normal Estimator on CPU
217 | # or GPU.
218 | estimator = tf.contrib.tpu.TPUEstimator(
219 | use_tpu=FLAGS.use_tpu,
220 | model_fn=model_fn,
221 | config=run_config,
222 | train_batch_size=FLAGS.train_batch_size,
223 | eval_batch_size=FLAGS.eval_batch_size,
224 | predict_batch_size=FLAGS.predict_batch_size)
225 |
226 | if FLAGS.do_train:
227 | train_features = run_classifier.convert_examples_to_features(
228 | train_examples, label_list, FLAGS.max_seq_length, tokenizer)
229 | tf.logging.info("***** Running training *****")
230 | tf.logging.info(" Num examples = %d", len(train_examples))
231 | tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
232 | tf.logging.info(" Num steps = %d", num_train_steps)
233 | train_input_fn = run_classifier.input_fn_builder(
234 | features=train_features,
235 | seq_length=FLAGS.max_seq_length,
236 | is_training=True,
237 | drop_remainder=True)
238 | estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
239 |
240 | if FLAGS.do_eval:
241 | eval_examples = processor.get_dev_examples(FLAGS.data_dir)
242 | eval_features = run_classifier.convert_examples_to_features(
243 | eval_examples, label_list, FLAGS.max_seq_length, tokenizer)
244 |
245 | tf.logging.info("***** Running evaluation *****")
246 | tf.logging.info(" Num examples = %d", len(eval_examples))
247 | tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
248 |
249 | # This tells the estimator to run through the entire set.
250 | eval_steps = None
251 | # However, if running eval on the TPU, you will need to specify the
252 | # number of steps.
253 | if FLAGS.use_tpu:
254 | # Eval will be slightly WRONG on the TPU because it will truncate
255 | # the last batch.
256 | eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
257 |
258 | eval_drop_remainder = FLAGS.use_tpu  # TPU requires fixed batch shapes.
259 | eval_input_fn = run_classifier.input_fn_builder(
260 | features=eval_features,
261 | seq_length=FLAGS.max_seq_length,
262 | is_training=False,
263 | drop_remainder=eval_drop_remainder)
264 |
265 | result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
266 |
267 | output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
268 | with tf.gfile.GFile(output_eval_file, "w") as writer:
269 | tf.logging.info("***** Eval results *****")
270 | for key in sorted(result.keys()):
271 | tf.logging.info(" %s = %s", key, str(result[key]))
272 | writer.write("%s = %s\n" % (key, str(result[key])))
273 |
274 | if FLAGS.do_predict:
275 | predict_examples = processor.get_test_examples(FLAGS.data_dir)
276 | if FLAGS.use_tpu:
277 | # Discard batch remainder if running on TPU
278 | n = len(predict_examples)
279 | predict_examples = predict_examples[:(n - n % FLAGS.predict_batch_size)]
280 |
281 | predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
282 | run_classifier.file_based_convert_examples_to_features(
283 | predict_examples, label_list, FLAGS.max_seq_length, tokenizer,
284 | predict_file)
285 |
286 | tf.logging.info("***** Running prediction*****")
287 | tf.logging.info(" Num examples = %d", len(predict_examples))
288 | tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
289 |
290 | predict_input_fn = run_classifier.file_based_input_fn_builder(
291 | input_file=predict_file,
292 | seq_length=FLAGS.max_seq_length,
293 | is_training=False,
294 | drop_remainder=FLAGS.use_tpu)
295 |
296 | result = estimator.predict(input_fn=predict_input_fn)
297 |
298 | output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
299 | with tf.gfile.GFile(output_predict_file, "w") as writer:
300 | tf.logging.info("***** Predict results *****")
301 | for prediction in result:
302 | probabilities = prediction["probabilities"]
303 | output_line = "\t".join(
304 | str(class_probability)
305 | for class_probability in probabilities) + "\n"
306 | writer.write(output_line)
307 |
308 |
309 | if __name__ == "__main__":
310 | flags.mark_flag_as_required("data_dir")
311 | flags.mark_flag_as_required("task_name")
312 | flags.mark_flag_as_required("bert_hub_module_handle")
313 | flags.mark_flag_as_required("output_dir")
314 | tf.app.run()
315 |
--------------------------------------------------------------------------------