├── Example.JPEG ├── code ├── download_occluder_lib.sh ├── Process_anno.py ├── CreateL0.py ├── CreateTrainingData.py └── CreateOccludedDataset.py ├── download_FG.sh ├── download_FG_and_BG.sh └── README.md /Example.JPEG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Angtian/OccludedPASCAL3D/HEAD/Example.JPEG -------------------------------------------------------------------------------- /code/download_occluder_lib.sh: -------------------------------------------------------------------------------- 1 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1DgBs8liddu0sf4PNcEKKkxjR-EqnOHY5' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1DgBs8liddu0sf4PNcEKKkxjR-EqnOHY5" -O OccludedLibs.zip && rm -rf /tmp/cookies.txt && 2 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1rPI5amHBEw3E3WrzS871eBHVs3PizURD' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1rPI5amHBEw3E3WrzS871eBHVs3PizURD" -O ObjMaskes.zip && rm -rf /tmp/cookies.txt && 3 | 4 | unzip ObjMaskes.zip && 5 | unzip OccludedLibs.zip && 6 | rm OccludedLibs.zip && 7 | rm ObjMaskes.zip -------------------------------------------------------------------------------- /code/Process_anno.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | import time 4 | 5 | 6 | path_save = '../OccludedPASCAL3D/' 7 | source_anno_path = path_save + 'annotations_grouped' 8 | save_anno_path = path_save + 'annotations' 9 | 10 | categories = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train', 'boat', 'bottle', 'chair', 
'diningtable', 11 | 'sofa', 'tvmonitor'] 12 | 13 | # folder_name_list = ['%sFGL0_BGL0'] + ['%sFGL%d_BGL%d' % ('%s', j, i) for i in range(1, 4) for j in range(1, 4)] 14 | folder_name_list = ['%sFGL%d_BGL%d' % ('%s', j, i) for i in range(1, 4) for j in range(1, 4)] 15 | 16 | for cate in categories: 17 | for name_ in folder_name_list: 18 | print(name_ % cate) 19 | this_folder = os.path.join(save_anno_path, name_) % cate 20 | os.makedirs(this_folder, exist_ok=True) 21 | data = np.load(os.path.join(source_anno_path, (name_ % cate) + '.npz'), allow_pickle=True) 22 | 23 | source_list = data['source'] 24 | mask_list = data['mask'] 25 | box_list = data['box'] 26 | occluder_box_list = data['occluder_box'] 27 | occluder_mask = data['occluder_mask'] 28 | 29 | for i in range(data['mask'].size): 30 | this_name = source_list[i].split('/')[-1].split('.')[0] 31 | np.savez(os.path.join(this_folder, this_name + '.npz'), source=source_list[i], mask=mask_list[i], box=box_list[i], occluder_mask=occluder_mask[i], occluder_box=occluder_box_list[i], category=cate, occluder_level=name_.strip('%s')[1]) 32 | -------------------------------------------------------------------------------- /download_FG.sh: -------------------------------------------------------------------------------- 1 | # Download datasets 2 | # FGL1_BGL1 3 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1X-xwyypLTm9vr-boLYPIPhGxcYaPHSNF' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1X-xwyypLTm9vr-boLYPIPhGxcYaPHSNF" -O OccludedPASCAL3D_FGL1_BGL1.zip 4 | # FGL2_BGL2 5 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1dNP8YE3RJ9Pzr_jQ11O6f6eYgYnq9ROp' -O- 
| sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1dNP8YE3RJ9Pzr_jQ11O6f6eYgYnq9ROp" -O OccludedPASCAL3D_FGL2_BGL2.zip 6 | # FGL3_BGL3 7 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GsHCyAYnqcJsAgiih1vKpDQxzF3ouFxS' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1GsHCyAYnqcJsAgiih1vKpDQxzF3ouFxS" -O OccludedPASCAL3D_FGL3_BGL3.zip 8 | 9 | # Unzip files 10 | unzip OccludedPASCAL3D_FGL1_BGL1.zip 11 | unzip OccludedPASCAL3D_FGL2_BGL2.zip 12 | unzip OccludedPASCAL3D_FGL3_BGL3.zip 13 | 14 | # Delete zipped files 15 | rm OccludedPASCAL3D_FGL1_BGL1.zip 16 | rm OccludedPASCAL3D_FGL2_BGL2.zip 17 | rm OccludedPASCAL3D_FGL3_BGL3.zip 18 | 19 | # Clean up 20 | rm -rf /tmp/cookies.txt 21 | 22 | -------------------------------------------------------------------------------- /download_FG_and_BG.sh: -------------------------------------------------------------------------------- 1 | # Download datasets 2 | # FGL1_BGL1 3 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1X-xwyypLTm9vr-boLYPIPhGxcYaPHSNF' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1X-xwyypLTm9vr-boLYPIPhGxcYaPHSNF" -O OccludedPASCAL3D_FGL1_BGL1.zip 4 | # FGL1_BGL2 5 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1fR2eFg91qGEsHCqv15xfqEchYQT8s7HI' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1fR2eFg91qGEsHCqv15xfqEchYQT8s7HI" -O OccludedPASCAL3D_FGL1_BGL2.zip 6 | # FGL1_BGL3 7 | wget --load-cookies /tmp/cookies.txt 
"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1O8-Z0nmB393ucS_OQLELhtTKW9gVt3St' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1O8-Z0nmB393ucS_OQLELhtTKW9gVt3St" -O OccludedPASCAL3D_FGL1_BGL3.zip 8 | # FGL2_BGL1 9 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GODAwQws3NwVA0eSiGJPMiuTQIxGuTjZ' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1GODAwQws3NwVA0eSiGJPMiuTQIxGuTjZ" -O OccludedPASCAL3D_FGL2_BGL1.zip 10 | # FGL2_BGL2 11 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1dNP8YE3RJ9Pzr_jQ11O6f6eYgYnq9ROp' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1dNP8YE3RJ9Pzr_jQ11O6f6eYgYnq9ROp" -O OccludedPASCAL3D_FGL2_BGL2.zip 12 | # FGL2_BGL3 13 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1o_dGkXsd14N1mfxMuElKMeyq_0HyxKXI' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1o_dGkXsd14N1mfxMuElKMeyq_0HyxKXI" -O OccludedPASCAL3D_FGL2_BGL3.zip 14 | # FGL3_BGL1 15 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1nYsTIBDctkHjYMlgsJx2XfXagCxvQm9F' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1nYsTIBDctkHjYMlgsJx2XfXagCxvQm9F" -O OccludedPASCAL3D_FGL3_BGL1.zip 
16 | # FGL3_BGL2 17 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1Da8Q5X9Ni7mjsTkOKGEy6aSnTLQV8AM7' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1Da8Q5X9Ni7mjsTkOKGEy6aSnTLQV8AM7" -O OccludedPASCAL3D_FGL3_BGL2.zip 18 | # FGL3_BGL3 19 | wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1GsHCyAYnqcJsAgiih1vKpDQxzF3ouFxS' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1GsHCyAYnqcJsAgiih1vKpDQxzF3ouFxS" -O OccludedPASCAL3D_FGL3_BGL3.zip 20 | 21 | # Unzip files 22 | unzip OccludedPASCAL3D_FGL1_BGL1.zip 23 | unzip OccludedPASCAL3D_FGL1_BGL2.zip 24 | unzip OccludedPASCAL3D_FGL1_BGL3.zip 25 | unzip OccludedPASCAL3D_FGL2_BGL1.zip 26 | unzip OccludedPASCAL3D_FGL2_BGL2.zip 27 | unzip OccludedPASCAL3D_FGL2_BGL3.zip 28 | unzip OccludedPASCAL3D_FGL3_BGL1.zip 29 | unzip OccludedPASCAL3D_FGL3_BGL2.zip 30 | unzip OccludedPASCAL3D_FGL3_BGL3.zip 31 | 32 | # Delete zipped files 33 | rm OccludedPASCAL3D_FGL1_BGL1.zip 34 | rm OccludedPASCAL3D_FGL1_BGL2.zip 35 | rm OccludedPASCAL3D_FGL1_BGL3.zip 36 | rm OccludedPASCAL3D_FGL2_BGL1.zip 37 | rm OccludedPASCAL3D_FGL2_BGL2.zip 38 | rm OccludedPASCAL3D_FGL2_BGL3.zip 39 | rm OccludedPASCAL3D_FGL3_BGL1.zip 40 | rm OccludedPASCAL3D_FGL3_BGL2.zip 41 | rm OccludedPASCAL3D_FGL3_BGL3.zip 42 | 43 | # Clean up 44 | rm -rf /tmp/cookies.txt 45 | 46 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # OccludedPASCAL3D+ 2 | The OccludedPASCAL3D+ is a dataset designed to evaluate the robustness to occlusion for a number of computer vision tasks,
such as object detection, keypoint detection and pose estimation. 3 | In the OccludedPASCAL3D+ dataset, we simulate partial occlusion by superimposing objects cropped from the MS-COCO dataset on top of objects from the PASCAL3D+ dataset. We only use the ImageNet subset in PASCAL3D+, which has 10812 testing images. **Note:** The OccludedPASCAL3D+ dataset is designed for evaluating out-of-distribution robustness toward unseen occlusion. Thus, the training set with occlusion is only for ablation studies. 4 | ![Figure of Car in OccludedPASCAL3D+ in 9 occlusion levels](https://github.com/Angtian/OccludedPASCAL3D/blob/master/Example.JPEG) 5 | 6 | The OccludedPASCAL3D+ has 9 occlusion levels in total, with three foreground occlusion levels (FGL1, FGL2, FGL3) and three background occlusion levels (BGL1, BGL2, BGL3). Note that the amount of occlusion is computed as the number of occluded pixels on the object mask. 7 | The occlusion ratio of the foreground: 8 | | Occlusion Level | FGL1 | FGL2 | FGL3 | 9 | |-----------------|---------|---------|---------| 10 | | Occlusion Ratio | 20%-40% | 40%-60% | 60%-80% | 11 | 12 | The occlusion ratio of the background: 13 | | Occlusion Level | BGL1 | BGL2 | BGL3 | 14 | |-----------------|---------|---------|---------| 15 | | Occlusion Ratio | 0%-20% | 20%-40% | 40%-60% | 16 | 17 | Number of images for each level: 18 | | | FGL1 | FGL2 | FGL3 | 19 | |------|-------|-------|-------| 20 | | BGL1 | 10421 | 10270 | 9965 | 21 | | BGL2 | 10304 | 10219 | 10056 | 22 | | BGL3 | 9143 | 10125 | 9983 | 23 | 24 | ## Download dataset 25 | We provide two scripts for downloading either the full dataset or the foreground only dataset (FGL1_BGL1, FGL2_BGL2, FGL3_BGL3). The foreground only dataset is designed for computer vision tasks that assume a given bounding box during inference, such as keypoint detection and pose estimation. 26 | 1. Clone this repo 27 | 2. 
Run the script to download the full dataset: 28 | 29 | ``` 30 | chmod +x download_FG_and_BG.sh 31 | ./download_FG_and_BG.sh 32 | ``` 33 | 34 | Or run the script to download the foreground only dataset: 35 | 36 | ``` 37 | chmod +x download_FG.sh 38 | ./download_FG.sh 39 | ``` 40 | 41 | 3. After running the above commands, you should see the following folders: 42 | **images**: contains occluded images. 43 | **annotations**: annotations for each image. 44 | **lists**: lists indicating the names of available images. 45 | 46 | ## Use the annotations 47 | Inside the annotations folder you find folders named in the format "%sFGL%d_BGL%d" % (cate, fg_occ_lv, bg_occ_lv). In each folder, there are npz files containing the annotations for each individual image. 48 | To load the annotations: 49 | 50 | ``` 51 | import numpy as np 52 | 53 | annos = np.load('IMG_NAME.npz', allow_pickle=True) 54 | ``` 55 | 56 | The variable annos will contain the following attributes: 57 | 1. 'source': name of the image. 58 | 2. 'occluder_mask': a binary mask indicating the occluder. 59 | 3. 'mask': a binary mask indicating the object. 60 | 4. 'box': the bounding box of the object, in the format \[ y0, y1, x0, x1, img_h, img_w \]. 61 | 5. 'occluder_box': a list of bounding boxes of each occluder respectively, in the format \[ \[ y0, y1, x0, x1, img_h, img_w \], \[ y0, y1, x0, x1, img_h, img_w \] ... \]. 62 | 63 | ## Create your own version of the OccludedPASCAL3D+ dataset 64 | If you are not satisfied with the version we provide, you can also create the dataset using code we provide in the code folder. To create the dataset: 65 | 1. Install BboxTools (a python lib for bounding box operations). 66 | 67 | ``` 68 | git clone https://github.com/Angtian/BboxTools.git 69 | python ./BboxTools/setup.py install 70 | ``` 71 | 72 | 2. Download the occluder library cropped from the MS-COCO dataset. 
73 | 74 | ``` 75 | cd code 76 | chmod +x download_occluder_lib.sh 77 | ./download_occluder_lib.sh 78 | ``` 79 | 80 | 3. Change the path in CreateOccludedDataset.py and Process_anno.py 81 | 4. Run these python scripts: 82 | 83 | ``` 84 | python CreateOccludedDataset.py 85 | python Process_anno.py 86 | ``` 87 | 88 | ## Citation 89 | If you find this dataset is useful in your research, please cite: 90 | 91 | ``` 92 | @inproceedings{wang2020robust, 93 | title={Robust Object Detection Under Occlusion With Context-Aware CompositionalNets}, 94 | author={Wang, Angtian and Sun, Yihong and Kortylewski, Adam and Yuille, Alan L}, 95 | booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, 96 | pages={12645--12654}, 97 | year={2020} 98 | } 99 | ``` 100 | 101 | 102 | 103 | 104 | -------------------------------------------------------------------------------- /code/CreateL0.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import BboxTools as bbt 3 | import os 4 | import scipy.io 5 | from PIL import Image 6 | import cv2 7 | 8 | 9 | categories = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train', 'boat', 'bottle', 'chair', 'diningtable', 10 | 'sofa', 'tvmonitor'] 11 | 12 | path_save = '../OccludedPASCAL3D/' 13 | path_to_original_pascal3dp = '../../PASCAL3D+/PASCAL3D+_release1.1/' 14 | 15 | save_anno_path = path_save + 'annotations_grouped' 16 | save_img_path = path_save + 'images' 17 | save_list_path = path_save + 'lists' 18 | 19 | source_list_path = path_to_original_pascal3dp + 'Image_sets/%s_imagenet_val.txt' 20 | source_image_path = path_to_original_pascal3dp + 'Images/%s_imagenet' 21 | source_anno_path = path_to_original_pascal3dp + 'Annotations/%s_imagenet' 22 | source_mask_path = path_to_original_pascal3dp + 'obj_mask/%s' 23 | 24 | 25 | folder_name_list = ['%sFGL0_BGL0'] 26 | 27 | 28 | def load_one_annotation(anno_path): 29 | a = scipy.io.loadmat(anno_path) 30 | 
bbox_ = a['record'][0][0][1][0][0][1][0] 31 | num_obj = len(a['record'][0][0][1][0]) 32 | return bbox_, num_obj != 1 33 | 34 | 35 | def generate_dataset(cate, file_list, img_dir, anno_dir, mask_dir, save_img_dir, save_list_dir, save_anno_dir, occ_lib_dir, 36 | occ_lib_names, record_file): 37 | 38 | annotations = [{'source': [], 'mask': [], 'box': [], 'occluder_box': [], 'occluder_mask': []} for _ in range(len(folder_name_list))] 39 | img_list_ = ['' for _ in range(len(folder_name_list))] 40 | 41 | save_img_dir_list = [os.path.join(save_img_dir, folder_name % cate) for folder_name in folder_name_list] 42 | for folder_name in save_img_dir_list: 43 | os.makedirs(folder_name, exist_ok=True) 44 | os.makedirs(save_list_dir, exist_ok=True) 45 | os.makedirs(save_anno_dir, exist_ok=True) 46 | 47 | for file_name in file_list: 48 | print(file_name) 49 | try: 50 | anno, flag_ = load_one_annotation(os.path.join(anno_dir, file_name + '.mat')) 51 | if flag_: 52 | record_file.write('Skipped %s for multi objects\n' % file_name) 53 | continue 54 | img = np.array(Image.open(os.path.join(img_dir, file_name + '.JPEG'))) 55 | mask = np.array(Image.open(os.path.join(mask_dir, file_name + '.JPEG'))) 56 | 57 | if not mask.shape[0] == img.shape[0] and mask.shape[1] == img.shape[1]: 58 | mask = cv2.resize(mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST) 59 | 60 | box = bbt.from_numpy(anno, image_boundary=img.shape[0:2], sorts=('y0', 'x0', 'y1', 'x1')) 61 | filled_ = np.array([True]) 62 | images_ = np.array([img]) 63 | masks_ = np.zeros_like(np.array([img]), dtype=bool) 64 | boxes_ = [[]] 65 | 66 | except: 67 | print('Unknown Expectations at %s' % file_name) 68 | record_file.write('Unknown Expectations at %s\n' % file_name) 69 | 70 | continue 71 | if not np.all(filled_): 72 | record_file.write('Unfill %s: ' % file_name) 73 | 74 | for i in range(filled_.size): 75 | if filled_[i]: 76 | Image.fromarray(images_[i].astype(np.uint8)).save( 77 | os.path.join(save_img_dir_list[i], 
file_name + '.JPEG')) 78 | annotations[i]['source'].append(os.path.join(img_dir, file_name + '.JPEG')) 79 | annotations[i]['occluder_mask'].append(masks_[i]) 80 | annotations[i]['mask'].append(mask) 81 | annotations[i]['box'].append(bbt.list_box_to_numpy([box], save_image_boundary=True).ravel()) 82 | annotations[i]['occluder_box'].append(bbt.list_box_to_numpy(boxes_[i], save_image_boundary=True)) 83 | 84 | img_list_[i] += file_name + '.JPEG' + '\n' 85 | else: 86 | record_file.write(' %d' % i) 87 | 88 | if not np.all(filled_): 89 | record_file.write('\n') 90 | 91 | for name_, anno_ in zip(folder_name_list, annotations): 92 | np.savez(os.path.join(save_anno_dir, (name_ % cate) + '.npz'), **anno_) 93 | 94 | for name_, list_ in zip(folder_name_list, img_list_): 95 | with open(os.path.join(save_list_dir, (name_ % cate) + '.txt'), 'w') as file: 96 | file.write(list_) 97 | 98 | return 99 | 100 | 101 | if __name__ == '__main__': 102 | for cate in categories: 103 | print('Start cate: ', cate) 104 | tem = open('generating_record_%s_1030.txt' % cate, 'w') 105 | file_list_ = open(source_list_path % cate).readlines() 106 | file_list_ = [tem.strip('\n') for tem in file_list_] 107 | source_image_path_ = source_image_path % cate 108 | source_anno_path_ = source_anno_path % cate 109 | source_mask_path_ = source_mask_path % cate 110 | generate_dataset(cate, file_list_, source_image_path_, source_anno_path_, source_mask_path_, save_img_path, save_list_path, 111 | save_anno_path, '', '', tem) 112 | tem.close() 113 | -------------------------------------------------------------------------------- /code/CreateTrainingData.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import BboxTools as bbt 3 | import os 4 | import scipy.io 5 | from PIL import Image 6 | import cv2 7 | from CreateOccludedDataset import mix_imgs, mix_masks, apply_n_occluder, get_occ, merge_occ_image, load_one_annotation 8 | 9 | 10 | if_occ = True 11 | 12 | 
occ_libs_dir = './occluder_libs_train_%s.npz' 13 | occ_libs_name = ['large', 'medium', 'small'] 14 | 15 | path_save = '../OccludedPASCAL3D_Train/' 16 | path_to_original_pascal3dp = '../../PASCAL3D+/PASCAL3D+_release1.1/' 17 | 18 | categories = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train', 'boat', 'bottle', 'chair', 'diningtable', 19 | 'sofa', 'tvmonitor'] 20 | # categories = ['boat', 'bottle', 'chair', 'diningtable', 'sofa', 'tvmonitor'] 21 | save_anno_path = path_save + 'annotations_grouped' 22 | save_img_path = path_save + 'images' 23 | save_list_path = path_save + 'lists' 24 | 25 | if if_occ: 26 | sub_name = '%s_occluded' 27 | else: 28 | sub_name = '%s_raw' 29 | 30 | source_list_path = path_to_original_pascal3dp + 'Image_sets/%s_imagenet_train.txt' 31 | source_image_path = path_to_original_pascal3dp + 'Images/%s_imagenet' 32 | source_anno_path = path_to_original_pascal3dp + 'Annotations/%s_imagenet' 33 | 34 | # 0: only start randomly, 1: only start in box, 2: using both mode 35 | l_s_thr = 150000 36 | occluding_modes_l = ['s', 'm', 'l', 'lm', 'll', 'ml', 'mm', 'lms', 'lls', 'lmm', 'lll'] 37 | start_mode_l = [1, 2, 2, 0, 2, 1, 1, 0, 1, 0, 0] 38 | occluding_modes_s = ['s', 'm', 'l', 'ms', 'ms', 'mm', 'mms', ] 39 | start_mode_s = [1, 2, 2, 2, 1, 2, 1, ] 40 | 41 | num_per_image = 1 42 | 43 | start_off_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 0 or sd == 2] 44 | start_in_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 1 or sd == 2] 45 | start_off_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 0 or sd == 2] 46 | start_in_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 1 or sd == 2] 47 | 48 | 49 | def generate_one_img(img, box_anno, occ_libs, seg_anno): 50 | img_size = img.shape[0] * img.shape[1] 51 | if img_size > l_s_thr: 52 | using_start_off_box = start_off_box_l 53 | using_start_in_box = start_in_box_l 54 | else: 55 | using_start_off_box = 
start_off_box_s 56 | using_start_in_box = start_in_box_s 57 | 58 | image_out = [] 59 | occluder_mask = [] 60 | occluder_box = [] 61 | for n in range(num_per_image): 62 | using_idx = np.random.randint(0, len(using_start_off_box) + len(using_start_in_box)) 63 | 64 | if using_idx > len(using_start_off_box): 65 | using_idx -= len(using_start_off_box) 66 | 67 | working_mode = using_start_in_box[using_idx] 68 | 69 | t_boxes, t_masks, t_images = get_occ(working_mode, occ_libs) 70 | t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=box_anno) 71 | 72 | else: 73 | working_mode = using_start_off_box[using_idx] 74 | 75 | t_boxes, t_masks, t_images = get_occ(working_mode, occ_libs) 76 | t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=None) 77 | 78 | for i, proc in enumerate(t_process): 79 | if proc: 80 | t_masks[i] = proc.apply(t_masks[i]) 81 | t_images[i] = proc.apply(t_images[i]) 82 | 83 | mask_map = mix_masks(t_masks, t_boxes) 84 | occluder_map = mix_imgs(t_masks, t_boxes, t_images) 85 | 86 | image_out.append(merge_occ_image(mask_map, occluder_map, img.copy())) 87 | occluder_mask.append(mask_map) 88 | occluder_box.append(t_boxes) 89 | 90 | return image_out, occluder_mask, occluder_box 91 | 92 | 93 | def generate_dataset(cate, file_list, img_dir, anno_dir, mask_dir, save_img_dir, save_list_dir, save_anno_dir, occ_lib_dir, occ_lib_names, record_file): 94 | occ_libs = {} 95 | annotation = {'source': [], 'mask': [], 'box': [], 'occluder_box': [], 'occluder_mask': []} 96 | img_list_ = '' 97 | 98 | for k in occ_lib_names: 99 | occ_libs[k] = dict(np.load(occ_lib_dir % k, allow_pickle=True)) 100 | # occ_libs[k] = dict(np.load('tem_lib.npz', allow_pickle=True)) 101 | occ_libs[k]['boxes'] = bbt.from_numpy(occ_libs[k]['boxes']) 102 | 103 | save_img_dir = os.path.join(save_img_dir, sub_name % cate) 104 | os.makedirs(save_img_dir, exist_ok=True) 105 | os.makedirs(save_list_dir, exist_ok=True) 106 | os.makedirs(save_anno_dir, 
exist_ok=True) 107 | 108 | for file_name in file_list: 109 | print(file_name) 110 | try: 111 | # if True: 112 | anno, flag_ = load_one_annotation(os.path.join(anno_dir, file_name + '.mat')) 113 | if flag_: 114 | record_file.write('Skipped %s for multi objects\n' % file_name) 115 | continue 116 | img = np.array(Image.open(os.path.join(img_dir, file_name + '.JPEG'))) 117 | 118 | box = bbt.from_numpy(anno, image_boundary=img.shape[0:2], sorts=('y0', 'x0', 'y1', 'x1')) 119 | if if_occ: 120 | images_, masks_, boxes_ = generate_one_img(img, box, occ_libs, None) 121 | else: 122 | images_ = np.array([img]) 123 | masks_ = np.zeros_like(np.array([img]), dtype=bool) 124 | boxes_ = [[]] 125 | 126 | except: 127 | print('Unknown Expectations at %s' % file_name) 128 | record_file.write('Unknown Expectations at %s\n' % file_name) 129 | 130 | continue 131 | 132 | i = 0 133 | if not num_per_image == 1: 134 | raise Exception('Currently not support more than one output per image') 135 | 136 | Image.fromarray(images_[i].astype(np.uint8)).save( 137 | os.path.join(save_img_dir, file_name + '.JPEG')) 138 | annotation['source'].append(os.path.join(img_dir, file_name + '.JPEG')) 139 | annotation['occluder_mask'].append(masks_[i]) 140 | annotation['mask'].append(None) 141 | annotation['box'].append(bbt.list_box_to_numpy([box], save_image_boundary=True).ravel()) 142 | annotation['occluder_box'].append(bbt.list_box_to_numpy(boxes_[i], save_image_boundary=True)) 143 | 144 | img_list_ += file_name + '.JPEG' + '\n' 145 | 146 | name_ = sub_name 147 | np.savez(os.path.join(save_anno_dir, (name_ % cate) + '.npz'), **annotation) 148 | 149 | with open(os.path.join(save_list_dir, (name_ % cate) + '.txt'), 'w') as file: 150 | file.write(img_list_) 151 | 152 | return 153 | 154 | 155 | if __name__ == '__main__': 156 | for cate in categories: 157 | print('Start cate: ', cate) 158 | tem = open('generating_record_%s_1031.txt' % cate, 'w') 159 | file_list_ = open(source_list_path % cate).readlines() 160 | 
file_list_ = [tem.strip('\n') for tem in file_list_] 161 | source_image_path_ = source_image_path % cate 162 | source_anno_path_ = source_anno_path % cate 163 | generate_dataset(cate, file_list_, source_image_path_, source_anno_path_, '', save_img_path, save_list_path, 164 | save_anno_path, occ_libs_dir, occ_libs_name, tem) 165 | tem.close() 166 | 167 | -------------------------------------------------------------------------------- /code/CreateOccludedDataset.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import BboxTools as bbt 3 | import os 4 | import scipy.io 5 | from PIL import Image 6 | import cv2 7 | 8 | occ_libs_dir = './occluder_libs_test_%s.npz' 9 | occ_libs_name = ['large', 'medium', 'small'] 10 | 11 | path_save = '../OccludedPASCAL3D/' 12 | path_to_original_pascal3dp = '../../PASCAL3D+/PASCAL3D+_release1.1/' 13 | 14 | categories = ['aeroplane', 'bicycle', 'bus', 'car', 'motorbike', 'train', 'boat', 'bottle', 'chair', 'diningtable', 15 | 'sofa', 'tvmonitor'] 16 | 17 | save_anno_path = path_save + 'annotations_grouped' 18 | save_img_path = path_save + 'images' 19 | save_list_path = path_save + 'lists' 20 | 21 | source_list_path = path_to_original_pascal3dp + 'Image_sets/%s_imagenet_val.txt' 22 | source_image_path = path_to_original_pascal3dp + 'Images/%s_imagenet' 23 | source_anno_path = path_to_original_pascal3dp + 'Annotations/%s_imagenet' 24 | source_mask_path = path_to_original_pascal3dp + 'obj_mask/%s' 25 | 26 | if not os.path.exists(path_to_original_pascal3dp + 'obj_mask'): 27 | os.system('mv ./obj_mask ' + path_to_original_pascal3dp) 28 | 29 | # 0: only start randomly, 1: only start in box, 2: using both mode 30 | l_s_thr = 150000 31 | occluding_modes_l = ['s', 'm', 'l', 'l','lm', 'll', 'lll'] 32 | start_mode_l = [1, 2, 2, 2, 0, 2, 0] 33 | occluding_modes_s = ['m', 'l', 'l','lm', 'mm', ] 34 | start_mode_s = [2, 2, 2, 2, 2, ] 35 | 36 | occluding_rate = [(0.3, 0.1), (0.5, 0.1), (0.7, 
0.1), (0.3, 0.3), (0.5, 0.3), (0.7, 0.3), (0.3, 0.5), (0.5, 0.5), 37 | (0.7, 0.5), ] 38 | folder_name_list = ['%sFGL%d_BGL%d' % ('%s', j, i) for i in range(1, 4) for j in range(1, 4)] 39 | 40 | allowed_var = 0.1 41 | 42 | start_off_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 0 or sd == 2] 43 | start_in_box_l = [md for md, sd in zip(occluding_modes_l, start_mode_l) if sd == 1 or sd == 2] 44 | start_off_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 0 or sd == 2] 45 | start_in_box_s = [md for md, sd in zip(occluding_modes_s, start_mode_s) if sd == 1 or sd == 2] 46 | 47 | limited_trying_times = 40 48 | 49 | 50 | def mix_masks(masks, boxes): 51 | back = np.zeros(boxes[0].boundary, dtype=bool) 52 | for box, mask in zip(boxes, masks): 53 | box.assign(back, np.logical_or(mask, box.apply(back))) 54 | return back 55 | 56 | 57 | def mix_imgs(masks, boxes, imgs): 58 | back_im = np.zeros(tuple(boxes[0].boundary) + (3,), dtype=np.uint8) 59 | 60 | for box, mask, img in zip(boxes, masks, imgs): 61 | mask = mask.reshape(mask.shape + (1,)) 62 | box.assign(back_im, mask * img + (1 - mask) * box.apply(back_im)) 63 | return back_im 64 | 65 | 66 | def merge_occ_image(masks, occluder_map, image): 67 | masks = np.expand_dims(masks, axis=2) 68 | return masks * occluder_map + (1 - masks) * image 69 | 70 | 71 | def check_occ_ratio(mask_map, object_annotation_box): 72 | in_box_size = object_annotation_box.size 73 | out_box_size = mask_map.size - in_box_size 74 | 75 | in_box_value = np.sum(object_annotation_box.apply(mask_map)) 76 | out_box_value = np.sum(mask_map) - in_box_value 77 | 78 | return in_box_value / in_box_size, out_box_value / out_box_size 79 | 80 | 81 | def check_occ_ratio_seg(mask_map, mask_obj): 82 | mask_obj = mask_obj > 10 83 | mask_map = mask_map > 0.5 84 | in_box_size = np.sum(mask_obj) 85 | in_box_value = np.sum(np.logical_and(mask_obj, mask_map)) 86 | 87 | out_box_size = np.sum(np.logical_not(mask_obj)) 88 | 
out_box_value = np.sum(np.logical_and(np.logical_not(mask_obj), mask_map)) 89 | 90 | return in_box_value / in_box_size, out_box_value / out_box_size 91 | 92 | 93 | def process_inbox(shape, center, boundary): 94 | tem_box = bbt.box_by_shape(shape, center, boundary) 95 | tem_box_ = bbt.box_by_shape(shape, center) 96 | return tem_box_.box_in_box(tem_box) 97 | 98 | 99 | def apply_n_occluder(occluder_boxes, img_shape, in_box, boundary_constraint=25, overlap_constraint=-5): 100 | box_list = [] 101 | processing_list = [None for _ in range(len(occluder_boxes))] 102 | for i in range(len(occluder_boxes)): 103 | flag_ = False 104 | x = 0 105 | y = 0 106 | ti_ = 0 107 | if in_box and i == 0: 108 | while not flag_: 109 | flag_ = True 110 | x = np.random.randint(boundary_constraint, img_shape[0] - boundary_constraint, dtype=int) 111 | y = np.random.randint(boundary_constraint, img_shape[1] - boundary_constraint, dtype=int) 112 | 113 | if not in_box.include((x, y)): 114 | flag_ = False 115 | else: 116 | while not flag_ and ti_ < 40: 117 | ti_ += 1 118 | flag_ = True 119 | x = np.random.randint(boundary_constraint, img_shape[0] - boundary_constraint, dtype=int) 120 | y = np.random.randint(boundary_constraint, img_shape[1] - boundary_constraint, dtype=int) 121 | for exist_box in box_list: 122 | if exist_box.pad(overlap_constraint).include((x, y)): 123 | flag_ = False 124 | 125 | center = (x, y) 126 | occluder_box = occluder_boxes[i] 127 | this_box = bbt.box_by_shape(occluder_box.shape, center, image_boundary=img_shape) 128 | box_list.append(this_box) 129 | 130 | if not occluder_box.size == this_box.size: 131 | processing_list[i] = process_inbox(occluder_box.shape, center, img_shape) 132 | 133 | return box_list, processing_list 134 | 135 | 136 | def get_occ(required_type, occ_libs): 137 | out_boxes = [] 138 | out_masks = [] 139 | out_images = [] 140 | for t in required_type: 141 | if t == 'l': 142 | this_lib = occ_libs['large'] 143 | elif t == 's': 144 | this_lib = occ_libs['small'] 
def generate_one_img(img, box_anno, occ_libs, seg_anno):
    """Generate occluder layouts matching every target occlusion level.

    Repeatedly samples candidate occluder layouts (some forced onto the
    object's box, some placed freely) and assigns each candidate to the
    target FG/BG occlusion-ratio pair in the module-level ``occluding_rate``
    it matches within ``allowed_var``, keeping the closest match per level.

    Args:
        img: source image as an HxWxC array.
        box_anno: bbt box of the annotated object.
        occ_libs: occluder libraries keyed 'large'/'medium'/'small'.
        seg_anno: object segmentation mask aligned with ``img``.

    Returns:
        (filled_level, image_out, using_mask, using_box): per-level success
        flags, composited occluded images, occluder masks, and occluder
        box lists (object arrays indexed by occlusion level).
    """
    # Larger images use the large-occluder sampling schedule.
    img_size = img.shape[0] * img.shape[1]
    if img_size > l_s_thr:
        using_start_off_box = start_off_box_l
        using_start_in_box = start_in_box_l
    else:
        using_start_off_box = start_off_box_s
        using_start_in_box = start_in_box_s

    tried_times = 0
    fully_filled = False
    filled_level = np.zeros(len(occluding_rate), dtype=bool)
    # BUGFIX: scores are float distances; the original used dtype=bool which
    # collapsed every stored score to True/False and broke the
    # keep-the-best-candidate comparison below.
    filled_score = np.zeros(len(occluding_rate), dtype=float)

    using_box = np.zeros(len(occluding_rate), dtype=object)
    using_mask = np.zeros(len(occluding_rate), dtype=object)
    using_occluder = np.zeros(len(occluding_rate), dtype=object)

    while tried_times < limited_trying_times and not fully_filled:
        tried_times += 1

        boxes = []
        masks = []
        occluders = []
        ratios = []

        # Candidates whose first occluder must land on the object's box;
        # their FG ratio is measured against the segmentation mask.
        for working_mode in using_start_in_box:
            t_boxes, t_masks, t_images = get_occ(working_mode, occ_libs)
            t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=box_anno)

            # Crop occluders that were clipped at the image border.
            for i, proc in enumerate(t_process):
                if proc:
                    t_masks[i] = proc.apply(t_masks[i])
                    t_images[i] = proc.apply(t_images[i])

            mask_map = mix_masks(t_masks, t_boxes)
            occluder_map = mix_imgs(t_masks, t_boxes, t_images)

            ratios.append(check_occ_ratio_seg(mask_map, seg_anno))
            masks.append(mask_map)
            boxes.append(t_boxes)
            occluders.append(occluder_map)

        # Candidates placed freely (occluders may miss the object); their
        # ratio is measured against the bounding box.
        for working_mode in using_start_off_box:
            t_boxes, t_masks, t_images = get_occ(working_mode, occ_libs)
            t_boxes, t_process = apply_n_occluder(t_boxes, img_shape=img.shape[0:2], in_box=None)

            for i, proc in enumerate(t_process):
                if proc:
                    t_masks[i] = proc.apply(t_masks[i])
                    t_images[i] = proc.apply(t_images[i])

            mask_map = mix_masks(t_masks, t_boxes)
            occluder_map = mix_imgs(t_masks, t_boxes, t_images)

            ratios.append(check_occ_ratio(mask_map, box_anno))
            masks.append(mask_map)
            boxes.append(t_boxes)
            occluders.append(occluder_map)

        ratios_np = np.array(ratios)          # n candidates x 2 (FG, BG)
        ratios_base = np.array(occluding_rate)  # 9 targets x 2

        # n x 9 x 2 absolute deviation between candidates and targets.
        diff = np.abs(np.expand_dims(ratios_np, axis=1) - np.expand_dims(ratios_base, axis=0))

        # A target level is legal when some candidate matches both its FG
        # and BG ratios within allowed_var: all(axis=2) then any over n.
        legal_assign = np.any(np.all(diff < allowed_var, axis=2), axis=0)

        # L1 distance with a +10 penalty per out-of-tolerance dimension,
        # then the closest candidate per target level (hoisted once; the
        # original evaluated this matrix twice).
        penalised = np.sum(diff + 10 * (diff >= allowed_var), axis=2)
        dist_assign = np.argmin(penalised, axis=0)
        dist_score = np.min(penalised, axis=0)

        for i in range(len(occluding_rate)):  # 9 levels
            if legal_assign[i]:
                # Keep only the best-scoring candidate seen so far.
                if (not filled_level[i]) or dist_score[i] < filled_score[i]:
                    filled_level[i] = True
                    filled_score[i] = dist_score[i]

                    idx_ = dist_assign[i]

                    using_box[i] = boxes[idx_]
                    using_mask[i] = masks[idx_]
                    using_occluder[i] = occluders[idx_]

        fully_filled = np.all(filled_level)

    # Composite the chosen occluders onto copies of the source image.
    image_out = np.zeros(len(occluding_rate), dtype=object)
    for i in range(len(occluding_rate)):
        if filled_level[i]:
            image_out[i] = merge_occ_image(using_mask[i], using_occluder[i], img.copy())
    return filled_level, image_out, using_mask, using_box
def load_one_annotation(anno_path):
    """Read a PASCAL3D+ MATLAB annotation file.

    Returns:
        (bbox, multi_object): the bounding box of the first annotated
        object, and a flag that is True when the record contains more than
        one object (such images are skipped by the caller).
    """
    record = scipy.io.loadmat(anno_path)['record'][0][0]
    objects = record[1][0]
    first_bbox = objects[0][1][0]
    return first_bbox, len(objects) != 1
def generate_dataset(cate, file_list, img_dir, anno_dir, mask_dir, save_img_dir, save_list_dir, save_anno_dir, occ_lib_dir,
                     occ_lib_names, record_file):
    """Create the occluded dataset for one category.

    For every image in ``file_list``: load annotation, image and object
    mask, synthesise one occluded image per occlusion level via
    ``generate_one_img``, save the images, and collect grouped annotations
    plus per-level image lists. Skips (and logs) multi-object images and
    any image that fails to process.

    Args:
        cate: category name (e.g. 'car'), substituted into folder names.
        file_list: image file stems to process.
        img_dir / anno_dir / mask_dir: source directories.
        save_img_dir / save_list_dir / save_anno_dir: output directories.
        occ_lib_dir: printf-style path template for occluder .npz libraries.
        occ_lib_names: occluder library keys ('large'/'medium'/'small').
        record_file: open text file receiving skip/failure logs.
    """
    occ_libs = {}
    annotations = [{'source': [], 'mask': [], 'box': [], 'occluder_box': [], 'occluder_mask': []}
                   for _ in range(len(occluding_rate))]
    img_list_ = ['' for _ in range(len(occluding_rate))]

    # Load the occluder libraries (masks / boxes / images per size class).
    for k in occ_lib_names:
        occ_libs[k] = dict(np.load(occ_lib_dir % k, allow_pickle=True))
        occ_libs[k]['boxes'] = bbt.from_numpy(occ_libs[k]['boxes'])

    save_img_dir_list = [os.path.join(save_img_dir, folder_name % cate) for folder_name in folder_name_list]
    for folder_name in save_img_dir_list:
        os.makedirs(folder_name, exist_ok=True)
    os.makedirs(save_list_dir, exist_ok=True)
    os.makedirs(save_anno_dir, exist_ok=True)

    for file_name in file_list:
        print(file_name)
        try:
            anno, flag_ = load_one_annotation(os.path.join(anno_dir, file_name + '.mat'))
            if flag_:
                record_file.write('Skipped %s for multi objects\n' % file_name)
                continue
            img = np.array(Image.open(os.path.join(img_dir, file_name + '.JPEG')))
            mask = np.array(Image.open(os.path.join(mask_dir, file_name + '.JPEG')))

            # BUGFIX: the original `not a == b and c == d` parsed as
            # `(not a == b) and (c == d)`, so the mask was resized only when
            # heights differed but widths matched. Resize whenever either
            # dimension disagrees.
            if mask.shape[0] != img.shape[0] or mask.shape[1] != img.shape[1]:
                mask = cv2.resize(mask, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_NEAREST)

            box = bbt.from_numpy(anno, image_boundary=img.shape[0:2], sorts=('x0', 'y0', 'x1', 'y1'))
            filled_, images_, masks_, boxes_ = generate_one_img(img, box, occ_libs, mask)
        except Exception:
            # Best-effort over the whole dataset: log and move on. (Was a
            # bare `except:` that also swallowed KeyboardInterrupt.)
            print('Unknown Exceptions at %s' % file_name)
            record_file.write('Unknown Exceptions at %s\n' % file_name)
            continue

        if not np.all(filled_):
            record_file.write('Unfill %s: ' % file_name)

        for i in range(filled_.size):
            if filled_[i]:
                Image.fromarray(images_[i].astype(np.uint8)).save(
                    os.path.join(save_img_dir_list[i], file_name + '.JPEG'))
                annotations[i]['source'].append(os.path.join(img_dir, file_name + '.JPEG'))
                annotations[i]['occluder_mask'].append(masks_[i])
                annotations[i]['mask'].append(mask)
                annotations[i]['box'].append(bbt.list_box_to_numpy([box], save_image_boundary=True).ravel())
                annotations[i]['occluder_box'].append(bbt.list_box_to_numpy(boxes_[i], save_image_boundary=True))

                img_list_[i] += file_name + '.JPEG' + '\n'
            else:
                # Record which levels could not be filled for this image.
                record_file.write(' %d' % i)

        if not np.all(filled_):
            record_file.write('\n')

    # One grouped-annotation .npz and one image list per occlusion level.
    for name_, anno_ in zip(folder_name_list, annotations):
        np.savez(os.path.join(save_anno_dir, (name_ % cate) + '.npz'), **anno_)

    for name_, list_ in zip(folder_name_list, img_list_):
        with open(os.path.join(save_list_dir, (name_ % cate) + '.txt'), 'w') as file:
            file.write(list_)

    return
if __name__ == '__main__':
    # Drive dataset generation for every category; one record file per
    # category logs skipped/failed images.
    for cate in categories:
        print('Start cate: ', cate)
        # BUGFIX: use context managers — the original never closed the
        # source-list file handle and leaked the record file if
        # generate_dataset raised.
        with open('generating_record_%s_1030.txt' % cate, 'w') as record_file:
            with open(source_list_path % cate) as list_file:
                file_list_ = [line.strip('\n') for line in list_file.readlines()]
            source_image_path_ = source_image_path % cate
            source_anno_path_ = source_anno_path % cate
            source_mask_path_ = source_mask_path % cate
            generate_dataset(cate, file_list_, source_image_path_, source_anno_path_, source_mask_path_,
                             save_img_path, save_list_path, save_anno_path, occ_libs_dir, occ_libs_name,
                             record_file)