├── LICENSE
├── README.md
├── vil2mask.py
├── vil2tusimples.py
├── vis_converted.py
└── vis_vil.py

/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 Yinguan

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# dataset structure
```
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
```

# parse_vil100
Scripts for parsing and converting the VIL-100 dataset.

- vis_vil.py: visualize the annotations on the original images, in points or curves form.
- vil2mask.py: generate lane instance masks.
- vil2tusimples.py: convert the annotations to a TuSimple-like format (see the loading sketch below).
- vis_converted.py: visualize the converted TuSimple-like format.

The scripts above have been tested on WSL/Linux.
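
As a quick check of the conversion, each line of the `{split}_converted.json` files written by vil2tusimples.py is one TuSimple-style JSON object. A minimal loading sketch (the path below assumes the dataset layout shown above; adjust it to your own dataset root):

```python
import json

# assumed location; vil2tusimples.py writes data/{train,test}_converted.json
converted = 'VIL-100/data/test_converted.json'

# one json object per line: {"lanes": [...], "h_samples": [...], "raw_file": "..."}
with open(converted) as f:
    samples = [json.loads(line) for line in f]

for s in samples[:3]:
    # each x in "lanes" pairs with the row in "h_samples" at the same index;
    # -2 marks rows where that lane is not present
    print(s['raw_file'], len(s['lanes']), 'lanes sampled at', len(s['h_samples']), 'rows')
```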

# reference
[VIL-100 Dataset: A Large Annotated Dataset of Video Instance Lane Detection](https://github.com/yujun0-0/MMA-Net/tree/main/dataset)

--------------------------------------------------------------------------------
/vil2mask.py:
--------------------------------------------------------------------------------
'''
dataset name: VIL-100
paper link: https://arxiv.org/abs/2108.08482
reference: https://github.com/yujun0-0/MMA-Net/tree/main/dataset

dataset structure:
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
    |----train.json

*********** A sample of one json-file ***********
{
    "camera_id": 8272,
    "info": {
        "height": 1080,
        "width": 1920,
        "date": "2020-11-24",
        "image_path": "0_Road014_Trim005_frames/XXXXXX.jpg"
    },
    "annotations": {
        "lane": [{
            "id": 1,
            "lane_id": 1,
            "attribute": 1,
            "occlusion": 0,
            "points": [[412.6, 720], [423.7, 709.9], ...]
        }, {...}, {...}, {...}]
    }
}
'''
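
# Note on the mask encoding (a summary of get_mask below): every lane is drawn
# onto a black canvas as a polyline through its y-sorted annotation points, and
# the i-th lane (0-based) is filled with gray value (i + 1) * instance_gap.
# With the default gap of 30 the lane instances appear as 30, 60, 90, ..., so
# instance ids can be recovered from the saved PNG by matching those values.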

import os
import json

import cv2
import numpy as np


def get_mask(mask, label, instance_gap, thickness):
    # read the per-image json label
    with open(label) as label_content:
        label_info = json.load(label_content)['annotations']

    for index, line in enumerate(label_info['lane']):
        points_x = []
        points_y = []
        # get points
        for point in line['points']:
            points_x.append(int(float(point[0])))
            points_y.append(int(float(point[1])))

        ptStart = 0
        ptEnd = 1

        points = list(zip(points_x, points_y))
        # sort along y, then x
        points = sorted(points, key=lambda k: (k[1], k[0]))

        # connect consecutive points with gray value (index + 1) * instance_gap
        while ptEnd < len(points):
            mask = cv2.line(mask, points[ptStart], points[ptEnd],
                            [(index + 1) * instance_gap] * 3, thickness, lineType=8)
            ptStart += 1
            ptEnd += 1

    return mask


if __name__ == '__main__':
    # choose the dataset split from: 'train', 'test'
    datasets_category = 'train'
    # dataset dir
    dataset_dir = '/mnt/h/lane_datasets/VIL-100'

    # save label dir (mask)
    save_mask_dir = dataset_dir + '/mask'
    if not os.path.exists(save_mask_dir):
        os.makedirs(save_mask_dir)

    # read the image list from txt
    txt_file = '{}/data/{}.txt'.format(dataset_dir, datasets_category)
    file_list = open(txt_file)
    for file in file_list:
        file = file.strip()
        full_img_path = dataset_dir + file

        if not os.path.exists(full_img_path):
            continue
        print("Now dealing with:", file)
        file_name = os.path.splitext(file.split('/')[-1])[0]  # image name without extension
        json_file = dataset_dir + file.replace('JPEGImages', 'Json') + '.json'

        img = cv2.imread(full_img_path)

        # images have different heights and widths, so read the shape per image
        h = img.shape[0]
        w = img.shape[1]

        # set params
        instance_gap = 30
        thickness = w // 128

        mask = np.zeros([h, w, 3], dtype=np.uint8)
        # parse label
        label_mask = get_mask(mask, json_file, instance_gap, thickness)

        cv2.imencode('.png', label_mask)[1].tofile('{}/{}.png'.format(save_mask_dir, file_name))

    print("Done!")

--------------------------------------------------------------------------------
/vil2tusimples.py:
--------------------------------------------------------------------------------
'''
convert the VIL-100 annotations to a TuSimple-like json/txt format.

dataset name: VIL-100
paper link: https://arxiv.org/abs/2108.08482
reference: https://github.com/yujun0-0/MMA-Net/tree/main/dataset

dataset structure:
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
    |----train.json

*********** A sample of one json-file ***********
{
    "camera_id": 8272,
    "info": {
        "height": 1080,
        "width": 1920,
        "date": "2020-11-24",
        "image_path": "0_Road014_Trim005_frames/XXXXXX.jpg"
    },
    "annotations": {
        "lane": [{
            "id": 1,
            "lane_id": 1,
            "attribute": 1,
            "occlusion": 0,
            "points": [[412.6, 720], [423.7, 709.9], ...]
        }, {...}, {...}, {...}]
    }
}
'''
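
# Output format: one json object per line in data/{split}_converted.json,
# in the TuSimple style (the numbers below are illustrative only):
# {"lanes": [[-2, -2, 633, 626, ...], ...],
#  "h_samples": [0, 6, 12, ...],
#  "raw_file": "/JPEGImages/0_Road014_Trim005_frames/XXXXXX.jpg"}
# Each x in "lanes" pairs with the row in "h_samples" at the same index;
# -2 means the lane has no pixel on that row.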

import os
import json

import cv2
import numpy as np


def get_mask(mask, label, instance_gap):
    # read the per-image json label
    with open(label) as label_content:
        label_info = json.load(label_content)['annotations']
    lanes_num = 0

    for index, line in enumerate(label_info['lane']):
        lanes_num += 1
        points_x = []
        points_y = []
        # get points
        for point in line['points']:
            points_x.append(int(float(point[0])))
            points_y.append(int(float(point[1])))

        ptStart = 0
        ptEnd = 1

        points = list(zip(points_x, points_y))
        # sort along y, then x
        points = sorted(points, key=lambda k: (k[1], k[0]))

        # connect consecutive points with gray value (index + 1) * instance_gap
        while ptEnd < len(points):
            mask = cv2.line(mask, points[ptStart], points[ptEnd],
                            [instance_gap * (index + 1)] * 3, 4, lineType=8)
            ptStart += 1
            ptEnd += 1

    # the largest gray value used, i.e. number_of_lanes * instance_gap
    max_val = lanes_num * instance_gap

    return mask, max_val


def lane_instance(label_gray, pix_value, hstart, hend, hdis):
    # sample one x coordinate per row for the lane drawn with gray value pix_value
    lane = []
    for hstep in range(hstart, hend, hdis):
        wids = np.where(label_gray[hstep][:] == pix_value)
        for ele in list(wids):
            if len(ele) == 0:
                val = -2  # no lane pixel on this row
            else:
                val = int(sum(ele) / len(ele))  # average x value on this row
            lane.append(val)
    return lane


if __name__ == '__main__':
    # choose the dataset split from: 'train', 'test'
    datasets_category = 'test'
    # dataset dir
    dataset_dir = '/mnt/h/lane_datasets/VIL-100'
    # write ground truth as one json object per line
    save_gt = dataset_dir + '/data/{}_converted.json'.format(datasets_category)

    # read the image list from txt
    txt_file = '{}/data/{}.txt'.format(dataset_dir, datasets_category)

    file_list = open(txt_file)
    for file in file_list:
        file = file.strip()
        full_img_path = dataset_dir + file

        if not os.path.exists(full_img_path):
            continue
        print("Now dealing with:", file)
        file_name = os.path.splitext(file.split('/')[-1])[0]
        json_file = dataset_dir + file.replace('JPEGImages', 'Json') + '.json'

        img = cv2.imread(full_img_path)

        h = img.shape[0]
        w = img.shape[1]

        # set params
        points_num = 56 * 3
        instance_gap = 20
        hstart = 0
        hend = h
        hdis = h // points_num

        img_dict = {}
        h_samples = []  # sampled row coordinates
        lanes = []

        mask = np.zeros([h, w, 3], dtype=np.uint8)

        # parse label
        label_mask, max_value = get_mask(mask, json_file, instance_gap)

        # take one channel as the grayscale mask
        label_gray = label_mask[:, :, 1]

        for hstep in range(hstart, hend, hdis):
            h_samples.append(hstep)

        # negative sample: no lane annotated on this image
        if max_value == 0:
            lanes.append([-2] * len(h_samples))
        # otherwise iterate over the gray value of each lane instance
        else:
            for value in range(instance_gap, max_value + 1, instance_gap):
                lane = lane_instance(label_gray, value, hstart, hend, hdis)

                if max(lane) == -2:
                    lanes.append([-2] * len(h_samples))
                else:
                    lanes.append(lane)

        img_dict["lanes"] = lanes
        img_dict["h_samples"] = h_samples
        img_dict["raw_file"] = file  # image path relative to the dataset root

        # write to txt
        # with open(save_gt, "a+") as f:
        #     f.writelines(json.dumps(img_dict) + '\n')

        # write to json, one object per line
        with open(save_gt, "a+") as out:
            out.write(json.dumps(img_dict) + '\n')

    print("finished~~")

--------------------------------------------------------------------------------
/vis_converted.py:
--------------------------------------------------------------------------------
'''
visualize the converted TuSimple-like annotations.

dataset structure:
VIL-100
|----Annotations
|----data
    |----train_converted.json
    |----test_converted.json
    |----...
|----JPEGImages
|----Json
    |----train.json
'''
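
# Reads the one-json-object-per-line ground truth written by vil2tusimples.py
# (data/{split}_converted.json), keeps a random fraction of the frames
# (sample_num below), and draws the sampled lane points back onto the original
# images as a visual sanity check of the conversion.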

import os
import random
import json

import cv2
import numpy as np


def vis_converted(dataset_dir, sample, color):
    # draw the sampled lane points of one converted annotation onto its image
    test_image = cv2.imdecode(np.fromfile(dataset_dir + sample['raw_file'], dtype=np.uint8), -1)
    y_samples = sample['h_samples']
    for id, lane in enumerate(sample['lanes']):
        for pts in zip(lane, y_samples):
            if pts[0] != -2:  # skip rows where the lane is absent
                test_image = cv2.circle(test_image, pts, 3, color[id % len(color)], -1)
    return test_image


if __name__ == '__main__':
    datasets_category = 'test'
    dataset_dir = '/mnt/h/lane_datasets/VIL-100'  # path to the VIL-100 dataset
    save_dir = '{}/vis_converted'.format(dataset_dir)
    label_file = '{}/data/{}_converted.json'.format(dataset_dir, datasets_category)

    # one color per lane index
    color = [(218, 112, 214), (255, 0, 0), (0, 255, 0), (0, 0, 255),
             (255, 255, 0), (255, 0, 255), (255, 0, 100), (0, 255, 100), (0, 255, 100)]

    sample_num = 0.01  # fraction of frames to visualize, in (0, 1]

    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    test_data = []
    print("Data Loading...")

    with open(label_file) as f:
        while True:
            line = f.readline()
            if not line:
                break
            if random.random() < sample_num:
                test_data.append(json.loads(line))

    size_test = len(test_data)

    for i in range(size_test):
        print("Now dealing with {}".format(test_data[i]['raw_file']))
        image = vis_converted(dataset_dir, test_data[i], color)
        cv2.imwrite(save_dir + '/vis_{}.png'.format(test_data[i]['raw_file'].split('/')[-1].split('.jpg')[0]), image)
        # cv2.imencode('.png', image)[1].tofile('{}\{}.png'.format(save_dir, label_file.split('.txt')[0]))

    print("Done!")

--------------------------------------------------------------------------------
/vis_vil.py:
--------------------------------------------------------------------------------
'''
visualize the VIL-100 annotations in points form or curves form.

dataset name: VIL-100
paper link: https://arxiv.org/abs/2108.08482
reference: https://github.com/yujun0-0/MMA-Net/tree/main/dataset

dataset structure:
VIL-100
|----Annotations
|----data
|----JPEGImages
|----Json
    |----train.json

*********** A sample of one json-file ***********
{
    "camera_id": 8272,
    "info": {
        "height": 1080,
        "width": 1920,
        "date": "2020-11-24",
        "image_path": "0_Road014_Trim005_frames/XXXXXX.jpg"
    },
    "annotations": {
        "lane": [{
            "id": 1,
            "lane_id": 1,
            "attribute": 1,
            "occlusion": 0,
            "points": [[412.6, 720], [423.7, 709.9], ...]
        }, {...}, {...}, {...}]
    }
}
'''
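
# Draws the raw json annotations directly onto the original frames: 'points'
# mode plots each annotated point as a filled circle, while 'curves' mode
# connects the y-sorted points of each lane with line segments; each lane index
# takes a color from the palette below. Results are saved under
# {dataset_dir}/vis_datasets_points or {dataset_dir}/vis_datasets_curves.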

import os
import json

import cv2
import numpy as np

# one color per lane index
color = [(218, 112, 214), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), (255, 255, 255),
         (100, 255, 0), (100, 0, 255), (255, 100, 0), (0, 100, 255), (255, 0, 100), (0, 255, 100)]


def get_points(mask, label):
    # read the per-image json label
    with open(label) as label_content:
        label_info = json.load(label_content)['annotations']

    for index, line in enumerate(label_info['lane']):
        points_x = []
        points_y = []
        # get points
        for point in line['points']:
            points_x.append(int(float(point[0])))
            points_y.append(int(float(point[1])))

        ptStart = 0

        points = list(zip(points_x, points_y))
        # sort along y, then x
        points = sorted(points, key=lambda k: (k[1], k[0]))

        # draw each annotated point as a filled circle
        while ptStart < len(points):
            mask = cv2.circle(mask, points[ptStart], 5, color[index % len(color)], -1)
            ptStart += 1

    return mask


def get_curves(mask, label):
    # read the per-image json label
    with open(label) as label_content:
        label_info = json.load(label_content)['annotations']

    for index, line in enumerate(label_info['lane']):
        points_x = []
        points_y = []
        # get points
        for point in line['points']:
            points_x.append(int(float(point[0])))
            points_y.append(int(float(point[1])))

        ptStart = 0
        ptEnd = 1

        points = list(zip(points_x, points_y))
        # sort along y, then x
        points = sorted(points, key=lambda k: (k[1], k[0]))

        # connect consecutive points with line segments
        while ptEnd < len(points):
            mask = cv2.line(mask, points[ptStart], points[ptEnd], color[index % len(color)], 4, lineType=8)
            ptStart += 1
            ptEnd += 1

    return mask


if __name__ == '__main__':
    # choose the dataset split from: 'train', 'test'
    datasets_category = 'train'
    # choose the visualization mode: 'points' or 'curves'
    vis_mode = 'curves'
    # dataset dir
    dataset_dir = '/mnt/h/lane_datasets/VIL-100'
    # save dir for the visualizations
    save_mask_dir = '{}/{}_{}'.format(dataset_dir, "vis_datasets", vis_mode)
    if not os.path.exists(save_mask_dir):
        os.makedirs(save_mask_dir)

    # read the image list from txt
    txt_file = dataset_dir + '/data/{}.txt'.format(datasets_category)
    file_list = open(txt_file)
    for file in file_list:
        file = file.strip()
        full_img_path = dataset_dir + file

        if not os.path.exists(full_img_path):
            continue
        print("Now dealing with:", file)
        file_name = os.path.splitext(file.split('/')[-1])[0]  # image name without extension
        json_file = dataset_dir + file.replace('JPEGImages', 'Json') + '.json'

        img = cv2.imread(full_img_path)

        # images have different heights and widths, so read the shape per image
        h = img.shape[0]
        w = img.shape[1]

        # parse label and draw on the original image
        if vis_mode == 'points':
            # visualize points
            label_mask = get_points(img, json_file)
        else:
            # visualize curves
            label_mask = get_curves(img, json_file)

        cv2.imencode('.png', label_mask)[1].tofile('{}/{}.png'.format(save_mask_dir, file_name))

    print("finished~~")

--------------------------------------------------------------------------------