├── .gitignore ├── README.md ├── STCrowd_convert.py ├── camera.json └── split.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # STCrowd 3 | 4 | This repository is for the STCrowd dataset and the official implementation of **STCrowd: A Multimodal Dataset for Pedestrian Perception in Crowded Scenes**. 5 | 6 | ## Dataset 7 | More information is available on the project [Homepage](https://4dvlab.github.io/STCrowd/index.html). 8 | 9 | The dataset can also be directly downloaded from [STCrowd DATA](https://drive.google.com/file/d/1cw8Ats2jYSkUK-g-5lumF2pY_NKSehKS/view?usp=sharing). 10 | ## Installation 11 | 12 | ### Requirements 13 | - PyTorch 14 | - yaml 15 | - mmcv 16 | - mmdet 17 | - mmdet3d 18 | - mmpycocotools 19 | 20 | ## Data Preparation 21 | The original annotation result is saved in **SEQUENCE_NUM.json** for each continuous sequence; for more details, see the example in **anno/sample.json**. 22 | 23 | Please prepare the dataset with the following folder structure: 24 | 25 | ``` 26 | ./ 27 | └── Path_To_STCrowd/ 28 | ├──split.json 29 | ├──anno 30 | ├── 1.json 31 | ├── 2.json 32 | └── ... 33 | ├── left 34 | ├── 1 35 | | ├── XXX.jpg 36 | | ├── XXX.jpg 37 | │ └── ... 38 | ├── 2 39 | ├── ... 40 | ├── right 41 | ├── 1 42 | | ├── XXX.jpg 43 | | ├── XXX.jpg 44 | │ └── ... 45 | ├── 2 46 | ├── ...
"""Convert raw STCrowd sequence annotations into per-split info lists.

Reads ``anno/<SEQUENCE_NUM>.json`` files for the sequences listed in
``split.json`` and dumps a flat list of per-frame info dicts to
``STCrowd_infos_<split>.pkl``.
"""
import argparse
import json
import os
from concurrent import futures

import numpy as np

# Persons whose 3D bounding box is shorter than this are treated as sitting
# and are skipped -- the converter currently keeps standing pedestrians only.
MIN_STANDING_HEIGHT = 1.2


def options():
    """Parse command-line arguments for the STCrowd conversion script.

    Returns:
        argparse.Namespace with ``path_root``, ``split_file`` and ``split``.
    """
    parser = argparse.ArgumentParser(description='STCrowd converting ...')
    parser.add_argument('--path_root', type=str, default='/remote-home/share/STCrowd/')
    parser.add_argument('--split_file', type=str, default='split.json')  # the split file
    parser.add_argument('--split', type=str, default='train')  # train / val
    return parser.parse_args()


def load_file(path_root, load_dict):
    """Convert one annotated continuous sequence into a list of frame infos.

    Args:
        path_root: dataset root directory; joined with the last four path
            components stored in the annotation file.
        load_dict: parsed content of one ``anno/<SEQUENCE_NUM>.json`` file
            (keys used: ``frames``, ``total_number``, ``group_id``).

    Returns:
        List of per-frame info dicts with ``image``, ``point_cloud`` and
        ``annos`` entries.
    """

    def _velocity(idx, item):
        # Displacement of `item` between frames[idx - 1] and frames[idx].
        # NOTE(review): the original comment spoke of the "next" frame, but the
        # code always compared against frames[idx - 1]; that behaviour is kept.
        # Returns [0, 0] when there is no previous frame or the tracking id is
        # not found there (originally implemented via a silently caught
        # NameError inside a bare except).
        if idx == 0:
            return [0, 0]
        for prev in load_dict['frames'][idx - 1]['items']:
            if prev['id'] == item['id']:
                return [prev['position']['x'] - item['position']['x'],
                        prev['position']['y'] - item['position']['y']]
        return [0, 0]

    def process_single_scene(idx):
        # Build the complete info dict for frame `idx` of this sequence.
        frame = load_dict['frames'][idx]
        image_info = {
            'image_idx': idx,
            'image_path': os.path.join(
                path_root,
                '/'.join(frame['images'][0]['image_name'].split('/')[-4:])),
        }
        pc_info = {
            'num_features': 4,
            'point_cloud_path': os.path.join(
                path_root, '/'.join(frame['frame_name'].split('/')[-4:])),
        }
        info = {'image': image_info, 'point_cloud': pc_info}

        position_list, boundingbox_list = [], []
        occlusion_list, rotation_list = [], []
        v_list, tracking_id_list = [], []  # velocities / ids kept for tracking

        if not frame['items']:
            # Frame without any object: insert one tiny dummy box so that
            # downstream consumers never receive empty annotation arrays.
            position_list.append([0.1, 0.1, 0.1])
            boundingbox_list.append([0.001, 0.001, 0.001])
            occlusion_list.append(0)
            rotation_list.append(0)
            v_list.append([0, 0])
            tracking_id_list.append(0)

        for item in frame['items']:
            if item['boundingbox']['z'] < MIN_STANDING_HEIGHT:
                continue  # delete the sitting person currently
            position_list.append(
                [item['position']['x'], item['position']['y'], item['position']['z']])
            boundingbox_list.append(
                [item['boundingbox']['x'], item['boundingbox']['y'],
                 item['boundingbox']['z']])
            occlusion_list.append(item['occlusion'])
            rotation_list.append(item['rotation'])
            v_list.append(_velocity(idx, item))
            tracking_id_list.append(item['id'])

        # 2D annotations from the first image of the frame.
        img_id, img_bbox, img_occ, img_point_list = [], [], [], []
        for item in frame['images'][0]['items']:
            corners = [[p['x'], p['y']] for p in item['points']]
            img_id.append(item['id'])
            img_occ.append(item['occlusion'])
            # Projected 3D box corners; the reshape doubles as an 8-corner check.
            img_point_list.append(np.asarray(corners).reshape(8, 2))
            img_bbox.append([item['boundingbox']['x'], item['boundingbox']['y'],
                             item['dimension']['x'], item['dimension']['y']])

        info['annos'] = {
            'position': position_list,
            'dimensions': boundingbox_list,
            'occlusion': occlusion_list,
            'rotation': rotation_list,
            'tracking_id': tracking_id_list,
            'image_bbox': {
                '3D': img_point_list,
                '2D': img_bbox,
                'occlusion': img_occ,
            },
            'image_id': img_id,
            'tracking': {
                'group_id': load_dict['group_id'],
                'indx': idx,  # key name kept for backward compatibility
                'velocity': v_list,
                'item_id': tracking_id_list,
            },
        }
        return info

    ids = range(load_dict['total_number'])
    # Single worker kept from the original code: preserves frame order and
    # avoids concurrent access to load_dict.
    with futures.ThreadPoolExecutor(1) as executor:
        return list(executor.map(process_single_scene, ids))


def create_data_info(data_path, file_list, pkl_prefix='STCrowd'):
    """Load every sequence listed in `file_list` and concatenate their infos.

    Args:
        data_path: dataset root containing the ``anno`` directory.
        file_list: sequence numbers for one split (as listed in split.json).
        pkl_prefix: kept for interface compatibility; not used here.

    Returns:
        Flat list of frame info dicts for the whole split.
    """
    anno_dir = os.path.join(data_path, 'anno')
    infos = []
    for seq in file_list:
        with open(os.path.join(anno_dir, f'{seq}.json'), 'r') as load_f:
            load_dict = json.load(load_f)
        infos.extend(load_file(data_path, load_dict))
    return infos


def main():
    """Convert one split of STCrowd and dump it to ``STCrowd_infos_<split>.pkl``."""
    # Imported here so the conversion helpers above stay usable without mmcv.
    import mmcv

    args = options()
    path_root = args.path_root
    split = args.split
    with open(os.path.join(path_root, args.split_file), 'r') as load_f:
        split_dict = json.load(load_f)
    info = create_data_info(data_path=path_root, file_list=split_dict[split])
    filename = os.path.join(path_root, f'STCrowd_infos_{split}.pkl')
    # Original printed the literal "(unknown)" -- report the actual output path.
    print(f'dataset info {split} file is saved to {filename}')
    mmcv.dump(info, filename)


if __name__ == "__main__":
    main()