├── .gitignore
├── Icon
│   ├── Lying.bmp
│   ├── Bending.bmp
│   ├── Falling.bmp
│   ├── Nothing.bmp
│   ├── Others.bmp
│   ├── Phoning.bmp
│   ├── Running.bmp
│   ├── Sitting.bmp
│   ├── Smoking.bmp
│   ├── Texting.bmp
│   ├── Walking.bmp
│   ├── Bicycling.bmp
│   ├── Littering.bmp
│   ├── Standing.bmp
│   └── Stationary.bmp
├── DB
│   ├── AR-002-12-20151017-01-09.mkv
│   ├── AR-003-12-20151024-02-04.avi
│   ├── AR-002-12-20151017-01-09.txt
│   └── AR-003-12-20151024-02-04.txt
├── main.py
├── README.md
├── ICVL_action_structure.py
├── write2excel.py
└── ICVL_data_reader.py
/.gitignore:
--------------------------------------------------------------------------------
1 | __pycache__
2 | *.xlsx
3 | .idea/
4 | github_imgs/
5 |
--------------------------------------------------------------------------------
/Icon/Lying.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Lying.bmp
--------------------------------------------------------------------------------
/Icon/Bending.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Bending.bmp
--------------------------------------------------------------------------------
/Icon/Falling.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Falling.bmp
--------------------------------------------------------------------------------
/Icon/Nothing.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Nothing.bmp
--------------------------------------------------------------------------------
/Icon/Others.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Others.bmp
--------------------------------------------------------------------------------
/Icon/Phoning.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Phoning.bmp
--------------------------------------------------------------------------------
/Icon/Running.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Running.bmp
--------------------------------------------------------------------------------
/Icon/Sitting.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Sitting.bmp
--------------------------------------------------------------------------------
/Icon/Smoking.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Smoking.bmp
--------------------------------------------------------------------------------
/Icon/Texting.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Texting.bmp
--------------------------------------------------------------------------------
/Icon/Walking.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Walking.bmp
--------------------------------------------------------------------------------
/Icon/Bicycling.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Bicycling.bmp
--------------------------------------------------------------------------------
/Icon/Littering.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Littering.bmp
--------------------------------------------------------------------------------
/Icon/Standing.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Standing.bmp
--------------------------------------------------------------------------------
/Icon/Stationary.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/Icon/Stationary.bmp
--------------------------------------------------------------------------------
/DB/AR-002-12-20151017-01-09.mkv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/DB/AR-002-12-20151017-01-09.mkv
--------------------------------------------------------------------------------
/DB/AR-003-12-20151024-02-04.avi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ChengBinJin/ActionViewer/HEAD/DB/AR-003-12-20151024-02-04.avi
--------------------------------------------------------------------------------
/DB/AR-002-12-20151017-01-09.txt:
--------------------------------------------------------------------------------
1 | Frame Head.X Head.Y Feet.X Feet.Y ObjC ObjId 1st_stage 2nd_stage 3rd_stage 4th_stage
2 | 101 172 318 376 479 1 1 0 0 0 2
3 | 102 178 317 384 479 1 1 0 0 0 2
4 | 103 184 315 392 479 1 1 0 0 0 2
5 | 104 200 329 392 479 1 1 0 0 0 2
6 | 105 217 343 393 479 1 1 0 0 0 2
7 | 106 221 355 397 479 1 1 0 0 0 2
8 | 107 225 367 402 479 1 1 0 0 0 2
9 | 108 240 373 404 479 1 1 0 0 0 2
10 | 109 256 379 406 479 1 1 0 0 0 2
11 | 110 265 378 406 479 1 1 0 0 0 2
12 | 111 274 377 406 479 1 1 0 0 0 2
13 | 112 282 360 435 479 1 1 0 0 0 2
14 | 113 290 343 465 479 1 1 0 0 0 2
15 | 114 301 342 463 479 1 1 0 0 0 2
16 | 115 312 340 460 479 1 1 0 0 0 2
17 | 116 316 336 461 478 1 1 0 0 0 2
18 | 117 320 332 462 477 1 1 0 0 0 2
19 | 118 316 332 460 477 1 1 0 0 0 2
20 | 119 312 331 457 477 1 1 0 0 0 2
21 | 120 312 328 462 478 1 1 0 0 0 2
22 | 121 313 324 467 479 1 1 0 0 0 2
23 | 122 318 327 468 477 1 1 0 0 0 2
24 | 123 323 331 470 475 1 1 0 0 0 2
25 | 124 329 338 471 476 1 1 0 0 0 2
26 | 125 336 346 472 477 1 1 0 0 0 2
27 | 126 325 350 473 478 1 1 0 0 0 2
28 | 127 314 354 474 479 1 1 2 0 0 0
29 | 128 314 353 474 479 1 1 2 0 0 0
30 | 129 315 352 474 479 1 1 2 0 0 0
31 | 130 315 356 472 477 1 1 2 0 0 0
32 | 131 316 361 469 474 1 1 2 0 0 0
33 | 132 315 361 472 476 1 1 2 0 0 0
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import argparse
3 |
4 | from ICVL_data_reader import ICVL
5 | from write2excel import Writer
6 |
7 | parser = argparse.ArgumentParser(description='')
8 | parser.add_argument('--resize_ratio', dest='resize_ratio', type=float, default=1.0,
9 | help='resize ratio for input frame')
10 | parser.add_argument('--interval_time', dest='interval_time', type=int, default=20,
11 | help='interval time between two frames')
12 |
13 | args = parser.parse_args()
14 |
15 |
16 | def main(video_list):
17 | writer = Writer()
18 | video_id = 1
19 |
20 | for video_path in video_list:
21 | video_reader(video_id, video_path, writer)
22 | video_id += 1
23 |
24 |
25 | def video_reader(video_id, path, writer):
26 | reader = ICVL(video_id, path=path, resize_ratio=args.resize_ratio, interval_time=args.interval_time)
27 | reader.read_gt()
28 | writer.write2excel(path, video_id, reader.GT, reader.videoTime)
29 | reader.show()
30 |
31 |
32 | if __name__ == '__main__':
33 | path = './DB'
34 | video_types = ['.avi', '.mkv']
35 |
36 | filenames = []
37 | for video_type in video_types:
38 | filenames.extend([os.path.join(path, fname) for fname in os.listdir(path) if fname.endswith(video_type)])
39 |
40 | print(filenames)
41 |
42 | main(filenames)
43 |
44 |
--------------------------------------------------------------------------------
/DB/AR-003-12-20151024-02-04.txt:
--------------------------------------------------------------------------------
1 | Frame Head.X Head.Y Feet.X Feet.Y ObjC ObjId 1st_stage 2nd_stage 3rd_stage 4th_stage
2 | 11 332 109 395 273 1 1 3 0 0 2
3 | 12 332 116 393 279 1 1 3 0 0 2
4 | 13 333 123 390 285 1 1 3 0 0 2
5 | 14 333 128 392 287 1 1 3 0 0 2
6 | 15 333 133 394 289 1 1 3 0 0 2
7 | 16 332 139 396 292 1 1 3 0 0 2
8 | 17 330 142 396 293 1 1 3 0 0 2
9 | 18 328 145 395 294 1 1 3 0 0 2
10 | 19 326 149 394 296 1 1 3 0 0 2
11 | 20 326 149 394 300 1 1 3 0 0 2
12 | 21 325 149 394 304 1 1 3 0 0 2
13 | 22 324 148 393 309 1 1 3 0 0 2
14 | 23 322 150 391 308 1 1 3 0 0 2
15 | 24 320 152 389 307 1 1 3 0 0 2
16 | 25 318 155 387 305 1 1 3 0 0 2
17 | 26 318 157 387 312 1 1 3 0 0 2
18 | 27 318 159 388 319 1 1 3 0 0 2
19 | 28 318 161 389 326 1 1 3 0 0 2
20 | 29 317 163 390 334 1 1 3 0 0 2
21 | 30 317 171 390 339 1 1 3 0 0 2
22 | 31 317 179 391 345 1 1 3 0 0 2
23 | 32 316 188 392 351 1 1 3 0 0 2
24 | 33 316 190 392 351 1 1 3 0 0 2
25 | 34 316 192 392 351 1 1 3 0 0 2
26 | 35 316 194 393 351 1 1 3 0 0 2
27 | 36 315 199 393 350 1 1 3 0 0 2
28 | 37 314 204 394 349 1 1 3 0 0 2
29 | 38 313 209 395 347 1 1 3 0 0 2
30 | 39 314 213 395 347 1 1 3 0 0 2
31 | 40 316 218 396 347 1 1 3 0 0 2
32 | 41 317 222 397 347 1 1 3 0 0 2
33 | 42 319 227 398 348 1 1 3 0 0 2
34 | 43 319 234 397 356 1 1 3 0 0 2
35 | 44 319 242 396 365 1 1 3 0 0 2
36 | 45 319 249 395 374 1 1 3 0 0 2
37 | 46 319 257 393 383 1 1 3 0 0 2
38 | 47 318 258 393 383 1 1 3 0 0 2
39 | 48 316 259 393 383 1 1 3 0 0 2
40 | 49 314 260 393 383 1 1 3 0 0 2
41 | 50 312 261 392 383 1 1 3 0 0 2
42 | 51 311 261 391 383 1 1 3 0 0 2
43 | 52 310 262 389 384 1 1 3 0 0 2
44 | 53 309 262 388 385 1 1 3 0 0 2
45 | 54 308 263 386 386 1 1 3 0 0 2
46 | 55 307 263 385 387 1 1 3 0 0 2
47 | 56 305 264 383 388 1 1 3 0 0 2
48 | 57 305 265 383 389 1 1 3 0 0 2
49 | 58 304 266 383 390 1 1 3 0 0 2
50 | 59 304 267 383 392 1 1 3 0 0 2
51 | 60 303 269 384 394 1 1 3 0 0 2
52 | 61 303 269 385 394 1 1 3 0 0 2
53 | 62 303 269 386 394 1 1 3 0 0 2
54 | 63 303 269 387 394 1 1 3 0 0 2
55 | 64 303 269 388 394 1 1 3 0 0 2
56 | 65 303 269 389 394 1 1 3 0 0 2
57 | 66 305 272 391 394 1 1 3 0 0 2
58 | 67 307 275 393 394 1 1 3 0 0 2
59 | 68 309 278 395 394 1 1 3 0 0 2
60 | 69 311 281 397 394 1 1 3 0 0 2
61 | 70 313 283 399 394 1 1 3 0 0 2
62 | 71 315 287 401 394 1 1 3 0 0 2
63 | 72 318 290 404 393 1 1 3 0 0 2
64 | 73 323 293 409 396 1 1 2 0 0 0
65 | 74 328 297 414 400 1 1 2 0 0 0
66 | 75 333 301 419 404 1 1 2 0 0 0
67 | 76 338 304 424 407 1 1 2 0 0 0
68 | 77 343 308 429 411 1 1 2 0 0 0
69 | 78 348 312 434 415 1 1 2 0 0 0
70 | 79 348 312 434 415 1 1 2 0 0 0
71 | 80 348 311 434 416 1 1 2 0 0 0
72 | 81 348 311 434 417 1 1 2 0 0 0
73 | 82 348 310 434 418 1 1 2 0 0 0
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ActionViewer
2 | This repository is the Action Viewer for the ICVL Action Dataset. More information can be found in our [paper](https://arxiv.org/abs/1710.03383). The following image shows the hierarchical action structure. There are 4 layers: the posture, locomotion, gesture, and event layers, so one action is represented by 4 sub-action labels.
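
For reference, here is a minimal sketch of how one ground-truth row from the `DB/*.txt` files maps to the four sub-action labels, using `int2stringLabel` from `ICVL_action_structure.py` (assuming it is run from the repository root; the sample row is the first entry of `DB/AR-003-12-20151024-02-04.txt`):

```
from ICVL_action_structure import int2stringLabel

# Frame Head.X Head.Y Feet.X Feet.Y ObjC ObjId 1st 2nd 3rd 4th stage
gt_row = [11, 332, 109, 395, 273, 1, 1, 3, 0, 0, 2]

# columns 7-10 carry the per-layer integer labels
labels = int2stringLabel(gt_row)
print(labels)  # ['Bending', 'Stationary', 'Nothing', 'Falling']
```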
3 |
4 |
5 |
6 |
7 |
8 | ## Requirements
9 | - opencv 3.3.1
10 | - numpy 1.15.4
11 | - xlsxwriter 0.9.6
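
If installing with pip, something like the following should work (`opencv-python` is the usual PyPI distribution name for OpenCV, not part of this repository; pin the versions listed above if needed):

```
pip install opencv-python numpy xlsxwriter
```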
12 |
13 | ## Action Viewer Demo
14 |
15 |
16 |
17 |
18 | ## Documentation
19 | ### Directory Hierarchy
20 | ```
21 | .
22 | │ ActionViewer
23 | │ ├── DB
24 | │ │ ├── AR-002-12-20151017-01-09.mkv
25 | │ │ ├── AR-002-12-20151017-01-09.txt
26 | │ │ ├── AR-003-12-20151024-02-04.avi
27 | │ │ └── AR-003-12-20151024-02-04.txt
28 | │ ├── Icon
29 | │ │ ├── Bending.bmp
30 | │ │ ├── Bicycling.bmp
31 | │ │ ├── Falling.bmp
32 | │ │ ├── Littering.bmp
33 | │ │ ├── Lying.bmp
34 | │ │ ├── Nothing.bmp
35 | │ │ ├── Others.bmp
36 | │ │ ├── Phoning.bmp
37 | │ │ ├── Running.bmp
38 | │ │ ├── Sitting.bmp
39 | │ │ ├── Smoking.bmp
40 | │ │ ├── Standing.bmp
41 | │ │ ├── Stationary.bmp
42 | │ │ ├── Texting.bmp
43 | │ │ └── Walking.bmp
44 | │ ├── ICVL_action_structure.py
45 | │ ├── ICVL_data_reader.py
46 | │ ├── main.py
47 | │ └── write2excel.py
48 | ```
49 |
50 | ### Run Action-Viewer
51 | Run `main.py` from the ActionViewer directory.
52 |
53 | ```
54 | python main.py
55 | ```
56 | - `--resize_ratio`: resize ratio for input frame, default: `1.0`
57 | - `--interval_time`: interval time between two frames, default: `20`
58 | **Note:** `main.py` reads all of the videos in `./DB` and their corresponding txt files.
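
For example, to play the videos at half resolution with a 30 ms delay between frames (the flag values here are illustrative; the flags are defined in `main.py`):

```
python main.py --resize_ratio 0.5 --interval_time 30
```

While a video window is open, the key handling in `ICVL_data_reader.py`'s `show()` supports: `Space` to pause/resume, `v` to toggle the annotation overlay, `d`/`a` to step one frame forward/backward and `e`/`q` to jump 30 frames forward/backward while paused, `z` to move on to the next video, and `Esc` to quit.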
59 |
60 | ### ICVL Dataset
61 | Click [here](https://www.dropbox.com/sh/qvetvo6eqz1oi9l/AACXIqWiAaXNGlvpD3qUncAva?dl=0) to download the ICVL dataset. Please cite the following paper when using the ICVL dataset.
62 |
63 | ### Citation
64 | ```
65 | @article{jin2017real,
66 | title={Real-time action detection in video surveillance using sub-action descriptor with multi-cnn},
67 | author={Jin, Cheng-Bin and Li, Shengzhe and Kim, Hakil},
68 | journal={arXiv preprint arXiv:1710.03383},
69 | year={2017}
70 | }
71 | ```
72 |
73 | ## License
74 | Copyright (c) 2018 Cheng-Bin Jin. Contact me for commercial use (or rather any use that is not academic research) (email: sbkim0407@gmail.com). Free for research use, as long as proper attribution is given and this copyright notice is retained.
75 |
--------------------------------------------------------------------------------
/ICVL_action_structure.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 |
4 | # noinspection PyPep8Naming
5 | def int2stringLabel(gt):
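    """Map one ground-truth row to its four string labels.

    gt is a parsed row from the DB/*.txt annotation files; columns 7-10 hold
    the integer labels of the posture, locomotion, gesture, and interaction
    layers. Returns [firstLabel, secondLabel, thirdLabel, fourthLabel] as strings.
    """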
6 | postureLayer = {
7 | "0": "Sitting",
8 | "1": "Standing",
9 | "2": "Lying",
10 | "3": "Bending"
11 | }
12 |
13 | locomotionLayer = {
14 | "0": "Stationary",
15 | "1": "Walking",
16 | "2": "Running",
17 | "3": "Bicycling"
18 | }
19 |
20 | gestureLayer = {
21 | "0": "Nothing",
22 | "1": "Texting",
23 | "2": "Smoking",
24 | "3": "Phoning",
25 | "9": "Others"}
26 |
27 | interactionLayer = {
28 | "0": "Nothing",
29 | "1": "Littering",
30 | "2": "Falling"}
31 |
32 | firstLabel = ""
33 | secondLabel = ""
34 | thirdLabel = ""
35 | fourthLabel = ""
36 |
37 | try:
38 | firstLabel = postureLayer[str(gt[7])]
39 | except KeyError:
40 |         print("There is a wrong label in the posture layer!")
41 |
42 | try:
43 | secondLabel = locomotionLayer[str(gt[8])]
44 | except KeyError:
45 |         print("There is a wrong label in the locomotion layer!")
46 |
47 | try:
48 | thirdLabel = gestureLayer[str(gt[9])]
49 | except KeyError:
50 |         print("There is a wrong label in the gesture layer!")
51 |
52 | try:
53 | fourthLabel = interactionLayer[str(gt[10])]
54 | except KeyError:
55 |         print("There is a wrong label in the interaction layer!")
56 |
57 | return [firstLabel, secondLabel, thirdLabel, fourthLabel]
58 |
59 |
60 | # noinspection PyPep8Naming
61 | def readIcon():
62 | SIZE = 64
63 |
64 | sittingImg = cv2.imread("./Icon/Sitting.bmp")
65 | sittingImg = cv2.resize(sittingImg, (SIZE, SIZE))
66 |
67 | standingImg = cv2.imread("./Icon/Standing.bmp")
68 | standingImg = cv2.resize(standingImg, (SIZE, SIZE))
69 |
70 |     lyingImg = cv2.imread("./Icon/Lying.bmp")
71 | lyingImg = cv2.resize(lyingImg, (SIZE, SIZE))
72 |
73 | bendingImg = cv2.imread("./Icon/Bending.bmp")
74 | bendingImg = cv2.resize(bendingImg, (SIZE, SIZE))
75 |
76 | stationaryImg = cv2.imread("./Icon/Stationary.bmp")
77 | stationaryImg = cv2.resize(stationaryImg, (SIZE, SIZE))
78 |
79 | walkingImg = cv2.imread("./Icon/Walking.bmp")
80 | walkingImg = cv2.resize(walkingImg, (SIZE, SIZE))
81 |
82 | runningImg = cv2.imread("./Icon/Running.bmp")
83 | runningImg = cv2.resize(runningImg, (SIZE, SIZE))
84 |
85 | bicyclingImg = cv2.imread("./Icon/Bicycling.bmp")
86 | bicyclingImg = cv2.resize(bicyclingImg, (SIZE, SIZE))
87 |
88 | fallingImg = cv2.imread("./Icon/Falling.bmp")
89 | fallingImg = cv2.resize(fallingImg, (SIZE, SIZE))
90 |
91 | nothingImg = cv2.imread("./Icon/Nothing.bmp")
92 | nothingImg = cv2.resize(nothingImg, (SIZE, SIZE))
93 |
94 | textingImg = cv2.imread("./Icon/Texting.bmp")
95 | textingImg = cv2.resize(textingImg, (SIZE, SIZE))
96 |
97 | smokingImg = cv2.imread("./Icon/Smoking.bmp")
98 | smokingImg = cv2.resize(smokingImg, (SIZE, SIZE))
99 |
100 | phoningImg = cv2.imread("./Icon/Phoning.bmp")
101 | phoningImg = cv2.resize(phoningImg, (SIZE, SIZE))
102 |
103 | othersImg = cv2.imread("./Icon/Others.bmp")
104 | othersImg = cv2.resize(othersImg, (SIZE, SIZE))
105 |
106 | litteringImg = cv2.imread("./Icon/Littering.bmp")
107 | litteringImg = cv2.resize(litteringImg, (SIZE, SIZE))
108 |
109 | return {"Sitting": sittingImg,
110 | "Standing": standingImg,
111 | "Lying": lyingImg,
112 | "Bending": bendingImg,
113 | "Stationary": stationaryImg,
114 | "Walking": walkingImg,
115 | "Running": runningImg,
116 | "Bicycling": bicyclingImg,
117 | "Falling": fallingImg,
118 | "Nothing": nothingImg,
119 | "Texting": textingImg,
120 | "Smoking": smokingImg,
121 | "Phoning": phoningImg,
122 | "Others": othersImg,
123 | "Littering": litteringImg
124 | }
125 |
--------------------------------------------------------------------------------
/write2excel.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import xlsxwriter
3 |
4 |
5 | class Writer():
6 | def __init__(self):
7 |
8 | # Create a workbook and add a worksheet.
9 | self.workbook = xlsxwriter.Workbook('Statistics.xlsx')
10 |
11 | self.worksheet = self.workbook.add_worksheet()
12 | self.xlsFormat = self.workbook.add_format()
13 | self.xlsFormat.set_align('center')
14 | self.xlsFormat.set_valign('vcenter')
15 |
16 | self.write_keywords()
17 |
18 | def __del__(self):
19 | self.workbook.close()
20 |
21 | def write_keywords(self):
22 |         # write the header fields
23 | self.worksheet.write(0, 0, 'Number', self.xlsFormat) # index of the video
24 | self.worksheet.write(0, 1, 'video_name', self.xlsFormat) # video name
25 |
26 | self.worksheet.write(0, 2, 'Num. of people', self.xlsFormat) # number of people
27 |
28 | self.worksheet.write(0, 3, 'Time', self.xlsFormat) # video time
29 |
30 | self.worksheet.write(0, 4, '0:Sitting', self.xlsFormat)
31 | self.worksheet.write(0, 5, '1:Standing', self.xlsFormat)
32 | self.worksheet.write(0, 6, '2:Lying', self.xlsFormat)
33 |
34 | self.worksheet.write(0, 7, '0:Stationary', self.xlsFormat)
35 |         self.worksheet.write(0, 8, '1:Walking', self.xlsFormat)
36 | self.worksheet.write(0, 9, '2:Running', self.xlsFormat)
37 | self.worksheet.write(0, 10, '3:Bicycling', self.xlsFormat)
38 | self.worksheet.write(0, 11, '4:Falling', self.xlsFormat)
39 |
40 | self.worksheet.write(0, 12, '0:Nothing', self.xlsFormat)
41 | self.worksheet.write(0, 13, '1:Texting', self.xlsFormat)
42 | self.worksheet.write(0, 14, '2:Smoking', self.xlsFormat)
43 | self.worksheet.write(0, 15, '3:Phoning', self.xlsFormat)
44 |         self.worksheet.write(0, 16, '9:Others', self.xlsFormat)
45 |
46 | self.worksheet.write(0, 17, '0:Nothing', self.xlsFormat)
47 | self.worksheet.write(0, 18, '1:Littering', self.xlsFormat)
48 |
49 | def write2excel(self, path, video_id, gt, video_time):
50 | statistics = dict()
51 |
52 | statistics['video_name'] = path[-8:]
53 |
54 | gt = np.asarray(gt)
55 | statistics['num_of_people'] = np.max(gt, axis=0)[6]
56 |
57 | statistics['num_of_sitting'] = np.sum(gt[:, 7] == 0)
58 | statistics['num_of_standing'] = np.sum(gt[:, 7] == 1)
59 | statistics['num_of_lying'] = np.sum(gt[:, 7] == 2)
60 |
61 | statistics['num_of_stationary'] = np.sum(gt[:, 8] == 0)
62 | statistics['num_of_walking'] = np.sum(gt[:, 8] == 1)
63 | statistics['num_of_running'] = np.sum(gt[:, 8] == 2)
64 | statistics['num_of_bicycling'] = np.sum(gt[:, 8] == 3)
65 | statistics['num_of_falling'] = np.sum(gt[:, 8] == 4)
66 |
67 | statistics['num_of_nothing_3'] = np.sum(gt[:, 9] == 0)
68 | statistics['num_of_texting'] = np.sum(gt[:, 9] == 1)
69 | statistics['num_of_smoking'] = np.sum(gt[:, 9] == 2)
70 | statistics['num_of_phoning'] = np.sum(gt[:, 9] == 3)
71 | statistics['num_of_others'] = np.sum(gt[:, 9] == 9)
72 |
73 | statistics['num_of_nothing_4'] = np.sum(gt[:, 10] == 0)
74 | statistics['num_of_littering'] = np.sum(gt[:, 10] == 1)
75 |
76 | print("Name: ", statistics['video_name'])
77 | # print("People: ", statistics['num_of_people'])
78 |
79 | # print("Sitting: ", statistics['num_of_sitting'])
80 | # print("Standing: ", statistics['num_of_standing'])
81 | # print("Lying: ", statistics['num_of_lying'])
82 |
83 | # print("Stationary: ", statistics['num_of_stationary'])
84 | # print("Walking: ", statistics['num_of_walking'])
85 | # print("Running: ", statistics['num_of_running'])
86 | # print("Bicycling: ", statistics['num_of_bicycling'])
87 | # print("Falling: ", statistics['num_of_falling'])
88 |
89 | # print("Nothing: ", statistics['num_of_nothing_3'])
90 | # print("Texting: ", statistics['num_of_texting'])
91 | # print("Smoking: ", statistics['num_of_smoking'])
92 | # print("Phoning: ", statistics['num_of_phoning'])
93 | # print("Others: ", statistics['num_of_others'])
94 |
95 | # print("Nothing: ", statistics['num_of_nothing_4'])
96 | # print("Littering: ", statistics['num_of_littering'])
97 |
98 | # print("Video ID: ", video_id)
99 | # write statistics information
100 | self.worksheet.write(video_id, 0, video_id, self.xlsFormat) # index of the video
101 | self.worksheet.write(video_id, 1, statistics['video_name'], self.xlsFormat) # video name
102 |
103 | self.worksheet.write(video_id, 2, statistics['num_of_people'], self.xlsFormat) # number of people
104 |
105 | # convert second to format time
106 | self.worksheet.write(video_id, 3, self.convert2format(video_time), self.xlsFormat) # video time
107 |
108 | self.worksheet.write(video_id, 4, statistics['num_of_sitting'], self.xlsFormat)
109 | self.worksheet.write(video_id, 5, statistics['num_of_standing'], self.xlsFormat)
110 | self.worksheet.write(video_id, 6, statistics['num_of_lying'], self.xlsFormat)
111 |
112 | self.worksheet.write(video_id, 7, statistics['num_of_stationary'], self.xlsFormat)
113 | self.worksheet.write(video_id, 8, statistics['num_of_walking'], self.xlsFormat)
114 | self.worksheet.write(video_id, 9, statistics['num_of_running'], self.xlsFormat)
115 | self.worksheet.write(video_id, 10, statistics['num_of_bicycling'], self.xlsFormat)
116 | self.worksheet.write(video_id, 11, statistics['num_of_falling'], self.xlsFormat)
117 |
118 | self.worksheet.write(video_id, 12, statistics['num_of_nothing_3'], self.xlsFormat)
119 | self.worksheet.write(video_id, 13, statistics['num_of_texting'], self.xlsFormat)
120 | self.worksheet.write(video_id, 14, statistics['num_of_smoking'], self.xlsFormat)
121 | self.worksheet.write(video_id, 15, statistics['num_of_phoning'], self.xlsFormat)
122 | self.worksheet.write(video_id, 16, statistics['num_of_others'], self.xlsFormat)
123 |
124 | self.worksheet.write(video_id, 17, statistics['num_of_nothing_4'], self.xlsFormat)
125 | self.worksheet.write(video_id, 18, statistics['num_of_littering'], self.xlsFormat)
126 |
127 | @staticmethod
128 | def convert2format(time):
129 | hour = time // 60 // 60
130 |         minute = (time // 60) % 60  # minutes within the hour, not total minutes
131 | second = time % 60
132 |
133 | format_time = str(hour) + ":" + str(minute).zfill(2) + ":" + str(second).zfill(2)
134 | return format_time
135 |
136 |
--------------------------------------------------------------------------------
/ICVL_data_reader.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import re
4 | import sys
5 |
6 | from ICVL_action_structure import int2stringLabel, readIcon
7 |
8 |
9 | class ICVL:
10 | def __init__(self, video_id, path, resize_ratio=1.0, interval_time=1):
11 | self.resizeRatio = resize_ratio
12 | self.intervalTime = interval_time
13 | self.total_frames = 0
14 |
15 | self.videoID = video_id
16 | self.path = path
17 | self.GTPath = self.path[:-3] + 'txt'
18 |
19 | self.GT = [] # frame number, head_x, head_y, feet_x, feet_y, objc, objId, 1st_stage, 2nd_stage,
20 | # 3rd_stage, 4th_stage
21 | self.color = (0, 51, 255)
22 | self.thickness = 3
23 |
24 | self.fontFace = cv2.FONT_HERSHEY_TRIPLEX
25 | self.fontColor = (255, 255, 255)
26 | self.fontScale = 1.0 * self.resizeRatio
27 | self.fontThickness = 1
28 |
29 | self.icons = readIcon()
30 | self.videoTime = self.get_video_time()
31 |
32 | self.stop_flag = False
33 | self.view_flag = True
34 | self.control = False
35 |
36 | self.video_cap = []
37 | self.read_frames() # read all of the frames first
38 |
39 | def read_frames(self):
40 | video_cap = cv2.VideoCapture(self.path)
41 | if video_cap.isOpened() is False:
42 |             print("Cannot open video!")
43 | return 0
44 |
45 | while True:
46 | ret, raw_frame = video_cap.read()
47 | if ret is False:
48 | print("Can't read the frame")
49 | break
50 |
51 | self.video_cap.append(raw_frame)
52 |
53 | fps = int(video_cap.get(cv2.CAP_PROP_FPS))
54 | self.total_frames = len(self.video_cap)
55 | self.videoTime = self.total_frames // fps
56 |
57 |         print("\nFinished reading video ", self.videoID)
58 | print("Total frames: ", self.total_frames)
59 |
60 | # When everything done, release the capture
61 | video_cap.release()
62 |
63 | def get_video_time(self):
64 | video_cap = cv2.VideoCapture(self.path)
65 | if video_cap.isOpened() is False:
66 |             print("Cannot open video!")
67 | return 0
68 |
69 | total_frame = int(video_cap.get(cv2.CAP_PROP_FRAME_COUNT))
70 | fps = int(video_cap.get(cv2.CAP_PROP_FPS))
71 |
72 | # When everything done, release the capture
73 | video_cap.release()
74 |
75 | return total_frame // fps
76 |
77 | def read_gt(self):
78 | with open(self.GTPath) as f:
79 |             f.readline()  # skip the header line
80 | for line in f:
81 | data_list = [int(data) for data in re.split(r'\t+', line.rstrip('\t'))]
82 | self.GT.append(data_list)
83 |
84 | # print("Length:", len(self.GT))
85 |         # the with-statement closes the file automatically, no explicit close needed
86 |
87 | def show(self):
88 | cv2.namedWindow(self.path[-25:], cv2.WINDOW_AUTOSIZE)
89 | cv2.moveWindow(self.path[-25:], 300, 50)
90 |
91 | frame_id = 0
92 | while True:
93 | if frame_id < self.total_frames:
94 | raw_frame = self.video_cap[frame_id]
95 | else:
96 | break
97 |
98 | print("Frame: ", frame_id)
99 |
100 | raw_frame = cv2.resize(raw_frame, (int(self.resizeRatio * raw_frame.shape[1]),
101 | int(self.resizeRatio * raw_frame.shape[0])))
102 | show_frame = raw_frame.copy()
103 | show_frame = self.draw(show_frame, frame_id)
104 |
105 | if self.view_flag:
106 | cv2.imshow(self.path[-25:], show_frame) # Display the resulting frame
107 | else:
108 | cv2.imshow(self.path[-25:], raw_frame)
109 |
110 | if self.stop_flag is False:
111 | asc_code = cv2.waitKey(self.intervalTime) & 0xFF
112 | if asc_code == ord('z'):
113 | break
114 | elif asc_code == 27: # Esc button
115 | sys.exit("Esc clicked!")
116 | elif asc_code == ord(' '):
117 | self.stop_flag = True
118 | elif asc_code == ord('v'):
119 | self.view_flag = not self.view_flag
120 | else:
121 | frame_id = frame_id + 1
122 | else:
123 | asc_code = cv2.waitKey(0) & 0xFF
124 | if asc_code == ord(' '):
125 | self.stop_flag = False
126 | elif asc_code == ord('z'):
127 | break
128 | elif asc_code == ord('e'):
129 | frame_id = self.check_frame_id(frame_id, 30, self.total_frames)
130 | elif asc_code == ord('q'):
131 | frame_id = self.check_frame_id(frame_id, -30, self.total_frames)
132 | elif asc_code == ord('d'):
133 | frame_id = self.check_frame_id(frame_id, 1, self.total_frames)
134 | elif asc_code == ord('a'):
135 | frame_id = self.check_frame_id(frame_id, -1, self.total_frames)
136 | elif asc_code == ord('v'):
137 | self.view_flag = not self.view_flag
138 | elif asc_code == 27: # Esc button
139 | sys.exit("Esc clicked")
140 |
141 | cv2.destroyAllWindows()
142 |         print("Finished processing!")
143 |
144 | @staticmethod
145 | def check_frame_id(frame_id, base, total_frame):
146 | temp = frame_id + base
147 | if 0 <= temp < total_frame:
148 | return temp
149 | else:
150 | return frame_id
151 |
152 | def draw(self, frame, number):
153 | show_frame = frame.copy()
154 | for index in range(len(self.GT)):
155 | if number == self.GT[index][0]:
156 | gt = self.GT[index].copy()
157 |
158 | # according to resize ratio
159 | gt[1] = int(self.resizeRatio * gt[1])
160 | gt[2] = int(self.resizeRatio * gt[2])
161 | gt[3] = int(self.resizeRatio * gt[3])
162 | gt[4] = int(self.resizeRatio * gt[4])
163 |
164 | show_frame = cv2.rectangle(show_frame, (gt[1], gt[2]), (gt[3], gt[4]),
165 | self.color, self.thickness)
166 |
167 | show_frame = self.fancy_show(show_frame, gt) # show labels
168 |
169 | return show_frame
170 |
171 | @staticmethod
172 | def fancy_show_grid(frame, gt):
173 | grid = 0
174 | height, width, _ = frame.shape
175 |
176 | center_x = int((gt[1] + gt[3]) / 2.0)
177 | center_y = int((gt[2] + gt[4]) / 2.0)
178 |
179 | if (center_x <= width / 2.0) and (center_y <= height / 2.0):
180 | grid = 2
181 | elif (center_x <= width / 2.0) and (center_y > height / 2.0):
182 | grid = 3
183 | elif (center_x > width / 2.0) and (center_y <= height / 2.0):
184 | grid = 1
185 | elif (center_x > width / 2.0) and (center_y > height / 2.0):
186 | grid = 4
187 |
188 | return grid
189 |
190 | def fancy_show(self, show_frame, gt):
191 | # showFrame = frame.copy()
192 |
193 | obj_id = gt[6]
194 | # print("Object ID: ", obj_id)
195 |
196 | first, second, third, fourth = int2stringLabel(gt) # int label to string label
197 |
198 | first_size = cv2.getTextSize(first, self.fontFace, self.fontScale,
199 | self.fontThickness)
200 | second_size = cv2.getTextSize(second, self.fontFace, self.fontScale,
201 | self.fontThickness)
202 | third_size = cv2.getTextSize(third, self.fontFace, self.fontScale,
203 | self.fontThickness)
204 | fourth_size = cv2.getTextSize(fourth, self.fontFace, self.fontScale,
205 | self.fontThickness)
206 |
207 | height = np.max(np.array((first_size[0][1], second_size[0][1], third_size[0][1], fourth_size[0][1])))
208 | width = np.max(np.array((first_size[0][0], second_size[0][0], third_size[0][0], fourth_size[0][0])))
209 | margin = int(0.4 * height)
210 |
211 | # read icons
212 | first_img = self.icons[first]
213 | second_img = self.icons[second]
214 | third_img = self.icons[third]
215 | fourth_img = self.icons[fourth]
216 |
217 | icon_width = icon_height = height
218 | first_img, second_img, third_img, fourth_img = [cv2.resize(img, (icon_width, icon_height))
219 | for img in [first_img, second_img, third_img, fourth_img]]
220 |
221 | # draw object ID
222 | id_size = cv2.getTextSize(str(obj_id), self.fontFace, self.fontScale, self.fontThickness)
223 | id_height, id_width = id_size[0][1], id_size[0][0]
224 |
225 | top_left = (gt[1], gt[4] - id_height - 2 * margin)
226 | bottom_right = (gt[1] + id_width + 2 * margin, gt[4])
227 | cv2.rectangle(show_frame, top_left, bottom_right, (0, 0, 255), -1)
228 |
229 | bottom_left = (gt[1] + margin, gt[4] - margin)
230 | show_frame = cv2.putText(show_frame, str(obj_id), bottom_left, self.fontFace, self.fontScale, (255, 255, 255),
231 | self.fontThickness)
232 |
233 | grid = self.fancy_show_grid(show_frame, gt)
234 |
235 | if grid == 1:
236 | self.grid1(show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
237 | first_img, second_img, third_img, fourth_img)
238 | elif grid == 2:
239 | self.grid2(show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
240 | first_img, second_img, third_img, fourth_img)
241 | elif grid == 3:
242 | self.grid3(show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
243 | first_img, second_img, third_img, fourth_img)
244 | elif grid == 4:
245 | self.grid4(show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
246 | first_img, second_img, third_img, fourth_img)
247 |
248 | return show_frame
249 |
250 | def grid1(self, show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
251 | first_img, second_img, third_img, fourth_img):
252 | # draw background and bounding box of background
253 | top_left = (gt[1] - width - icon_width - 3 * margin, gt[4] - 4 * height - 5 * margin)
254 | bottom_right = (gt[1], gt[4])
255 |
256 | if all(element >= 0 for element in top_left): # consider bounding box out of frame range
257 | cv2.rectangle(show_frame, top_left, bottom_right, self.color, -1)
258 | cv2.rectangle(show_frame, top_left, bottom_right, (0, 0, 255), self.thickness)
259 |
260 | # draw fourth label
261 | bottom_left = (gt[1] - width - margin, gt[4] - 3 * height - 4 * margin)
262 | show_frame = cv2.putText(show_frame, fourth, bottom_left, self.fontFace, self.fontScale, self.fontColor,
263 | self.fontThickness)
264 |
265 | # draw third label
266 | bottom_left = (gt[1] - width - margin, gt[4] - 2 * height - 3 * margin)
267 | show_frame = cv2.putText(show_frame, third, bottom_left, self.fontFace, self.fontScale, self.fontColor,
268 | self.fontThickness)
269 |
270 | # draw second label
271 | bottom_left = (gt[1] - width - margin, gt[4] - height - 2 * margin)
272 | show_frame = cv2.putText(show_frame, second, bottom_left, self.fontFace, self.fontScale, self.fontColor,
273 | self.fontThickness)
274 |
275 | # draw first label
276 | bottom_left = (gt[1] - width - margin, gt[4] - margin)
277 | show_frame = cv2.putText(show_frame, first, bottom_left, self.fontFace, self.fontScale, self.fontColor,
278 | self.fontThickness)
279 |
280 | # draw fourth icon
281 | rows = [gt[4] - 4 * height - 4 * margin, gt[4] - 3 * height - 4 * margin]
282 | cols = [gt[1] - width - icon_width - 2 * margin, gt[1] - width - 2 * margin]
283 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = fourth_img
284 |
285 | # draw third icon
286 | rows = [gt[4] - 3 * height - 3 * margin, gt[4] - 2 * height - 3 * margin]
287 | cols = [gt[1] - width - icon_width - 2 * margin, gt[1] - width - 2 * margin]
288 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = third_img
289 |
290 | # draw second icon
291 | rows = [gt[4] - 2 * height - 2 * margin, gt[4] - height - 2 * margin]
292 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = second_img
293 |
294 | # draw first icon
295 | rows = [gt[4] - height - margin, gt[4] - margin]
296 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = first_img
297 |
298 | def grid2(self, show_frame, gt, height, width, margin, icon_height, iconWidth, first, second, third, fourth,
299 | first_img, second_img, third_img, fourth_img):
300 | # draw background and bounding box of background
301 | top_left = (gt[3], gt[4] - 4 * height - 5 * margin)
302 | bottom_right = (gt[3] + width + iconWidth + 3 * margin, gt[4])
303 |
304 | if all(element >= 0 for element in top_left): # consider bounding box out of frame range
305 | cv2.rectangle(show_frame, top_left, bottom_right, self.color, -1)
306 | cv2.rectangle(show_frame, top_left, bottom_right, (0, 0, 255), self.thickness)
307 |
308 | # draw fourth label
309 | bottom_left = (gt[3] + iconWidth + 2 * margin, gt[4] - 3 * height - 4 * margin)
310 | show_frame = cv2.putText(show_frame, fourth, bottom_left, self.fontFace, self.fontScale, self.fontColor,
311 | self.fontThickness)
312 |
313 | # draw third label
314 | bottom_left = (gt[3] + iconWidth + 2 * margin, gt[4] - 2 * height - 3 * margin)
315 | show_frame = cv2.putText(show_frame, third, bottom_left, self.fontFace, self.fontScale, self.fontColor,
316 | self.fontThickness)
317 |
318 | # draw second label
319 | bottom_left = (gt[3] + iconWidth + 2 * margin, gt[4] - height - 2 * margin)
320 | show_frame = cv2.putText(show_frame, second, bottom_left, self.fontFace, self.fontScale, self.fontColor,
321 | self.fontThickness)
322 |
323 | # draw first label
324 | bottom_left = (gt[3] + iconWidth + 2 * margin, gt[4] - margin)
325 | show_frame = cv2.putText(show_frame, first, bottom_left, self.fontFace, self.fontScale, self.fontColor,
326 | self.fontThickness)
327 |
328 | # draw fourth icon
329 | rows = [gt[4] - 4 * height - 4 * margin, gt[4] - 3 * height - 4 * margin]
330 | cols = [gt[3] + margin, gt[3] + margin + iconWidth]
331 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = fourth_img
332 |
333 | # draw third icon
334 | rows = [gt[4] - 3 * height - 3 * margin, gt[4] - 2 * height - 3 * margin]
335 | cols = [gt[3] + margin, gt[3] + iconWidth + margin]
336 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = third_img
337 |
338 | # draw second icon
339 | rows = [gt[4] - 2 * height - 2 * margin, gt[4] - height - 2 * margin]
340 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = second_img
341 |
342 | # draw first icon
343 | rows = [gt[4] - height - margin, gt[4] - margin]
344 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = first_img
345 |
346 | def grid3(self, show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
347 | first_img, second_img, third_img, fourth_img):
348 | # draw background and bounding box of background
349 | top_left = (gt[3], gt[2])
350 | bottom_right = (gt[3] + icon_width + width + 3 * margin, gt[2] + 4 * height + 5 * margin)
351 | check_coordinate = (show_frame.shape[0] - bottom_right[1], show_frame.shape[1] - bottom_right[0])
352 |
353 | if all(element >= 0 for element in check_coordinate): # consider bounding box out of frame range
354 | cv2.rectangle(show_frame, top_left, bottom_right, self.color, -1)
355 | cv2.rectangle(show_frame, top_left, bottom_right, (0, 0, 255), self.thickness)
356 |
357 | # draw fourth label
358 | bottom_left = (gt[3] + icon_width + 2 * margin, gt[2] + height + margin)
359 | show_frame = cv2.putText(show_frame, fourth, bottom_left, self.fontFace,
360 | self.fontScale, self.fontColor, self.fontThickness)
361 |
362 | # draw third label
363 | bottom_left = (gt[3] + icon_width + 2 * margin, gt[2] + 2 * height + 2 * margin)
364 | show_frame = cv2.putText(show_frame, third, bottom_left, self.fontFace,
365 | self.fontScale, self.fontColor, self.fontThickness)
366 |
367 | # draw second label
368 | bottom_left = (gt[3] + icon_width + 2 * margin, gt[2] + 3 * height + 3 * margin)
369 | show_frame = cv2.putText(show_frame, second, bottom_left, self.fontFace,
370 | self.fontScale, self.fontColor, self.fontThickness)
371 |
372 | # draw first label
373 | bottom_left = (gt[3] + icon_width + 2 * margin, gt[2] + 4 * height + 4 * margin)
374 | show_frame = cv2.putText(show_frame, first, bottom_left, self.fontFace,
375 | self.fontScale, self.fontColor, self.fontThickness)
376 |
377 | # draw fourth icon
378 | rows = [gt[2] + margin, gt[2] + icon_height + margin]
379 | cols = [gt[3] + margin, gt[3] + icon_width + margin]
380 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = fourth_img
381 |
382 | # draw third icon
383 | rows = [gt[2] + icon_height + 2 * margin, gt[2] + 2 * icon_height + 2 * margin]
384 | cols = [gt[3] + margin, gt[3] + icon_width + margin]
385 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = third_img
386 |
387 | # draw second icon
388 | rows = [gt[2] + 2 * icon_height + 3 * margin, gt[2] + 3 * icon_height + 3 * margin]
389 | cols = [gt[3] + margin, gt[3] + icon_width + margin]
390 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = second_img
391 |
392 | # draw first icon
393 | rows = [gt[2] + 3 * icon_height + 4 * margin, gt[2] + 4 * icon_height + 4 * margin]
394 | cols = [gt[3] + margin, gt[3] + icon_width + margin]
395 |
396 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = first_img
397 |
398 | def grid4(self, show_frame, gt, height, width, margin, icon_height, icon_width, first, second, third, fourth,
399 | first_img, second_img, third_img, fourth_img):
400 | # draw background and bounding box of background
401 | top_left = (gt[1] - width - icon_width - 3 * margin, gt[2])
402 | bottom_right = (gt[1], gt[2] + 4 * height + 5 * margin)
403 | check_coordinate = (show_frame.shape[0] - bottom_right[1], show_frame.shape[1] - bottom_right[0])
404 |
405 | if all(element >= 0 for element in check_coordinate): # consider bounding box out of frame range
406 | cv2.rectangle(show_frame, top_left, bottom_right, self.color, -1)
407 | cv2.rectangle(show_frame, top_left, bottom_right, (0, 0, 255), self.thickness)
408 |
409 | # draw fourth label
410 | bottom_left = (gt[1] - width - margin, gt[2] + height + margin)
411 | show_frame = cv2.putText(show_frame, fourth, bottom_left, self.fontFace,
412 | self.fontScale, self.fontColor, self.fontThickness)
413 |
414 | # draw third label
415 | bottom_left = (gt[1] - width - margin, gt[2] + 2 * height + 2 * margin)
416 | show_frame = cv2.putText(show_frame, third, bottom_left, self.fontFace,
417 | self.fontScale, self.fontColor, self.fontThickness)
418 |
419 | # draw second label
420 | bottom_left = (gt[1] - width - margin, gt[2] + 3 * height + 3 * margin)
421 | show_frame = cv2.putText(show_frame, second, bottom_left, self.fontFace,
422 | self.fontScale, self.fontColor, self.fontThickness)
423 |
424 | # draw first label
425 | bottom_left = (gt[1] - width - margin, gt[2] + 4 * height + 4 * margin)
426 | show_frame = cv2.putText(show_frame, first, bottom_left, self.fontFace,
427 | self.fontScale, self.fontColor, self.fontThickness)
428 |
429 | # draw fourth icon
430 | rows = [gt[2] + margin, gt[2] + height + margin]
431 | cols = [gt[1] - width - icon_width - 2 * margin, gt[1] - width - 2 * margin]
432 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = fourth_img
433 |
434 | # draw third icon
435 | rows = [gt[2] + height + 2 * margin, gt[2] + 2 * height + 2 * margin]
436 | cols = [gt[1] - width - icon_width - 2 * margin, gt[1] - width - 2 * margin]
437 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = third_img
438 |
439 | # draw second icon
440 | rows = [gt[2] + 2 * height + 3 * margin, gt[2] + 3 * height + 3 * margin]
441 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = second_img
442 |
443 | # draw first icon
444 | rows = [gt[2] + 3 * height + 4 * margin, gt[2] + 4 * height + 4 * margin]
445 | show_frame[rows[0]:rows[1], cols[0]:cols[1], :] = first_img
446 |
--------------------------------------------------------------------------------