├── README.md
├── consumer
│   ├── consumer.py
│   └── templates
│       └── index.html
├── data
│   ├── coco.names
│   └── yolov3-320.cfg
├── producer
│   └── producer.py
└── requirements.txt

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

# Object-Detection-using-Spark-Docker-Kafka-Video-Streaming

### Pull the CNN model image from the Docker registry below; the model code is deployed inside the image

docker pull yesdineshkumar/bdpmodel

docker run --name c1_bdpmodel -it -v /c/Users/esdin/OneDrive/Documents/ModelDocker:/data yesdineshkumar/bdpmodel bash
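### Start Kafka before running the producer and consumer

Both producer/producer.py and consumer/consumer.py expect a broker at 192.168.99.100:9092 (a typical Docker Machine VM address). As a minimal sketch, assuming the wurstmeister images and a docker-compose.yml you create yourself (neither ships with this repository), a matching broker can be brought up as below; adjust KAFKA_ADVERTISED_HOST_NAME to your own Docker host IP:

    version: '2'
    services:
      zookeeper:
        image: wurstmeister/zookeeper
        ports:
          - "2181:2181"
      kafka:
        image: wurstmeister/kafka
        ports:
          - "9092:9092"
        environment:
          KAFKA_ADVERTISED_HOST_NAME: 192.168.99.100
          KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
          KAFKA_CREATE_TOPICS: "kafka-video-topic:1:1"

docker-compose up -d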
--------------------------------------------------------------------------------
/consumer/consumer.py:
--------------------------------------------------------------------------------

from flask import Flask, Response, render_template
from kafka import KafkaConsumer

# Fire up the Kafka consumer
topic = "kafka-video-topic"

consumer = KafkaConsumer(
    topic,
    bootstrap_servers=['192.168.99.100:9092'])


# Set the consumer in a Flask app
app = Flask(__name__)


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/read_video', methods=['GET'])
def read_video():
    """
    This is the heart of the video display. Notice we set the mimetype to
    multipart/x-mixed-replace: this tells the browser to replace each old
    frame with the next one streaming through the pipeline.
    """
    return Response(
        get_video_stream(),
        mimetype='multipart/x-mixed-replace; boundary=frame')


def get_video_stream():
    """
    Here is where we receive streamed JPEG frames from the Kafka broker and
    wrap them in the multipart format the browser expects.
    """
    for msg in consumer:
        print("msg ", msg)  # debug: logs every raw Kafka record
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + msg.value + b'\r\n\r\n')


if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=True)
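# A slightly hardier consumer configuration (an illustrative sketch, not part
# of the original script): group_id, auto_offset_reset, and
# consumer_timeout_ms are real kafka-python options, but the values below are
# assumptions to adapt. Naming a consumer group lets the broker track offsets
# across restarts, and the timeout makes the stream generator stop instead of
# blocking forever when the producer goes quiet.
#
# consumer = KafkaConsumer(
#     topic,
#     bootstrap_servers=['192.168.99.100:9092'],
#     group_id='video-viewer',
#     auto_offset_reset='latest',
#     consumer_timeout_ms=10000)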
--------------------------------------------------------------------------------
/consumer/templates/index.html:
--------------------------------------------------------------------------------

<html>
  <head>
    <title>BigDatapedia Kafka Video Streaming</title>
  </head>
  <body>
    <h1>Object Detection using Kafka Video Streaming</h1>
    <!-- the multipart JPEG stream served by the Flask /read_video route -->
    <img src="{{ url_for('read_video') }}">
  </body>
</html>

--------------------------------------------------------------------------------
/data/coco.names:
--------------------------------------------------------------------------------

person
bicycle
car
motorbike
aeroplane
bus
train
truck
boat
traffic light
fire hydrant
stop sign
parking meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports ball
kite
baseball bat
baseball glove
skateboard
surfboard
tennis racket
bottle
wine glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot dog
pizza
donut
cake
chair
sofa
potted plant
bed
dining table
toilet
tvmonitor
laptop
mouse
remote
keyboard
cell phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy bear
hair drier
toothbrush

--------------------------------------------------------------------------------
/data/yolov3-320.cfg:
--------------------------------------------------------------------------------

[net]
# Testing
# batch=1
# subdivisions=1
# Training
batch=64
subdivisions=16
# note: these are the stock yolov3 training dimensions; for the -320 variant
# the inference input is typically set to 320x320 when the blob is created
width=608
height=608
channels=3
momentum=0.9
decay=0.0005
angle=0
saturation = 1.5
exposure = 1.5
hue=.1

learning_rate=0.001
burn_in=1000
max_batches = 500200
policy=steps
steps=400000,450000
scales=.1,.1

[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky

# Downsample

[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear


[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

# Downsample

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

######################

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear


[yolo]
mask = 6,7,8
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1


[route]
layers = -4

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 61



[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=512
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear


[yolo]
mask = 3,4,5
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1



[route]
layers = -4

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[upsample]
stride=2

[route]
layers = -1, 36



[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=256
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear


[yolo]
mask = 0,1,2
anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
classes=80
num=9
jitter=.3
ignore_thresh = .7
truth_thresh = 1
random=1

--------------------------------------------------------------------------------
/producer/producer.py:
--------------------------------------------------------------------------------

import sys
import time
import cv2
from kafka import KafkaProducer

topic = "kafka-video-topic"


def publish_video(video_file):
    """
    Publish a given video file to the Kafka topic.
    The Kafka broker is expected to be reachable at 192.168.99.100:9092.
    Not partitioned.

    :param video_file: path to video file
    """
    # Start up the producer
    producer = KafkaProducer(bootstrap_servers='192.168.99.100:9092')

    # Open the file
    video = cv2.VideoCapture(video_file)

    print('publishing video...')

    while video.isOpened():
        success, frame = video.read()

        # Ensure the frame was read successfully
        if not success:
            print("bad read!")
            break

        # Encode the frame as JPEG
        ret, buffer = cv2.imencode('.jpg', frame)

        # Noisy per-frame debug output:
        # print("ret, buffer", ret, buffer)

        # Convert to bytes and send to Kafka
        producer.send(topic, buffer.tobytes())

        time.sleep(0.2)
    video.release()
    print('publish complete')


def publish_camera():
    """
    Publish the local camera's video stream to the Kafka topic.
    The Kafka broker is expected to be reachable at 192.168.99.100:9092.
    Not partitioned.
    """
    # Start up the producer
    producer = KafkaProducer(bootstrap_servers='192.168.99.100:9092')

    camera = cv2.VideoCapture(0)
    try:
        while True:
            success, frame = camera.read()

            ret, buffer = cv2.imencode('.jpg', frame)
            producer.send(topic, buffer.tobytes())

            # Choppier stream, reduced load on the processor
            time.sleep(0.2)

    except KeyboardInterrupt:
        # Catch only Ctrl-C so real errors still surface
        print("\nExiting.")
        sys.exit(1)
    finally:
        # Always release the camera handle
        camera.release()

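# Payload tuning (an illustrative sketch, not part of the original script):
# cv2.imencode accepts encoding flags, and lowering the JPEG quality shrinks
# each Kafka message considerably; the quality value 70 here is an assumption
# to adjust for your bandwidth and image-quality needs.
#
# ret, buffer = cv2.imencode(
#     '.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
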
if __name__ == '__main__':
    """
    Publish a video file given as a command-line argument to the Kafka
    broker; otherwise fall back to streaming the webcam feed.
    """
    # sys.argv = ["hi", r"D:\00STUDY\00ANALYTICS\01DataScience_ML\02Study\12OpenCV\Kafka_VDO\KafkaVideoStreaming\videos\Countdown1.mp4"]
    # NOTE: this hard-coded override always takes the video-file branch;
    # remove it to honour real command-line arguments or to use the webcam
    sys.argv = ["hi", r'C:\Users\esdin\Music\Paris Evening Walk and Bike Ride.mp4']
    if len(sys.argv) > 1:
        video_path = sys.argv[1]
        publish_video(video_path)
    else:
        print("publishing feed!")
        publish_camera()

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------

absl-py==0.12.0
asgiref==3.2.3
astor==0.8.1
astunparse==1.6.3
attrs==19.3.0
backcall==0.1.0
cachetools==4.2.1
certifi==2019.11.28
chardet==4.0.0
click==8.0.0
colorama==0.4.4
cycler==0.10.0
decorator==4.4.1
defusedxml==0.6.0
Django==3.0.3
easydict==1.9
entrypoints==0.3
enum34==1.1.6
Flask==2.0.0
gast==0.3.3
google-auth==1.28.0
google-auth-oauthlib==0.4.3
google-pasta==0.2.0
grpcio==1.36.1
h5py==2.10.0
idna==2.10
image==1.5.28
imageio==2.9.0
implicits==1.0.2
importlib-metadata==1.5.0
ipykernel==5.1.4
ipython==7.12.0
ipython-genutils==0.2.0
ipywidgets==7.5.1
itsdangerous==2.0.0
jedi==0.16.0
Jinja2==3.0.0
joblib==0.14.1
jsonschema==3.2.0
jupyter==1.0.0
jupyter-console==6.1.0
jupyter-core==4.6.3
kafka-python==2.0.2
Keras==2.1.5
Keras-Applications==1.0.8
Keras-Preprocessing==1.1.0
kiwisolver==1.2.0
Markdown==3.2.1
MarkupSafe==2.0.0
matplotlib==3.4.1
mistune==0.8.4
nbconvert==5.6.1
nbformat==5.0.4
networkx==2.5.1
notebook==6.0.3
numpy==1.17.4
oauthlib==3.1.0
opencv-python==4.5.1.48
opt-einsum==3.3.0
pandocfilters==1.4.2
parso==0.6.1
pexpect==4.8.0
pickleshare==0.7.5
Pillow==6.2.1
pip-date==1.0.3
portpicker==1.2.0
prometheus-client==0.7.1
prompt-toolkit==3.0.3
protobuf==3.11.3
ptyprocess==0.6.0
py4j==0.10.9
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycocotools==2.0.2
pydot==1.4.2
Pygments==2.5.2
pyparsing==2.4.7
pyrsistent==0.15.7
pyspark==3.0.1
pytesseract==0.3.7
python-dateutil==2.8.1
pytz==2019.3
PyWavelets==1.1.1
pywin32==300
pywinpty==0.5.7
PyYAML==5.3
pyzmq==18.1.1
qtconsole==4.6.0
requests==2.25.1
requests-oauthlib==1.3.0
rsa==4.7.2
scikit-image==0.16.2
scikit-learn==0.22.1
scipy==1.4.1
Send2Trash==1.5.0
six==1.14.0
sklearn==0.0
sqlparse==0.3.0
tensorboard==2.2.2
tensorboard-plugin-wit==1.8.0
tensorflow-gpu-estimator==2.2.0
termcolor==1.1.0
terminado==0.8.3
testpath==0.4.4
tf-utils==1.0.4
traitlets==4.3.3
urllib3==1.26.4
wcwidth==0.1.8
Werkzeug==2.0.0
widgetsnbextension==3.5.1
wincertstore==0.2
wrapt==1.12.1
zipp==3.0.0

--------------------------------------------------------------------------------
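For reference: data/yolov3-320.cfg and data/coco.names are standard YOLOv3 inputs, and the sketch below shows how such files are typically wired up with OpenCV's DNN module (matching the opencv-python pin above). It is illustrative only; the weights filename is an assumption, since the README ships the trained model inside the yesdineshkumar/bdpmodel image rather than in this repository.

import cv2
import numpy as np

# Load the network from the repo's cfg; 'yolov3.weights' is an assumed path
net = cv2.dnn.readNetFromDarknet('data/yolov3-320.cfg', 'yolov3.weights')
classes = open('data/coco.names').read().strip().split('\n')


def detect(frame, conf_threshold=0.5, nms_threshold=0.4):
    """Return the frame with YOLOv3 detections drawn on it."""
    h, w = frame.shape[:2]
    # 320x320 input matches the "-320" variant named by the cfg file
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (320, 320),
                                 swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(net.getUnconnectedOutLayersNames())

    boxes, confidences, class_ids = [], [], []
    for output in outputs:
        for detection in output:
            scores = detection[5:]
            class_id = int(np.argmax(scores))
            confidence = float(scores[class_id])
            if confidence > conf_threshold:
                # Box is centre x/y plus width/height, normalised to [0, 1]
                cx, cy, bw, bh = detection[:4] * np.array([w, h, w, h])
                boxes.append([int(cx - bw / 2), int(cy - bh / 2),
                              int(bw), int(bh)])
                confidences.append(confidence)
                class_ids.append(class_id)

    # Non-maximum suppression collapses overlapping candidate boxes
    keep = cv2.dnn.NMSBoxes(boxes, confidences, conf_threshold, nms_threshold)
    for i in np.array(keep).flatten():
        x, y, bw, bh = boxes[i]
        label = '%s: %.2f' % (classes[class_ids[i]], confidences[i])
        cv2.rectangle(frame, (x, y), (x + bw, y + bh), (0, 255, 0), 2)
        cv2.putText(frame, label, (x, y - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    return frame

In the producer above, detect(frame) would be called on each frame before cv2.imencode, so the consumer page shows annotated video rather than the raw feed.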