├── mrcnn
│   ├── __init__.py
│   ├── __pycache__
│   │   ├── model.cpython-36.pyc
│   │   ├── utils.cpython-36.pyc
│   │   ├── __init__.cpython-36.pyc
│   │   ├── config.cpython-36.pyc
│   │   └── visualize.cpython-36.pyc
│   ├── parallel_model.py
│   ├── config.py
│   └── visualize.py
├── output1.png
├── output2.png
├── output3.png
├── static
│   ├── hero-bg.jpg
│   ├── dumpling_first.jpg
│   ├── first_segmented.png
│   ├── images
│   │   ├── fashion.png
│   │   ├── hero-bg.jpg
│   │   └── call-to-action-bg.jpg
│   └── css
│       ├── custom-style.css
│       ├── bootstrap-reboot.min.css
│       ├── mycss.css
│       ├── bootstrap-reboot.css
│       ├── fileinput.min.css
│       ├── glyphicon.css
│       ├── style.css
│       ├── bootstrap-reboot.min.css.map
│       └── bootstrap-grid.min.css
├── Readme.md
├── __pycache__
│   ├── app.cpython-36.pyc
│   ├── Model.cpython-36.pyc
│   └── Parser.cpython-36.pyc
├── LICENSE
├── README.md
├── Model.py
├── Parser.py
├── app.py
├── stylize.py
├── second_segment.py
└── first_segment.py

/mrcnn/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/output1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/output1.png
--------------------------------------------------------------------------------
/output2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/output2.png
--------------------------------------------------------------------------------
/output3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/output3.png
--------------------------------------------------------------------------------
/static/hero-bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/hero-bg.jpg
--------------------------------------------------------------------------------
/Readme.md:
--------------------------------------------------------------------------------
1 | To learn how to install Flask, [click here](http://flask.pocoo.org/docs/1.0/installation/)
--------------------------------------------------------------------------------
/static/dumpling_first.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/dumpling_first.jpg
--------------------------------------------------------------------------------
/static/first_segmented.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/first_segmented.png
--------------------------------------------------------------------------------
/static/images/fashion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/images/fashion.png
--------------------------------------------------------------------------------
/static/images/hero-bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/images/hero-bg.jpg
--------------------------------------------------------------------------------
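A note on setup: Readme.md above only links to Flask's installation guide. The remaining dependencies are not pinned anywhere in the repository, but can be inferred from app.py's imports — roughly `pip3 install flask flask-uploads numpy pillow scipy opencv-python`, plus TensorFlow and Keras for the Mask R-CNN code and pyOpenSSL for the ad-hoc HTTPS context. Once installed, `python3 app.py` starts the server on port 5000.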
/__pycache__/app.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/__pycache__/app.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/Model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/__pycache__/Model.cpython-36.pyc -------------------------------------------------------------------------------- /__pycache__/Parser.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/__pycache__/Parser.cpython-36.pyc -------------------------------------------------------------------------------- /static/images/call-to-action-bg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/static/images/call-to-action-bg.jpg -------------------------------------------------------------------------------- /mrcnn/__pycache__/model.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/mrcnn/__pycache__/model.cpython-36.pyc -------------------------------------------------------------------------------- /mrcnn/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/mrcnn/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /mrcnn/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/mrcnn/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /mrcnn/__pycache__/config.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/mrcnn/__pycache__/config.cpython-36.pyc -------------------------------------------------------------------------------- /mrcnn/__pycache__/visualize.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BisheshS/Virtual-Clothes-TryOn/HEAD/mrcnn/__pycache__/visualize.cpython-36.pyc -------------------------------------------------------------------------------- /static/css/custom-style.css: -------------------------------------------------------------------------------- 1 | .big-banner{ 2 | background-image: url('../images/table.jpg') 3 | 4 | } 5 | 6 | 7 | 8 | .form-rounded { 9 | border-radius: 1.5rem; 10 | border-width: 4px; 11 | width: 250px; 12 | } 13 | 14 | .search-rounded { 15 | border-radius: 1.5rem; 16 | border-width: 4px; 17 | width: 1113px; 18 | } 19 | 20 | .suggestion-rounded { 21 | border-radius: 1.5rem; 22 | border-width: 1px; 23 | width: 1113px; 24 | } 25 | 26 | 27 | 28 | .square-container { 29 | display: flex; 30 | flex-wrap: wrap; 31 | } 32 | 33 | .square { 34 | position: relative; 35 | flex-basis: calc(33.333% - 10px); 36 | margin: 5px; 37 | border: 0px solid; 38 | box-sizing: border-box; 39 | } 40 | 41 | 
.square::before {
42 |   content: '';
43 |   display: block;
44 |   padding-top: 100%;
45 | }
46 | 
47 | .square .content {
48 |   position: absolute;
49 |   top: 0; left: 0;
50 |   height: 100%;
51 |   width: 100%;
52 | }
53 | 
54 | 
55 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2020 bishbeast
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Virtual Clothes Try On
2 | The project supports web-based apparel shopping by letting users try on an apparel virtually before buying it. It is a web application that accepts an image of the person shopping for the apparel and an image of the desired apparel; using the contour of the person's body, it generates an image of that person with the apparel fitted to them. Users can thus try on clothes they wish to buy without visiting a shop in person.
3 | 
4 | ## Results
5 | ### Results on trained data
6 | ![alt text](https://github.com/BisheshS/Virtual-Clothes-TryOn/blob/main/output1.png?raw=true)
7 | ### Results on real data taken from webcam
8 | ![alt text](https://github.com/BisheshS/Virtual-Clothes-TryOn/blob/main/output2.png?raw=true)
9 | ![alt text](https://github.com/BisheshS/Virtual-Clothes-TryOn/blob/main/output3.png?raw=true)
10 | 
11 | 
12 | 
13 | ## References
14 | 1. The Conditional Analogy GAN: Swapping Fashion Articles on People Images, Nikolay Jetchev, Urs Bergmann, Zalando Research
15 | 2. Generative Adversarial Network-Based Virtual Try-On with Clothing Region, Shizuma Kubo, Yusuke Iwasawa and Yutaka Matsuo, The University of Tokyo
16 | 3. SwapNet: Image Based Garment Transfer, Amit Raj, Patsorn Sangkloy, Huiwen Chang, James Hays, Duygu Ceylan and Jingwan Lu, Georgia Institute of Technology, Princeton University, Argo AI, Adobe Research
17 | 4. Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks, Jun-Yan Zhu, Taesung Park, Phillip Isola, Alexei A. Efros, Berkeley AI Research laboratory, UC Berkeley
18 | 5. Localized Style Transfer, Alex Wells, Jeremy Wood, Minna Xiao, Stanford University
19 | 6. Mask R-CNN, Kaiming He, Georgia Gkioxari, Piotr Dollar, Ross Girshick, Facebook AI Research
20 | 7. VITON: An Image-based Virtual Try-on Network, Xintong Han, Zuxuan Wu, Zhe Wu, Ruichi Yu, Larry S. Davis, University of Maryland, College Park
21 | 8. https://towardsdatascience.com/stuart-weitzman-boots-designer-bags-and-outfits-with-mask-r-cnn-92a267a02819
22 | 9. https://machinelearningmastery.com/use-pre-trained-vgg-model-classify-objects-photographs/
23 | 10. http://www.robots.ox.ac.uk/~vgg/software/via/via-1.0.6.html
24 | 11. https://engineering.matterport.com/splash-of-color-instance-segmentation-with-mask-r-cnn-and-tensorflow-7c761e238b46
25 | 12. https://medium.com/@jonathan_hui/image-segmentation-with-mask-r-cnn-ebe6d793272
26 | 
--------------------------------------------------------------------------------
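Before the individual source files, one orienting sketch: the whole try-on flow that app.py drives below reduces to three script invocations — two Mask R-CNN segmentation passes, then a mask-guided style transfer. The weights file and flags are exactly the ones app.py passes; person.jpg and apparel.jpg are placeholder names:

import os

# Segment the shirt region in the person photo and in the apparel photo.
# The splash scripts write first_segmented.png / second_segmented.png into static/.
os.system('python3 first_segment.py splash --weights=mask_rcnn_shirt_0005.h5 --image="person.jpg"')
os.system('python3 second_segment.py splash --weights=mask_rcnn_shirt_0005.h5 --image="apparel.jpg"')

# Transfer the apparel's appearance into the person's masked shirt region.
os.system('python3 stylize.py --mask_n_colors=1 --content_img="person.jpg" --target_mask="first_segmented.png" --style_img="apparel.jpg" --style_mask="second_segmented.png"')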
/Model.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import scipy.io
4 | 
5 | vgg_layers = (
6 |     'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
7 | 
8 |     'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
9 | 
10 |     'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
11 |     'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
12 | 
13 |     'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
14 |     'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
15 | 
16 |     'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
17 |     'relu5_3', 'conv5_4', 'relu5_4'
18 | )
19 | 
20 | 
21 | def prepare_model(path):
22 |     vgg_rawnet = scipy.io.loadmat(path)
23 |     return vgg_rawnet['layers'][0]  # another solution: global vgg_weights
24 | 
25 | def build_image_net(input_tensor, vgg_weights, feature_pooling_type):
26 |     net = {}
27 |     current = input_tensor
28 | 
29 |     for i, name in enumerate(vgg_layers):
30 |         layer_kind = name[:4]
31 |         if layer_kind == 'conv':
32 |             weights, bias = vgg_weights[i][0][0][2][0]
33 |             bias = bias.reshape(-1)
34 |             current = conv_layer(current, tf.constant(weights), tf.constant(bias))
35 |         elif layer_kind == 'relu':
36 |             current = tf.nn.relu(current)
37 |         elif layer_kind == 'pool':
38 |             current = pool_layer(current, feature_pooling_type)
39 |         net[name] = current
40 | 
41 |     return net
42 | 
43 | def conv_layer(input, W, b):
44 |     conv = tf.nn.conv2d(input, W, strides=[1,1,1,1], padding='SAME')
45 |     return conv + b
46 | 
47 | def pool_layer(input, feature_pooling_type):
48 |     if feature_pooling_type == 'avg':
49 |         return tf.nn.avg_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
50 |     elif feature_pooling_type == 'max':
51 |         return tf.nn.max_pool(input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
52 | 
53 | def build_mask_net(input_tensor, mask_downsample_type):
54 |     net = {}
55 |     current = input_tensor
56 | 
57 |     # soft
58 |     if mask_downsample_type == 'simple':
59 |         for name in vgg_layers:
60 |             layer_kind = name[:4]
61 |             if layer_kind == 'pool':
62 |                 current = tf.nn.avg_pool(current, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
63 |             net[name] = current
64 |     # hard
65 |     elif mask_downsample_type == 'all':
66 |         for name in vgg_layers:
67 |             layer_kind = name[:4]
68 |             if layer_kind == 'conv':
69 |                 current = tf.nn.max_pool(current, ksize=[1,3,3,1], strides=[1,1,1,1], padding='SAME')
70 |             elif layer_kind == 'pool':
71 |                 current = tf.nn.max_pool(current, ksize=[1,2,2,1], strides=[1,2,2,1],
padding='SAME') 72 | net[name] = current 73 | # hard, keep the padding boundary unchanged 74 | elif mask_downsample_type == 'inside': 75 | current = 1 - current 76 | for name in vgg_layers: 77 | layer_kind = name[:4] 78 | if layer_kind == 'conv': 79 | current = tf.nn.max_pool(current, ksize=[1,3,3,1], strides=[1,1,1,1], padding='SAME') 80 | elif layer_kind == 'pool': 81 | current = tf.nn.max_pool(current, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') 82 | net[name] = 1 - current 83 | # soft 84 | elif mask_downsample_type == 'mean': 85 | for name in vgg_layers: 86 | layer_kind = name[:4] 87 | if layer_kind == 'conv': 88 | current = tf.nn.avg_pool(current, ksize=[1,3,3,1], strides=[1,1,1,1], padding='SAME') 89 | elif layer_kind == 'pool': 90 | current = tf.nn.avg_pool(current, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') 91 | net[name] = current 92 | 93 | return net 94 | 95 | 96 | 97 | 98 | 99 | 100 | -------------------------------------------------------------------------------- /static/css/bootstrap-reboot.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bootstrap Reboot v4.1.3 (https://getbootstrap.com/) 3 | * Copyright 2011-2018 The Bootstrap Authors 4 | * Copyright 2011-2018 Twitter, Inc. 5 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) 6 | * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md) 7 | */*,::after,::before{box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:transparent}@-ms-viewport{width:device-width}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system,BlinkMacSystemFont,"Segoe UI",Roboto,"Helvetica Neue",Arial,sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#212529;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0!important}hr{box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:.5rem}p{margin-top:0;margin-bottom:1rem}abbr[data-original-title],abbr[title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0}address{margin-bottom:1rem;font-style:normal;line-height:inherit}dl,ol,ul{margin-top:0;margin-bottom:1rem}ol ol,ol ul,ul ol,ul ul{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}dfn{font-style:italic}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#007bff;text-decoration:none;background-color:transparent;-webkit-text-decoration-skip:objects}a:hover{color:#0056b3;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus,a:not([href]):not([tabindex]):hover{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}code,kbd,pre,samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,"Liberation Mono","Courier New",monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 
1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:.75rem;padding-bottom:.75rem;color:#6c757d;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}button,input,optgroup,select,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}[type=reset],[type=submit],button,html [type=button]{-webkit-appearance:button}[type=button]::-moz-focus-inner,[type=reset]::-moz-focus-inner,[type=submit]::-moz-focus-inner,button::-moz-focus-inner{padding:0;border-style:none}input[type=checkbox],input[type=radio]{box-sizing:border-box;padding:0}input[type=date],input[type=datetime-local],input[type=month],input[type=time]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type=number]::-webkit-inner-spin-button,[type=number]::-webkit-outer-spin-button{height:auto}[type=search]{outline-offset:-2px;-webkit-appearance:none}[type=search]::-webkit-search-cancel-button,[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none!important} 8 | /*# sourceMappingURL=bootstrap-reboot.min.css.map */ -------------------------------------------------------------------------------- /Parser.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | def parse_args(): 4 | parser = argparse.ArgumentParser() 5 | 6 | # ('--weights', nargs='+', type=float, default=[1.0], choices=[1, 2, 3], help='') 7 | 8 | parser.add_argument('--content_img', type=str, 9 | help='content image path') 10 | 11 | parser.add_argument('--style_img', type=str, 12 | help='style image path') 13 | 14 | parser.add_argument('--target_mask', type=str, 15 | help='target mask path') 16 | 17 | parser.add_argument('--style_mask', type=str, 18 | help='style mask path') 19 | 20 | # colors = 1: only use white region 21 | # colors > 1: use all colors 22 | parser.add_argument('--mask_n_colors', type=int, 23 | default=1, 24 | help='Number of colors in the given mask') 25 | 26 | parser.add_argument('--hard_width', type=int, 27 | help='If set, resize the content, style and mask images to the same width') 28 | 29 | parser.add_argument('--init_noise_ratio', type=float, 30 | default=0.0, 31 | help='The ratio between noise and content, ranging from 0. 
to 1.') 32 | 33 | parser.add_argument('--model_path', type=str, 34 | default='imagenet-vgg-verydeep-19.mat', 35 | help='The path of the vgg model') 36 | 37 | parser.add_argument('--feature_pooling_type', type=str, 38 | default='avg', 39 | choices=['avg', 'max'], 40 | help='pooling type of the vgg model') 41 | 42 | parser.add_argument('--mask_downsample_type', type=str, 43 | default='simple', 44 | choices=['simple', 'all', 'inside', 'mean'], 45 | help='How to propagate masks to different layers') 46 | 47 | parser.add_argument('--content_layers', nargs='+', type=str, 48 | default=['relu4_2'], 49 | help='VGG19 layers used for the content image') 50 | 51 | parser.add_argument('--content_layers_weights', nargs='+', type=float, 52 | default=[1.], 53 | help='weights of each content layer') 54 | 55 | parser.add_argument('--style_layers', nargs='+', type=str, 56 | default=['relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1'], 57 | help='VGG19 layers used for the style image') 58 | 59 | parser.add_argument('--style_layers_weights', nargs='+', type=float, 60 | default=[1., 1., 1., 1., 1.], 61 | help='weights of each style layer') 62 | 63 | parser.add_argument('--content_loss_normalization', type=int, 64 | default=1, 65 | choices=[1, 2], 66 | help='1 for 1./(N * M); 2 for 1./(2. * N**0.5 * M**0.5)') 67 | 68 | parser.add_argument('--mask_normalization_type', type=str, 69 | default='square_sum', 70 | choices=['square_sum', 'sum'], 71 | help='How to normalize a masked gram matrix') 72 | 73 | parser.add_argument('--content_weight', type=float, 74 | default=1., 75 | help='Content loss weight') 76 | 77 | parser.add_argument('--style_weight', type=float, 78 | default=0.2, 79 | help='Style loss weight') 80 | 81 | parser.add_argument('--tv_weight', type=float, 82 | default=0., 83 | help='Total variation loss weight') 84 | 85 | parser.add_argument('--optimizer', type=str, 86 | default='lbfgs', 87 | choices=['lbfgs', 'adam'], 88 | help='choose optimizer') 89 | 90 | parser.add_argument('--learning_rate', type=float, 91 | default=10., 92 | help='learning rate for adam optimizer') 93 | 94 | parser.add_argument('--iteration', type=int, 95 | default=1000, 96 | help='max iterations of training') 97 | 98 | # 10 is good for l-bfgs interface? 99 | parser.add_argument('--log_iteration', type=int, 100 | default=10, 101 | help='Number of iterations to print loss. For adam, also save intermediate result. 
For L-BFGS, don\'t set it larger than 10')
102 | 
103 |     parser.add_argument('--output_dir', type=str,
104 |                         default='./static',
105 |                         help='Directory to save result')
106 | 
107 |     return parser.parse_args()
108 | 
109 | 
--------------------------------------------------------------------------------
/static/css/mycss.css:
--------------------------------------------------------------------------------
1 | .ad-left {
2 |     float: left;
3 | }
4 | 
5 | .ad-right {
6 |     float: right;
7 |     margin-left: 10px;
8 | }
9 | 
10 | .entire-thing {
11 |     width: 650px;
12 | }
13 | 
14 | .arrow {
15 |     text-align: center;
16 |     margin: 8% 0;
17 | }
18 | .bounce {
19 |     -moz-animation: bounce 2s infinite;
20 |     -webkit-animation: bounce 2s infinite;
21 |     animation: bounce 2s infinite;
22 | }
23 | 
24 | @keyframes bounce {
25 |     0%, 20%, 50%, 80%, 100% {
26 |         transform: translateY(0);
27 |     }
28 |     40% {
29 |         transform: translateY(-30px);
30 |     }
31 |     60% {
32 |         transform: translateY(-15px);
33 |     }
34 | }
35 | 
36 | .displayone {
37 |     /*width: 100wh;
38 |     height: 90vh; */
39 |     color: #fff;
40 |     background: linear-gradient(-45deg, #000000, #0F0F0F, #1C1C1C, #333333);
41 |     background-size: 400% 400%;
42 |     -webkit-animation: Gradient 15s ease infinite;
43 |     -moz-animation: Gradient 15s ease infinite;
44 |     animation: Gradient 15s ease infinite;
45 | }
46 | 
47 | .camera {
48 |     color: #fff;
49 |     background: linear-gradient(-45deg, #000000, #0F0F0F, #1C1C1C, #333333);
50 |     background-size: 400% 400%;
51 |     -webkit-animation: Gradient 15s ease infinite;
52 |     -moz-animation: Gradient 15s ease infinite;
53 |     animation: Gradient 15s ease infinite;
54 | }
55 | 
56 | .displaytwo {
57 |     color: #fff;
58 |     background: linear-gradient(-45deg, #000000, #0F0F0F, #1C1C1C, #333333);
59 |     background-size: 400% 400%;
60 |     -webkit-animation: Gradient 15s ease infinite;
61 |     -moz-animation: Gradient 15s ease infinite;
62 |     animation: Gradient 15s ease infinite;
63 | }
64 | 
65 | 
66 | .displaythree{
67 |     color: #fff;
68 |     background: linear-gradient(-45deg, #000000, #0F0F0F, #1C1C1C, #333333);
69 |     background-size: 400% 400%;
70 |     -webkit-animation: Gradient 15s ease infinite;
71 |     -moz-animation: Gradient 15s ease infinite;
72 |     animation: Gradient 15s ease infinite;
73 | }
74 | 
75 | 
76 | .displayfour{
77 |     color: #fff;
78 |     background: linear-gradient(-45deg, #000000, #0F0F0F, #1C1C1C, #333333);
79 |     background-size: 400% 400%;
80 |     -webkit-animation: Gradient 15s ease infinite;
81 |     -moz-animation: Gradient 15s ease infinite;
82 |     animation: Gradient 15s ease infinite;
83 | }
84 | 
85 | /*.displayfour{
86 |     color: #fff;
87 |     background: linear-gradient(-45deg, #EE7752, #E73C7E, #23A6D5, #23D5AB);
88 |     background-size: 400% 400%;
89 |     -webkit-animation: Gradient 15s ease infinite;
90 |     -moz-animation: Gradient 15s ease infinite;
91 |     animation: Gradient 15s ease infinite;
92 | }*/
93 | 
94 | @-webkit-keyframes Gradient {
95 |     0% {
96 |         background-position: 0% 50%
97 |     }
98 |     50% {
99 |         background-position: 100% 50%
100 |     }
101 |     100% {
102 |         background-position: 0% 50%
103 |     }
104 | }
105 | 
106 | @-moz-keyframes Gradient {
107 |     0% {
108 |         background-position: 0% 50%
109 |     }
110 |     50% {
111 |         background-position: 100% 50%
112 |     }
113 |     100% {
114 |         background-position: 0% 50%
115 |     }
116 | }
117 | 
118 | @keyframes Gradient {
119 |     0% {
120 |         background-position: 0% 50%
121 |     }
122 |     50% {
123 |         background-position: 100% 50%
124 |     }
125 |     100% {
126 |         background-position: 0% 50%
127 |     }
128 | }
129 | 
130 | .bg {
131 |     animation:slide 3s ease-in-out infinite
alternate; 132 | background-image: linear-gradient(-60deg, #010000 50%, #7C7F7A 50%); 133 | bottom:0; 134 | left:-50%; 135 | opacity:.5; 136 | position:fixed; 137 | right:-50%; 138 | top:0; 139 | z-index:-1; 140 | } 141 | 142 | /*6c3 09f*/ 143 | 144 | .bg2 { 145 | animation-direction:alternate-reverse; 146 | animation-duration:4s; 147 | } 148 | 149 | .bg3 { 150 | animation-duration:5s; 151 | } 152 | 153 | 154 | @keyframes slide { 155 | 0% { 156 | transform:translateX(-25%); 157 | } 158 | 100% { 159 | transform:translateX(25%); 160 | } 161 | } 162 | 163 | input[type="submit"], input[type="button"], button[type="button"], button { 164 | font-size: 20px; 165 | } -------------------------------------------------------------------------------- /static/css/bootstrap-reboot.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bootstrap Reboot v4.1.3 (https://getbootstrap.com/) 3 | * Copyright 2011-2018 The Bootstrap Authors 4 | * Copyright 2011-2018 Twitter, Inc. 5 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) 6 | * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md) 7 | */ 8 | *, 9 | *::before, 10 | *::after { 11 | box-sizing: border-box; 12 | } 13 | 14 | html { 15 | font-family: sans-serif; 16 | line-height: 1.15; 17 | -webkit-text-size-adjust: 100%; 18 | -ms-text-size-adjust: 100%; 19 | -ms-overflow-style: scrollbar; 20 | -webkit-tap-highlight-color: rgba(0, 0, 0, 0); 21 | } 22 | 23 | @-ms-viewport { 24 | width: device-width; 25 | } 26 | 27 | article, aside, figcaption, figure, footer, header, hgroup, main, nav, section { 28 | display: block; 29 | } 30 | 31 | body { 32 | margin: 0; 33 | font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"; 34 | font-size: 1rem; 35 | font-weight: 400; 36 | line-height: 1.5; 37 | color: #212529; 38 | text-align: left; 39 | background-color: #fff; 40 | } 41 | 42 | [tabindex="-1"]:focus { 43 | outline: 0 !important; 44 | } 45 | 46 | hr { 47 | box-sizing: content-box; 48 | height: 0; 49 | overflow: visible; 50 | } 51 | 52 | h1, h2, h3, h4, h5, h6 { 53 | margin-top: 0; 54 | margin-bottom: 0.5rem; 55 | } 56 | 57 | p { 58 | margin-top: 0; 59 | margin-bottom: 1rem; 60 | } 61 | 62 | abbr[title], 63 | abbr[data-original-title] { 64 | text-decoration: underline; 65 | -webkit-text-decoration: underline dotted; 66 | text-decoration: underline dotted; 67 | cursor: help; 68 | border-bottom: 0; 69 | } 70 | 71 | address { 72 | margin-bottom: 1rem; 73 | font-style: normal; 74 | line-height: inherit; 75 | } 76 | 77 | ol, 78 | ul, 79 | dl { 80 | margin-top: 0; 81 | margin-bottom: 1rem; 82 | } 83 | 84 | ol ol, 85 | ul ul, 86 | ol ul, 87 | ul ol { 88 | margin-bottom: 0; 89 | } 90 | 91 | dt { 92 | font-weight: 700; 93 | } 94 | 95 | dd { 96 | margin-bottom: .5rem; 97 | margin-left: 0; 98 | } 99 | 100 | blockquote { 101 | margin: 0 0 1rem; 102 | } 103 | 104 | dfn { 105 | font-style: italic; 106 | } 107 | 108 | b, 109 | strong { 110 | font-weight: bolder; 111 | } 112 | 113 | small { 114 | font-size: 80%; 115 | } 116 | 117 | sub, 118 | sup { 119 | position: relative; 120 | font-size: 75%; 121 | line-height: 0; 122 | vertical-align: baseline; 123 | } 124 | 125 | sub { 126 | bottom: -.25em; 127 | } 128 | 129 | sup { 130 | top: -.5em; 131 | } 132 | 133 | a { 134 | color: #007bff; 135 | text-decoration: none; 136 | background-color: 
transparent; 137 | -webkit-text-decoration-skip: objects; 138 | } 139 | 140 | a:hover { 141 | color: #0056b3; 142 | text-decoration: underline; 143 | } 144 | 145 | a:not([href]):not([tabindex]) { 146 | color: inherit; 147 | text-decoration: none; 148 | } 149 | 150 | a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus { 151 | color: inherit; 152 | text-decoration: none; 153 | } 154 | 155 | a:not([href]):not([tabindex]):focus { 156 | outline: 0; 157 | } 158 | 159 | pre, 160 | code, 161 | kbd, 162 | samp { 163 | font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; 164 | font-size: 1em; 165 | } 166 | 167 | pre { 168 | margin-top: 0; 169 | margin-bottom: 1rem; 170 | overflow: auto; 171 | -ms-overflow-style: scrollbar; 172 | } 173 | 174 | figure { 175 | margin: 0 0 1rem; 176 | } 177 | 178 | img { 179 | vertical-align: middle; 180 | border-style: none; 181 | } 182 | 183 | svg { 184 | overflow: hidden; 185 | vertical-align: middle; 186 | } 187 | 188 | table { 189 | border-collapse: collapse; 190 | } 191 | 192 | caption { 193 | padding-top: 0.75rem; 194 | padding-bottom: 0.75rem; 195 | color: #6c757d; 196 | text-align: left; 197 | caption-side: bottom; 198 | } 199 | 200 | th { 201 | text-align: inherit; 202 | } 203 | 204 | label { 205 | display: inline-block; 206 | margin-bottom: 0.5rem; 207 | } 208 | 209 | button { 210 | border-radius: 0; 211 | } 212 | 213 | button:focus { 214 | outline: 1px dotted; 215 | outline: 5px auto -webkit-focus-ring-color; 216 | } 217 | 218 | input, 219 | button, 220 | select, 221 | optgroup, 222 | textarea { 223 | margin: 0; 224 | font-family: inherit; 225 | font-size: inherit; 226 | line-height: inherit; 227 | } 228 | 229 | button, 230 | input { 231 | overflow: visible; 232 | } 233 | 234 | button, 235 | select { 236 | text-transform: none; 237 | } 238 | 239 | button, 240 | html [type="button"], 241 | [type="reset"], 242 | [type="submit"] { 243 | -webkit-appearance: button; 244 | } 245 | 246 | button::-moz-focus-inner, 247 | [type="button"]::-moz-focus-inner, 248 | [type="reset"]::-moz-focus-inner, 249 | [type="submit"]::-moz-focus-inner { 250 | padding: 0; 251 | border-style: none; 252 | } 253 | 254 | input[type="radio"], 255 | input[type="checkbox"] { 256 | box-sizing: border-box; 257 | padding: 0; 258 | } 259 | 260 | input[type="date"], 261 | input[type="time"], 262 | input[type="datetime-local"], 263 | input[type="month"] { 264 | -webkit-appearance: listbox; 265 | } 266 | 267 | textarea { 268 | overflow: auto; 269 | resize: vertical; 270 | } 271 | 272 | fieldset { 273 | min-width: 0; 274 | padding: 0; 275 | margin: 0; 276 | border: 0; 277 | } 278 | 279 | legend { 280 | display: block; 281 | width: 100%; 282 | max-width: 100%; 283 | padding: 0; 284 | margin-bottom: .5rem; 285 | font-size: 1.5rem; 286 | line-height: inherit; 287 | color: inherit; 288 | white-space: normal; 289 | } 290 | 291 | progress { 292 | vertical-align: baseline; 293 | } 294 | 295 | [type="number"]::-webkit-inner-spin-button, 296 | [type="number"]::-webkit-outer-spin-button { 297 | height: auto; 298 | } 299 | 300 | [type="search"] { 301 | outline-offset: -2px; 302 | -webkit-appearance: none; 303 | } 304 | 305 | [type="search"]::-webkit-search-cancel-button, 306 | [type="search"]::-webkit-search-decoration { 307 | -webkit-appearance: none; 308 | } 309 | 310 | ::-webkit-file-upload-button { 311 | font: inherit; 312 | -webkit-appearance: button; 313 | } 314 | 315 | output { 316 | display: inline-block; 317 | } 318 | 319 | summary { 
320 |   display: list-item;
321 |   cursor: pointer;
322 | }
323 | 
324 | template {
325 |   display: none;
326 | }
327 | 
328 | [hidden] {
329 |   display: none !important;
330 | }
331 | /*# sourceMappingURL=bootstrap-reboot.css.map */
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask, flash, render_template, request, redirect, url_for
2 | from flask_uploads import configure_uploads, UploadSet, IMAGES
3 | import os
4 | from werkzeug.utils import secure_filename
5 | import sys
6 | import numpy as np
7 | from PIL import Image
8 | import base64
9 | import re
10 | from io import BytesIO # for handling byte strings
11 | from io import StringIO # for handling unicode strings
12 | import scipy.misc
13 | from pathlib import Path
14 | import cv2
15 | import shutil
16 | 
17 | 
18 | 
19 | def image_stats(image):
20 |     # Per-channel mean and standard deviation of an L*a*b* image;
21 |     # this is the 6-tuple that color_transfer below unpacks.
22 |     (l, a, b) = cv2.split(image)
23 |     return (l.mean(), l.std(), a.mean(), a.std(), b.mean(), b.std())
24 | 
25 | 
26 | def color_transfer(source, target):
27 |     # convert the images from the BGR to L*a*b* color space, being
28 |     # sure to utilize the floating point data type (note: OpenCV
29 |     # expects floats to be 32-bit, so use that instead of 64-bit)
30 |     source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
31 |     target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
32 |     # compute color statistics for the source and target images
33 |     (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
34 |     (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
35 | 
36 |     # subtract the means from the target image
37 |     (l, a, b) = cv2.split(target)
38 |     l -= lMeanTar
39 |     # a -= aMeanTar
40 |     # b -= bMeanTar
41 | 
42 |     # scale by the standard deviations
43 |     l = (lStdTar / lStdSrc) * l
44 |     # a = (aStdTar / aStdSrc) * a
45 |     # b = (bStdTar / bStdSrc) * b
46 | 
47 |     # add in the source mean
48 |     l += lMeanSrc
49 |     # a += aMeanSrc
50 |     # b += bMeanSrc
51 | 
52 |     # clip the pixel intensities to [0, 255] if they fall outside
53 |     # this range
54 |     l = np.clip(l, 0, 255)
55 |     # a = np.clip(a, 0, 255)
56 |     # b = np.clip(b, 0, 255)
57 |     print("--------------in color transfer---------------")
58 |     # merge the channels together and convert back to the BGR color
59 |     # space, being sure to utilize the 8-bit unsigned integer data
60 |     # type
61 |     transfer = cv2.merge([l, a, b])
62 |     transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
63 | 
64 |     # return the color transferred image
65 |     return transfer
66 | 
67 | 
68 | def histogram_normalization(file):
69 |     img = cv2.imread(file)
70 |     img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
71 |     # equalize the histogram of the Y channel
72 |     img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
73 |     # convert the YUV image back to BGR format
74 |     img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
75 |     print("--------------in histogram_normalization---------------")
76 |     # saving_name = Path("/home/danish/Downloads/Pot/rice/static/photos/test/dumpling_first.jpg")
77 |     cv2.imwrite(file, img_output)  # write the equalized image, not the original
78 |     return
79 | 
80 | 
81 | 
82 | 
83 | app = Flask(__name__)
84 | app.config["TEMPLATES_AUTO_RELOAD"] = True
85 | app.config.update({
86 |     'UPLOADS_DEFAULT_DEST': os.path.realpath('.') + '/static',
87 |     'UPLOADS_DEFAULT_URL': 'http://localhost:5000'
88 | })
89 | 
90 | photos = UploadSet('photos', IMAGES)
91 | configure_uploads(app, photos)
92 | 
93 | @app.route('/')
94 | def index():
95 |     return render_template('app.html')
96 | 
97 | 
98 | @app.route('/save', methods=['POST'])
99 | def get_image():
100 |     data_url = request.values['imageBase64']
101 |     if data_url:
102 |         print("Image received")
103 |     else:
104 |         print("Failed-----------")
105 |     offset = data_url.index(',')+1
106 |     img_bytes = base64.b64decode(data_url[offset:])
107 |     img = Image.open(BytesIO(img_bytes))
108 |     img = np.array(img)
109 |     loc = '/home/danish/Project/mix/static/photos/test'
110 |     scipy.misc.imsave(os.path.join(loc,"dumpling_first.jpg"),img)
111 |     src_dir = '/home/danish/Project/mix/static/photos/test/dumpling_first.jpg'
112 |     final_dir = '/home/danish/Project/mix/static/dumpling_first.jpg'
113 |     shutil.copy(src_dir,final_dir)
114 |     histogram_normalization(os.path.join(loc,"dumpling_first.jpg"))
115 |     return '',204
116 | 
117 | 
118 | @app.route('/upload1', methods=['GET', 'POST'])
119 | def upload1():
120 |     if request.method == 'POST' and 'photo' in request.files:
121 |         myfile = request.files['photo']
122 |         loc = '/home/danish/Project/mix/static/photos/test'
123 |         saving_name = Path("/home/danish/Project/mix/static/photos/test/dumpling_first.jpg")
124 |         if saving_name.exists():
125 |             os.remove(saving_name)
126 |         photos.save(myfile,'test', 'dumpling_first'+'.'+'jpg')
127 |         histogram_normalization(os.path.join(loc,"dumpling_first.jpg"))
128 |         return '',204
129 |     return '',204
130 | 
131 | @app.route('/upload2', methods=['GET', 'POST'])
132 | def upload2():
133 |     if request.method == 'POST' and 'photo' in request.files:
134 |         myfile = request.files['photo']
135 |         loc = '/home/danish/Project/mix/static/photos/test'
136 |         saving_name = Path("/home/danish/Project/mix/static/photos/test/dumpling_second.jpg")
137 |         if saving_name.exists():
138 |             os.remove(saving_name)
139 |         photos.save(myfile,'test', 'dumpling_second'+'.'+'jpg')
140 |         histogram_normalization(os.path.join(loc,"dumpling_second.jpg"))
141 |         return '',204
142 |     return '',204
143 | 
144 | 
145 | 
146 | 
147 | @app.route('/seg_one',methods=['GET','POST'])
148 | def seg_one():
149 |     # filename = '/home/danish/Downloads/Pot/rice/static/second_segmented.jpg'
150 |     # Provide path for image that is uploaded in static/photos/test
151 |     in_file = '/home/danish/Project/mix/static/photos/test/dumpling_first.jpg'
152 |     saving_name = Path("/home/danish/Project/mix/static/first_segmented.png")
153 |     if saving_name.exists():
154 |         os.remove(saving_name)
155 |     os.system('python3 first_segment.py splash --weights=mask_rcnn_shirt_0005.h5 --image="%s"' % in_file)
156 |     print("--------------in first image---------------")
157 |     # path and image of output image
158 |     out_file = '/home/danish/Project/mix/static/first_segmented.png'
159 |     return '',204
160 | 
161 | 
162 | 
163 | @app.route('/seg_two',methods=['GET','POST'])
164 | def seg_two():
165 |     # filename = '/home/danish/Downloads/Pot/rice/static/second_segmented.jpg'
166 |     # Provide path for image that is uploaded in static/photos/test
167 |     in_file = '/home/danish/Project/mix/static/photos/test/dumpling_second.jpg'
168 |     saving_name = Path("/home/danish/Project/mix/static/second_segmented.png")
169 |     if saving_name.exists():
170 |         os.remove(saving_name)
171 |     os.system('python3 second_segment.py splash --weights=mask_rcnn_shirt_0005.h5 --image="%s"' % in_file)
172 |     print("--------------in second image---------------")
173 |     # path and image of output image
174 |     out_file = '/home/danish/Project/mix/static/second_segmented.png'
175 |     return '',204
176 | 
177 | @app.route('/generate', methods=['GET','POST'])
178 | def generate():
179 |     in_file_raw1 = '/home/danish/Project/mix/static/photos/test/dumpling_first.jpg'
180 |     in_file_raw2 = '/home/danish/Project/mix/static/photos/test/dumpling_second.jpg'
181 |     in_file_seg1 = '/home/danish/Project/mix/static/first_segmented.png'
182 |     in_file_seg2 = '/home/danish/Project/mix/static/second_segmented.png'
183 |     os.system('python3 stylize.py --mask_n_colors=1 --content_img="{0}" --target_mask="{1}" --style_img="{2}" --style_mask="{3}"'.format(in_file_raw1, in_file_seg1, in_file_raw2, in_file_seg2))
184 |     out_file = '/home/danish/Project/mix/static/result_final.png'
185 |     return '',204
186 | 
187 | 
188 | if __name__ == '__main__':
189 |     app.run(host='0.0.0.0', port=5000, ssl_context='adhoc')
--------------------------------------------------------------------------------
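One observation on app.py: color_transfer is defined at the top but never called from any route. A hedged usage sketch, relying only on the BGR-in/BGR-out behavior visible in the function itself (file names are placeholders):

import cv2

source = cv2.imread('person.jpg')    # image supplying the target lighting statistics
target = cv2.imread('apparel.jpg')   # image whose luminance is adjusted to match
matched = color_transfer(source, target)
cv2.imwrite('apparel_matched.jpg', matched)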
/mrcnn/parallel_model.py:
--------------------------------------------------------------------------------
1 | """
2 | Mask R-CNN
3 | Multi-GPU Support for Keras.
4 | 
5 | Copyright (c) 2017 Matterport, Inc.
6 | Licensed under the MIT License (see LICENSE for details)
7 | Written by Waleed Abdulla
8 | 
9 | Ideas and small code snippets from these sources:
10 | https://github.com/fchollet/keras/issues/2436
11 | https://medium.com/@kuza55/transparent-multi-gpu-training-on-tensorflow-with-keras-8b0016fd9012
12 | https://github.com/avolkov1/keras_experiments/blob/master/keras_exp/multigpu/
13 | https://github.com/fchollet/keras/blob/master/keras/utils/training_utils.py
14 | """
15 | 
16 | import tensorflow as tf
17 | import keras.backend as K
18 | import keras.layers as KL
19 | import keras.models as KM
20 | 
21 | 
22 | class ParallelModel(KM.Model):
23 |     """Subclasses the standard Keras Model and adds multi-GPU support.
24 |     It works by creating a copy of the model on each GPU. Then it slices
25 |     the inputs and sends a slice to each copy of the model, and then
26 |     merges the outputs together and applies the loss on the combined
27 |     outputs.
28 |     """
29 | 
30 |     def __init__(self, keras_model, gpu_count):
31 |         """Class constructor.
32 |         keras_model: The Keras model to parallelize
33 |         gpu_count: Number of GPUs. Must be > 1
34 |         """
35 |         self.inner_model = keras_model
36 |         self.gpu_count = gpu_count
37 |         merged_outputs = self.make_parallel()
38 |         super(ParallelModel, self).__init__(inputs=self.inner_model.inputs,
39 |                                             outputs=merged_outputs)
40 | 
41 |     def __getattribute__(self, attrname):
42 |         """Redirect loading and saving methods to the inner model. That's where
43 |         the weights are stored."""
44 |         if 'load' in attrname or 'save' in attrname:
45 |             return getattr(self.inner_model, attrname)
46 |         return super(ParallelModel, self).__getattribute__(attrname)
47 | 
48 |     def summary(self, *args, **kwargs):
49 |         """Override summary() to display summaries of both, the wrapper
50 |         and inner models."""
51 |         super(ParallelModel, self).summary(*args, **kwargs)
52 |         self.inner_model.summary(*args, **kwargs)
53 | 
54 |     def make_parallel(self):
55 |         """Creates a new wrapper model that consists of multiple replicas of
56 |         the original model placed on different GPUs.
57 |         """
58 |         # Slice inputs. Slice inputs on the CPU to avoid sending a copy
59 |         # of the full inputs to all GPUs. Saves on bandwidth and memory.
60 | input_slices = {name: tf.split(x, self.gpu_count) 61 | for name, x in zip(self.inner_model.input_names, 62 | self.inner_model.inputs)} 63 | 64 | output_names = self.inner_model.output_names 65 | outputs_all = [] 66 | for i in range(len(self.inner_model.outputs)): 67 | outputs_all.append([]) 68 | 69 | # Run the model call() on each GPU to place the ops there 70 | for i in range(self.gpu_count): 71 | with tf.device('/gpu:%d' % i): 72 | with tf.name_scope('tower_%d' % i): 73 | # Run a slice of inputs through this replica 74 | zipped_inputs = zip(self.inner_model.input_names, 75 | self.inner_model.inputs) 76 | inputs = [ 77 | KL.Lambda(lambda s: input_slices[name][i], 78 | output_shape=lambda s: (None,) + s[1:])(tensor) 79 | for name, tensor in zipped_inputs] 80 | # Create the model replica and get the outputs 81 | outputs = self.inner_model(inputs) 82 | if not isinstance(outputs, list): 83 | outputs = [outputs] 84 | # Save the outputs for merging back together later 85 | for l, o in enumerate(outputs): 86 | outputs_all[l].append(o) 87 | 88 | # Merge outputs on CPU 89 | with tf.device('/cpu:0'): 90 | merged = [] 91 | for outputs, name in zip(outputs_all, output_names): 92 | # Concatenate or average outputs? 93 | # Outputs usually have a batch dimension and we concatenate 94 | # across it. If they don't, then the output is likely a loss 95 | # or a metric value that gets averaged across the batch. 96 | # Keras expects losses and metrics to be scalars. 97 | if K.int_shape(outputs[0]) == (): 98 | # Average 99 | m = KL.Lambda(lambda o: tf.add_n(o) / len(outputs), name=name)(outputs) 100 | else: 101 | # Concatenate 102 | m = KL.Concatenate(axis=0, name=name)(outputs) 103 | merged.append(m) 104 | return merged 105 | 106 | 107 | if __name__ == "__main__": 108 | # Testing code below. It creates a simple model to train on MNIST and 109 | # tries to run it on 2 GPUs. It saves the graph so it can be viewed 110 | # in TensorBoard. Run it as: 111 | # 112 | # python3 parallel_model.py 113 | 114 | import os 115 | import numpy as np 116 | import keras.optimizers 117 | from keras.datasets import mnist 118 | from keras.preprocessing.image import ImageDataGenerator 119 | 120 | GPU_COUNT = 2 121 | 122 | # Root directory of the project 123 | ROOT_DIR = os.path.abspath("../") 124 | 125 | # Directory to save logs and trained model 126 | MODEL_DIR = os.path.join(ROOT_DIR, "logs") 127 | 128 | def build_model(x_train, num_classes): 129 | # Reset default graph. Keras leaves old ops in the graph, 130 | # which are ignored for execution but clutter graph 131 | # visualization in TensorBoard. 
132 | tf.reset_default_graph() 133 | 134 | inputs = KL.Input(shape=x_train.shape[1:], name="input_image") 135 | x = KL.Conv2D(32, (3, 3), activation='relu', padding="same", 136 | name="conv1")(inputs) 137 | x = KL.Conv2D(64, (3, 3), activation='relu', padding="same", 138 | name="conv2")(x) 139 | x = KL.MaxPooling2D(pool_size=(2, 2), name="pool1")(x) 140 | x = KL.Flatten(name="flat1")(x) 141 | x = KL.Dense(128, activation='relu', name="dense1")(x) 142 | x = KL.Dense(num_classes, activation='softmax', name="dense2")(x) 143 | 144 | return KM.Model(inputs, x, "digit_classifier_model") 145 | 146 | # Load MNIST Data 147 | (x_train, y_train), (x_test, y_test) = mnist.load_data() 148 | x_train = np.expand_dims(x_train, -1).astype('float32') / 255 149 | x_test = np.expand_dims(x_test, -1).astype('float32') / 255 150 | 151 | print('x_train shape:', x_train.shape) 152 | print('x_test shape:', x_test.shape) 153 | 154 | # Build data generator and model 155 | datagen = ImageDataGenerator() 156 | model = build_model(x_train, 10) 157 | 158 | # Add multi-GPU support. 159 | model = ParallelModel(model, GPU_COUNT) 160 | 161 | optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, clipnorm=5.0) 162 | 163 | model.compile(loss='sparse_categorical_crossentropy', 164 | optimizer=optimizer, metrics=['accuracy']) 165 | 166 | model.summary() 167 | 168 | # Train 169 | model.fit_generator( 170 | datagen.flow(x_train, y_train, batch_size=64), 171 | steps_per_epoch=50, epochs=10, verbose=1, 172 | validation_data=(x_test, y_test), 173 | callbacks=[keras.callbacks.TensorBoard(log_dir=MODEL_DIR, 174 | write_graph=True)] 175 | ) 176 | -------------------------------------------------------------------------------- /static/css/fileinput.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * bootstrap-fileinput v4.5.2 3 | * http://plugins.krajee.com/file-input 4 | * 5 | * Krajee default styling for bootstrap-fileinput. 
6 | * 7 | * Author: Kartik Visweswaran 8 | * Copyright: 2014 - 2018, Kartik Visweswaran, Krajee.com 9 | * 10 | * Licensed under the BSD 3-Clause 11 | * https://github.com/kartik-v/bootstrap-fileinput/blob/master/LICENSE.md 12 | */.btn-file input[type=file],.file-caption-icon,.file-no-browse,.file-preview .fileinput-remove,.file-zoom-dialog .btn-navigate,.file-zoom-dialog .floating-buttons,.krajee-default .file-thumb-progress{position:absolute}.file-loading input[type=file],input[type=file].file-loading{width:0;height:0}.file-no-browse{left:50%;bottom:20%;width:1px;height:1px;font-size:0;opacity:0;border:none;background:0 0;outline:0;box-shadow:none}.file-caption-icon,.file-input-ajax-new .fileinput-remove-button,.file-input-ajax-new .fileinput-upload-button,.file-input-ajax-new .no-browse .input-group-btn,.file-input-new .close,.file-input-new .file-preview,.file-input-new .fileinput-remove-button,.file-input-new .fileinput-upload-button,.file-input-new .glyphicon-file,.file-input-new .no-browse .input-group-btn,.file-zoom-dialog .modal-header:after,.file-zoom-dialog .modal-header:before,.hide-content .kv-file-content,.kv-hidden{display:none}.btn-file,.file-caption,.file-input,.file-loading:before,.file-preview,.file-zoom-dialog .modal-dialog,.krajee-default .file-thumbnail-footer,.krajee-default.file-preview-frame{position:relative}.file-error-message pre,.file-error-message ul,.krajee-default .file-actions,.krajee-default .file-other-error{text-align:left}.file-error-message pre,.file-error-message ul{margin:0}.krajee-default .file-drag-handle,.krajee-default .file-upload-indicator{float:left;margin:5px 0 -5px;width:16px;height:16px}.krajee-default .file-thumb-progress .progress,.krajee-default .file-thumb-progress .progress-bar{height:11px;font-family:Verdana,Helvetica,sans-serif;font-size:9px}.krajee-default .file-caption-info,.krajee-default .file-size-info{display:block;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;width:160px;height:15px;margin:auto}.file-zoom-content>.file-object.type-flash,.file-zoom-content>.file-object.type-image,.file-zoom-content>.file-object.type-video{max-width:100%;max-height:100%;width:auto}.file-zoom-content>.file-object.type-flash,.file-zoom-content>.file-object.type-video{height:100%}.file-zoom-content>.file-object.type-default,.file-zoom-content>.file-object.type-html,.file-zoom-content>.file-object.type-pdf,.file-zoom-content>.file-object.type-text{width:100%}.file-loading:before{content:" Loading...";display:inline-block;padding-left:20px;line-height:16px;font-size:13px;font-variant:small-caps;color:#999;background:url(../img/loading.gif) top left no-repeat}.file-object{margin:0 0 -5px;padding:0}.btn-file{overflow:hidden}.btn-file input[type=file]{top:0;left:0;min-width:100%;min-height:100%;text-align:right;opacity:0;background:none;cursor:inherit;display:block}.btn-file ::-ms-browse{font-size:10000px;width:100%;height:100%}.file-caption .file-caption-name{width:100%;margin:0;padding:0;box-shadow:none;border:none;background:0 0;outline:0}.file-caption.icon-visible .file-caption-icon{display:inline-block}.file-caption.icon-visible .file-caption-name{padding-left:15px}.file-caption-icon{left:8px}.file-error-message{color:#a94442;background-color:#f2dede;margin:5px;border:1px solid #ebccd1;border-radius:4px;padding:15px}.file-error-message pre{margin:5px 0}.file-caption-disabled{background-color:#eee;cursor:not-allowed;opacity:1}.file-preview{border-radius:5px;border:1px solid #ddd;padding:8px;width:100%;margin-bottom:5px}.file-preview 
.btn-xs{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.file-preview .fileinput-remove{top:1px;right:1px;line-height:10px}.file-preview .clickable{cursor:pointer}.file-preview-image{font:40px Impact,Charcoal,sans-serif;color:green}.krajee-default.file-preview-frame{margin:8px;border:1px solid rgba(0,0,0,.2);box-shadow:0 0 10px 0 rgba(0,0,0,.2);padding:6px;float:left;text-align:center}.krajee-default.file-preview-frame .kv-file-content{width:213px;height:160px}.krajee-default.file-preview-frame .kv-file-content.kv-pdf-rendered{width:400px}.krajee-default.file-preview-frame[data-template=audio] .kv-file-content{width:240px;height:55px}.krajee-default.file-preview-frame .file-thumbnail-footer{height:70px}.krajee-default.file-preview-frame:not(.file-preview-error):hover{border:1px solid rgba(0,0,0,.3);box-shadow:0 0 10px 0 rgba(0,0,0,.4)}.krajee-default .file-preview-text{display:block;color:#428bca;border:1px solid #ddd;font-family:Menlo,Monaco,Consolas,"Courier New",monospace;outline:0;padding:8px;resize:none}.krajee-default .file-preview-html{border:1px solid #ddd;padding:8px;overflow:auto}.krajee-default .file-other-icon{font-size:6em}.krajee-default .file-footer-buttons{float:right}.krajee-default .file-footer-caption{display:block;text-align:center;padding-top:4px;font-size:11px;color:#777;margin-bottom:15px}.krajee-default .file-preview-error{opacity:.65;box-shadow:none}.krajee-default .file-thumb-progress{height:11px;top:37px;left:0;right:0}.krajee-default.kvsortable-ghost{background:#e1edf7;border:2px solid #a1abff}.krajee-default .file-preview-other:hover{opacity:.8}.krajee-default .file-preview-frame:not(.file-preview-error) .file-footer-caption:hover{color:#000}.kv-upload-progress .progress{height:20px;margin:10px 0;overflow:hidden}.kv-upload-progress .progress-bar{height:20px;font-family:Verdana,Helvetica,sans-serif}.file-zoom-dialog .file-other-icon{font-size:22em;font-size:50vmin}.file-zoom-dialog .modal-dialog{width:auto}.file-zoom-dialog .modal-header{display:flex;align-items:center;justify-content:space-between}.file-zoom-dialog .btn-navigate{padding:0;margin:0;background:0 0;text-decoration:none;outline:0;opacity:.7;top:45%;font-size:4em;color:#1c94c4}.file-zoom-dialog .btn-navigate:not([disabled]):hover{outline:0;box-shadow:none;opacity:.6}.file-zoom-dialog .floating-buttons{top:5px;right:10px}.file-zoom-dialog .btn-navigate[disabled]{opacity:.3}.file-zoom-dialog .btn-prev{left:1px}.file-zoom-dialog .btn-next{right:1px}.file-zoom-dialog .kv-zoom-title{font-weight:300;color:#999;max-width:50%;overflow:hidden;white-space:nowrap;text-overflow:ellipsis}.file-input-ajax-new .no-browse .form-control,.file-input-new .no-browse .form-control{border-top-right-radius:4px;border-bottom-right-radius:4px}.file-caption-main{width:100%}.file-thumb-loading{background:url(../img/loading.gif) center center no-repeat content-box!important}.file-drop-zone{border:1px dashed #aaa;border-radius:4px;height:100%;text-align:center;vertical-align:middle;margin:12px 15px 12px 12px;padding:5px}.file-drop-zone.clickable:hover{border:2px dashed #999}.file-drop-zone.clickable:focus{border:2px solid #5acde2}.file-drop-zone .file-preview-thumbnails{cursor:default}.file-drop-zone-title{color:#aaa;font-size:1.6em;padding:85px 10px;cursor:default}.file-highlighted{border:2px dashed #999!important;background-color:#eee}.file-uploading{background:url(../img/loading-sm.gif) center bottom 10px no-repeat;opacity:.65}.file-zoom-fullscreen .modal-dialog{min-width:100%;margin:0}.file-zoom-fullscreen 
.modal-content{border-radius:0;box-shadow:none;min-height:100vh}.file-zoom-fullscreen .modal-body{overflow-y:auto}.floating-buttons{z-index:3000}.floating-buttons .btn-kv{margin-left:3px;z-index:3000}.file-zoom-content{height:480px;text-align:center}.file-zoom-content .file-preview-image,.file-zoom-content .file-preview-video{max-height:100%}.file-zoom-content>.file-object.type-image{height:auto;min-height:inherit}.file-zoom-content>.file-object.type-audio{width:auto;height:30px}@media (min-width:576px){.file-zoom-dialog .modal-dialog{max-width:500px}}@media (min-width:992px){.file-zoom-dialog .modal-lg{max-width:800px}}@media (max-width:767px){.file-preview-thumbnails{display:flex;justify-content:center;align-items:center;flex-direction:column}.file-zoom-dialog .modal-header{flex-direction:column}}@media (max-width:350px){.krajee-default.file-preview-frame:not([data-template=audio]) .kv-file-content{width:160px}}@media (max-width:420px){.krajee-default.file-preview-frame .kv-file-content.kv-pdf-rendered{width:100%}}.file-loading[dir=rtl]:before{background:url(../img/loading.gif) top right no-repeat;padding-left:0;padding-right:20px}.file-sortable .file-drag-handle{cursor:move;opacity:1}.file-sortable .file-drag-handle:hover{opacity:.7}.clickable .file-drop-zone-title{cursor:pointer}.kv-zoom-actions .btn-kv{margin-left:3px}.file-preview-initial.sortable-chosen{background-color:#d9edf7} -------------------------------------------------------------------------------- /mrcnn/config.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Base Configurations class. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import numpy as np 11 | 12 | 13 | # Base Configuration Class 14 | # Don't use this class directly. Instead, sub-class it and override 15 | # the configurations you need to change. 16 | 17 | class Config(object): 18 | """Base configuration class. For custom configurations, create a 19 | sub-class that inherits from this one and override properties 20 | that need to be changed. 21 | """ 22 | # Name the configurations. For example, 'COCO', 'Experiment 3', ...etc. 23 | # Useful if your code needs to do things differently depending on which 24 | # experiment is running. 25 | NAME = None # Override in sub-classes 26 | 27 | # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1. 28 | GPU_COUNT = 1 29 | 30 | # Number of images to train with on each GPU. A 12GB GPU can typically 31 | # handle 2 images of 1024x1024px. 32 | # Adjust based on your GPU memory and image sizes. Use the highest 33 | # number that your GPU can handle for best performance. 34 | IMAGES_PER_GPU = 2 35 | 36 | # Number of training steps per epoch 37 | # This doesn't need to match the size of the training set. Tensorboard 38 | # updates are saved at the end of each epoch, so setting this to a 39 | # smaller number means getting more frequent TensorBoard updates. 40 | # Validation stats are also calculated at each epoch end and they 41 | # might take a while, so don't set this too small to avoid spending 42 | # a lot of time on validation stats. 43 | STEPS_PER_EPOCH = 1000 44 | 45 | # Number of validation steps to run at the end of every training epoch. 46 | # A bigger number improves accuracy of validation stats, but slows 47 | # down the training. 
48 | VALIDATION_STEPS = 50 49 | 50 | # Backbone network architecture 51 | # Supported values are: resnet50, resnet101. 52 | # You can also provide a callable that should have the signature 53 | # of model.resnet_graph. If you do so, you need to supply a callable 54 | # to COMPUTE_BACKBONE_SHAPE as well 55 | BACKBONE = "resnet101" 56 | 57 | # Only useful if you supply a callable to BACKBONE. Should compute 58 | # the shape of each layer of the FPN Pyramid. 59 | # See model.compute_backbone_shapes 60 | COMPUTE_BACKBONE_SHAPE = None 61 | 62 | # The strides of each layer of the FPN Pyramid. These values 63 | # are based on a Resnet101 backbone. 64 | BACKBONE_STRIDES = [4, 8, 16, 32, 64] 65 | 66 | # Size of the fully-connected layers in the classification graph 67 | FPN_CLASSIF_FC_LAYERS_SIZE = 1024 68 | 69 | # Size of the top-down layers used to build the feature pyramid 70 | TOP_DOWN_PYRAMID_SIZE = 256 71 | 72 | # Number of classification classes (including background) 73 | NUM_CLASSES = 1 # Override in sub-classes 74 | 75 | # Length of square anchor side in pixels 76 | RPN_ANCHOR_SCALES = (32, 64, 128, 256, 512) 77 | 78 | # Ratios of anchors at each cell (width/height) 79 | # A value of 1 represents a square anchor, and 0.5 is a wide anchor 80 | RPN_ANCHOR_RATIOS = [0.5, 1, 2] 81 | 82 | # Anchor stride 83 | # If 1 then anchors are created for each cell in the backbone feature map. 84 | # If 2, then anchors are created for every other cell, and so on. 85 | RPN_ANCHOR_STRIDE = 1 86 | 87 | # Non-max suppression threshold to filter RPN proposals. 88 | # You can increase this during training to generate more proposals. 89 | RPN_NMS_THRESHOLD = 0.7 90 | 91 | # How many anchors per image to use for RPN training 92 | RPN_TRAIN_ANCHORS_PER_IMAGE = 256 93 | 94 | # ROIs kept after tf.nn.top_k and before non-maximum suppression 95 | PRE_NMS_LIMIT = 6000 96 | 97 | # ROIs kept after non-maximum suppression (training and inference) 98 | POST_NMS_ROIS_TRAINING = 2000 99 | POST_NMS_ROIS_INFERENCE = 1000 100 | 101 | # If enabled, resizes instance masks to a smaller size to reduce 102 | # memory load. Recommended when using high-resolution images. 103 | USE_MINI_MASK = True 104 | MINI_MASK_SHAPE = (56, 56) # (height, width) of the mini-mask 105 | 106 | # Input image resizing 107 | # Generally, use the "square" resizing mode for training and predicting 108 | # and it should work well in most cases. In this mode, images are scaled 109 | # up such that the small side is = IMAGE_MIN_DIM, but ensuring that the 110 | # scaling doesn't make the long side > IMAGE_MAX_DIM. Then the image is 111 | # padded with zeros to make it a square so multiple images can be put 112 | # in one batch. 113 | # Available resizing modes: 114 | # none: No resizing or padding. Return the image unchanged. 115 | # square: Resize and pad with zeros to get a square image 116 | # of size [max_dim, max_dim]. 117 | # pad64: Pads width and height with zeros to make them multiples of 64. 118 | # If IMAGE_MIN_DIM or IMAGE_MIN_SCALE are not None, then it scales 119 | # up before padding. IMAGE_MAX_DIM is ignored in this mode. 120 | # The multiple of 64 is needed to ensure smooth scaling of feature 121 | # maps up and down the 6 levels of the FPN pyramid (2**6=64). 122 | # crop: Picks random crops from the image. First, scales the image based 123 | # on IMAGE_MIN_DIM and IMAGE_MIN_SCALE, then picks a random crop of 124 | # size IMAGE_MIN_DIM x IMAGE_MIN_DIM. Can be used in training only. 125 | # IMAGE_MAX_DIM is not used in this mode.
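# Worked example for the default "square" mode (illustrative numbers):
# with IMAGE_MIN_DIM = 800 and IMAGE_MAX_DIM = 1024, a 600x900 image is
# first scaled by 800/600 = 1.33 toward 800x1200; since 1200 exceeds
# IMAGE_MAX_DIM, the scale is capped at 1024/900 = 1.14, giving 683x1024,
# which is then zero-padded to 1024x1024.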
126 | IMAGE_RESIZE_MODE = "square" 127 | IMAGE_MIN_DIM = 800 128 | IMAGE_MAX_DIM = 1024 129 | # Minimum scaling ratio. Checked after IMAGE_MIN_DIM and can force further 130 | # upscaling. For example, if set to 2 then images are scaled up to double 131 | # the width and height, or more, even if IMAGE_MIN_DIM doesn't require it. 132 | # However, in 'square' mode, it can be overruled by IMAGE_MAX_DIM. 133 | IMAGE_MIN_SCALE = 0 134 | # Number of color channels per image. RGB = 3, grayscale = 1, RGB-D = 4 135 | # Changing this requires other changes in the code. See the WIKI for more 136 | # details: https://github.com/matterport/Mask_RCNN/wiki 137 | IMAGE_CHANNEL_COUNT = 3 138 | 139 | # Image mean (RGB) 140 | MEAN_PIXEL = np.array([123.7, 116.8, 103.9]) 141 | 142 | # Number of ROIs per image to feed to classifier/mask heads 143 | # The Mask RCNN paper uses 512 but often the RPN doesn't generate 144 | # enough positive proposals to fill this and keep a positive:negative 145 | # ratio of 1:3. You can increase the number of proposals by adjusting 146 | # the RPN NMS threshold. 147 | TRAIN_ROIS_PER_IMAGE = 200 148 | 149 | # Percent of positive ROIs used to train classifier/mask heads 150 | ROI_POSITIVE_RATIO = 0.33 151 | 152 | # Pooled ROIs 153 | POOL_SIZE = 7 154 | MASK_POOL_SIZE = 14 155 | 156 | # Shape of output mask 157 | # To change this you also need to change the neural network mask branch 158 | MASK_SHAPE = [28, 28] 159 | 160 | # Maximum number of ground truth instances to use in one image 161 | MAX_GT_INSTANCES = 100 162 | 163 | # Bounding box refinement standard deviation for RPN and final detections. 164 | RPN_BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 165 | BBOX_STD_DEV = np.array([0.1, 0.1, 0.2, 0.2]) 166 | 167 | # Max number of final detections 168 | DETECTION_MAX_INSTANCES = 100 169 | 170 | # Minimum probability value to accept a detected instance 171 | # ROIs below this threshold are skipped 172 | DETECTION_MIN_CONFIDENCE = 0.7 173 | 174 | # Non-maximum suppression threshold for detection 175 | DETECTION_NMS_THRESHOLD = 0.3 176 | 177 | # Learning rate and momentum 178 | # The Mask RCNN paper uses lr=0.02, but on TensorFlow it causes 179 | # weights to explode. Likely due to differences in optimizer 180 | # implementation. 181 | LEARNING_RATE = 0.001 182 | LEARNING_MOMENTUM = 0.9 183 | 184 | # Weight decay regularization 185 | WEIGHT_DECAY = 0.0001 186 | 187 | # Loss weights for more precise optimization. 188 | # Can be used for R-CNN training setup. 189 | LOSS_WEIGHTS = { 190 | "rpn_class_loss": 1., 191 | "rpn_bbox_loss": 1., 192 | "mrcnn_class_loss": 1., 193 | "mrcnn_bbox_loss": 1., 194 | "mrcnn_mask_loss": 1. 195 | } 196 | 197 | # Use RPN ROIs or externally generated ROIs for training 198 | # Keep this True for most situations. Set to False if you want to train 199 | # the head branches on ROIs generated by code rather than the ROIs from 200 | # the RPN. For example, to debug the classifier head without having to 201 | # train the RPN. 202 | USE_RPN_ROIS = True 203 | 204 | # Train or freeze batch normalization layers 205 | # None: Train BN layers. This is the normal mode 206 | # False: Freeze BN layers. Good when using a small batch size 207 | # True: (don't use).
Set layer in training mode even when predicting 208 | TRAIN_BN = False # Defaulting to False since batch size is often small 209 | 210 | # Gradient norm clipping 211 | GRADIENT_CLIP_NORM = 5.0 212 | 213 | def __init__(self): 214 | """Set values of computed attributes.""" 215 | # Effective batch size 216 | self.BATCH_SIZE = self.IMAGES_PER_GPU * self.GPU_COUNT 217 | 218 | # Input image size 219 | if self.IMAGE_RESIZE_MODE == "crop": 220 | self.IMAGE_SHAPE = np.array([self.IMAGE_MIN_DIM, self.IMAGE_MIN_DIM, 221 | self.IMAGE_CHANNEL_COUNT]) 222 | else: 223 | self.IMAGE_SHAPE = np.array([self.IMAGE_MAX_DIM, self.IMAGE_MAX_DIM, 224 | self.IMAGE_CHANNEL_COUNT]) 225 | 226 | # Image meta data length 227 | # See compose_image_meta() for details 228 | self.IMAGE_META_SIZE = 1 + 3 + 3 + 4 + 1 + self.NUM_CLASSES 229 | 230 | def display(self): 231 | """Display Configuration values.""" 232 | print("\nConfigurations:") 233 | for a in dir(self): 234 | if not a.startswith("__") and not callable(getattr(self, a)): 235 | print("{:30} {}".format(a, getattr(self, a))) 236 | print("\n") 237 | -------------------------------------------------------------------------------- /stylize.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import tensorflow as tf 4 | import numpy as np 5 | import scipy.misc 6 | from sklearn.cluster import KMeans 7 | 8 | import Model 9 | import Parser 10 | 11 | 12 | ''' 13 | read & write & init 14 | ''' 15 | def read_image(path, hard_width): # read and preprocess 16 | img = scipy.misc.imread(path) 17 | if hard_width: 18 | img = scipy.misc.imresize(img, float(hard_width) / img.shape[1]) 19 | img = img.astype(np.float32) 20 | img = img[np.newaxis, :, :, :] 21 | img = img - [123.68, 116.779, 103.939] 22 | return img 23 | 24 | def read_single_mask(path, hard_width): 25 | rawmask = scipy.misc.imread(path) 26 | if hard_width: 27 | rawmask = scipy.misc.imresize(rawmask, float(hard_width) / rawmask.shape[1], interp='nearest') 28 | rawmask = rawmask // 255 # integer division: only pure white pixels become 1 29 | rawmask = rawmask.astype(np.float32) 30 | single = (rawmask.transpose([2, 0, 1]))[0] 31 | return np.stack([single]) 32 | 33 | # colorful, run K-Means to get rid of possible intermediate colors 34 | def read_colorful_mask(target_path, style_path, hard_width, n_colors): 35 | if target_path is None or style_path is None: 36 | raise AttributeError("mask path can't be empty when n_colors > 1") 37 | 38 | target_mask = scipy.misc.imread(target_path) 39 | style_mask = scipy.misc.imread(style_path) 40 | if hard_width: # use 'nearest' to avoid more intermediate colors 41 | target_mask = scipy.misc.imresize(target_mask, float(hard_width) / target_mask.shape[1], 42 | interp='nearest') 43 | style_mask = scipy.misc.imresize(style_mask, float(hard_width) / style_mask.shape[1], 44 | interp='nearest') 45 | 46 | # flatten 47 | target_shape = target_mask.shape[0:2] 48 | target_mask = target_mask.reshape([target_shape[0]*target_shape[1], -1]) 49 | style_shape = style_mask.shape[0:2] 50 | style_mask = style_mask.reshape([style_shape[0]*style_shape[1], -1]) 51 | 52 | # cluster 53 | kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(style_mask) 54 | 55 | # predict 56 | target_labels = kmeans.predict(target_mask.astype(np.float32)) 57 | target_labels = target_labels.reshape([target_shape[0], target_shape[1]]) 58 | style_labels = kmeans.predict(style_mask.astype(np.float32)) 59 | style_labels = style_labels.reshape([style_shape[0],
style_shape[1]]) 60 | 61 | # stack 62 | target_masks = [] 63 | style_masks = [] 64 | for i in range(n_colors): 65 | target_masks.append( (target_labels == i).astype(np.float32) ) 66 | style_masks.append( (style_labels == i).astype(np.float32) ) 67 | return np.stack(target_masks), np.stack(style_masks) 68 | 69 | def write_image(path, img): # postprocess and write 70 | img = img + [123.68, 116.779, 103.939] 71 | img = img[0] 72 | img = np.clip(img, 0, 255).astype('uint8') 73 | scipy.misc.imsave(path, img) 74 | 75 | def get_init_image(content_img, init_noise_ratio): 76 | # [-20, 20] is an empirical noise range commonly used in TF neural-style implementations 77 | noise_img = np.random.uniform(-20., 20., content_img.shape).astype(np.float32) 78 | init_img = init_noise_ratio * noise_img + (1. - init_noise_ratio) * content_img 79 | return init_img 80 | 81 | 82 | ''' 83 | compute features & masks 84 | build net 85 | ''' 86 | def compute_features(vgg_weights, pooling_type, input_img, layers): 87 | input = tf.placeholder(tf.float32, shape=input_img.shape) 88 | net = Model.build_image_net(input, vgg_weights, pooling_type) 89 | features = {} 90 | with tf.Session() as sess: 91 | for layer in layers: 92 | features[layer] = sess.run(net[layer], feed_dict={input: input_img}) 93 | return features 94 | 95 | def compute_layer_masks(masks, layers, ds_type): 96 | masks_tf = masks.transpose([1,2,0]) # [numberOfMasks, h, w] -> [h, w, masks] 97 | masks_tf = masks_tf[np.newaxis, :, :, :] # -> [1, h, w, masks] 98 | 99 | input = tf.placeholder(tf.float32, shape=masks_tf.shape) 100 | net = Model.build_mask_net(input, ds_type) # only does pooling, so there is no interaction between masks 101 | layer_masks = {} 102 | with tf.Session() as sess: 103 | for layer in layers: 104 | out = sess.run(net[layer], feed_dict={input: masks_tf}) 105 | layer_masks[layer] = out[0].transpose([2,0,1]) 106 | return layer_masks 107 | 108 | def build_target_net(vgg_weights, pooling_type, target_shape): 109 | input = tf.Variable( np.zeros(target_shape).astype('float32') ) 110 | net = Model.build_image_net(input, vgg_weights, pooling_type) 111 | net['input'] = input 112 | return net 113 | 114 | 115 | ''' 116 | loss 117 | ''' 118 | def content_layer_loss(p, x, loss_norm): 119 | _, h, w, d = p.shape 120 | M = h * w 121 | N = d 122 | if loss_norm == 1: 123 | K = 1. / (N * M) 124 | elif loss_norm == 2: 125 | K = 1. / (2. * N**0.5 * M**0.5) 126 | loss = K * tf.reduce_sum( tf.pow((x - p), 2) ) 127 | return loss 128 | 129 | def sum_content_loss(target_net, content_features, layers, layers_weights, loss_norm): 130 | content_loss = 0. 131 | for layer, weight in zip(layers, layers_weights): 132 | p = content_features[layer] 133 | x = target_net[layer] 134 | content_loss += content_layer_loss(p, x, loss_norm) * weight 135 | content_loss /= float(sum(layers_weights)) 136 | return content_loss 137 | 138 | def masked_gram(x, mx, mask_norm, N): 139 | R = mx.shape[0] 140 | M = mx.shape[1] * mx.shape[2] 141 | 142 | # TODO: use local variable? 143 | mx = mx.reshape([R, M]) 144 | x = tf.reshape(x, [M, N]) 145 | x = tf.transpose(x) # N * M 146 | grams = [] 147 | for i in range(R): 148 | mask = mx[i] 149 | masked_x = x * mask 150 | if mask_norm == 'square_sum': 151 | K = 1. / np.sum(mask**2) 152 | elif mask_norm == 'sum': 153 | K = 1. / np.sum(mask) 154 | gram = K * tf.matmul(masked_x, tf.transpose(masked_x)) 155 | grams.append(gram) 156 | return tf.stack(grams) 157 | 158 | def masked_style_layer_loss(a, ma, x, mx, mask_norm): 159 | N = a.shape[3] 160 | R = ma.shape[0] 161 | K = 1. / (4.
* N**2 * R) 162 | A = masked_gram(a, ma, mask_norm, N) 163 | G = masked_gram(x, mx, mask_norm, N) 164 | loss = K * tf.reduce_sum( tf.pow((G - A), 2) ) 165 | return loss 166 | 167 | def sum_masked_style_loss(target_net, style_features, target_masks, style_masks, layers, layers_weights, mask_norm): 168 | style_loss = 0. 169 | for layer, weight in zip(layers, layers_weights): 170 | a = style_features[layer] 171 | ma = style_masks[layer] 172 | x = target_net[layer] 173 | mx = target_masks[layer] 174 | style_loss += masked_style_layer_loss(a, ma, x, mx, mask_norm) * weight 175 | style_loss /= float(sum(layers_weights)) 176 | return style_loss 177 | 178 | def gram_matrix(x): 179 | _, h, w, d = x.get_shape() # x is a tensor 180 | M = h.value * w.value 181 | N = d.value 182 | F = tf.reshape(x, (M, N)) 183 | G = tf.matmul(tf.transpose(F), F) 184 | return (1./M) * G 185 | 186 | def style_layer_loss(a, x): 187 | N = a.shape[3] 188 | A = gram_matrix(tf.convert_to_tensor(a)) 189 | G = gram_matrix(x) 190 | loss = (1./(4 * N**2 )) * tf.reduce_sum(tf.pow((G - A), 2)) 191 | return loss 192 | 193 | def sum_style_loss(target_net, style_features, layers, layers_weights): # for testing 194 | style_loss = 0. 195 | for layer, weight in zip(layers, layers_weights): 196 | a = style_features[layer] 197 | x = target_net[layer] 198 | style_loss += style_layer_loss(a, x) * weight 199 | style_loss /= float(sum(layers_weights)) 200 | return style_loss 201 | 202 | def sum_total_variation_loss(input, shape): 203 | b, h, w, d = shape 204 | x = input 205 | tv_y_size = b * (h-1) * w * d 206 | tv_x_size = b * h * (w-1) * d 207 | loss_y = tf.nn.l2_loss(x[:,1:,:,:] - x[:,:-1,:,:]) # l2_loss() uses a 1/2 factor 208 | loss_y /= tv_y_size 209 | loss_x = tf.nn.l2_loss(x[:,:,1:,:] - x[:,:,:-1,:]) 210 | loss_x /= tv_x_size 211 | loss = 2 * (loss_y + loss_x) 212 | loss = tf.cast(loss, tf.float32) # make sure the loss dtype is float32 213 | return loss 214 | 215 | 216 | ''' 217 | main 218 | ''' 219 | def main(args): 220 | 221 | ''' 222 | init 223 | ''' 224 | # read images and preprocess 225 | if args.content_img: 226 | content_img = read_image(args.content_img, args.hard_width) 227 | style_img = read_image(args.style_img, args.hard_width) 228 | 229 | # get stacked 0./1. masks
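# For example (illustrative): with mask_n_colors = 3, read_colorful_mask()
# returns target and style masks of shape (3, H, W), one binary 0./1. map
# per K-Means color cluster.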
230 | if args.mask_n_colors > 1: # colorful 231 | target_masks_origin, style_masks_origin = read_colorful_mask(args.target_mask, args.style_mask, 232 | args.hard_width, args.mask_n_colors) 233 | 234 | else: # single mask 235 | if args.target_mask is None: 236 | if args.content_img: 237 | target_masks_origin = np.ones(content_img.shape[0:3]).astype(np.float32) 238 | else: 239 | target_masks_origin = np.ones(style_img.shape[0:3]).astype(np.float32) 240 | else: 241 | target_masks_origin = read_single_mask(args.target_mask, args.hard_width) 242 | 243 | if args.style_mask is None: 244 | style_masks_origin = np.ones(style_img.shape[0:3]).astype(np.float32) 245 | else: 246 | style_masks_origin = read_single_mask(args.style_mask, args.hard_width) 247 | 248 | # init img & target shape 249 | if args.content_img: 250 | target_shape = content_img.shape 251 | init_img = get_init_image(content_img, args.init_noise_ratio) 252 | else: 253 | target_shape = [1] + list(target_masks_origin.shape[1:3]) + [3] 254 | init_img = np.random.uniform(-20., 20., target_shape).astype(np.float32) 255 | 256 | # check shape & number of masks 257 | if args.content_img and content_img.shape[1:3] != target_masks_origin.shape[1:3]: 258 | print('content and mask have different shapes') 259 | sys.exit(1) 260 | if style_img.shape[1:3] != style_masks_origin.shape[1:3]: 261 | print('style and mask have different shapes') 262 | sys.exit(1) 263 | if target_masks_origin.shape[0] != style_masks_origin.shape[0]: 264 | print('content and style have different numbers of masks') 265 | sys.exit(1) 266 | 267 | ''' 268 | compute features & build net 269 | ''' 270 | # prepare model weights 271 | vgg_weights = Model.prepare_model(args.model_path) 272 | 273 | # feature maps of specific layers 274 | if args.content_img: 275 | content_features = compute_features(vgg_weights, args.feature_pooling_type, 276 | content_img, args.content_layers) 277 | style_features = compute_features(vgg_weights, args.feature_pooling_type, 278 | style_img, args.style_layers) 279 | 280 | # masks of specific layers 281 | target_masks = compute_layer_masks(target_masks_origin, args.style_layers, 282 | args.mask_downsample_type) 283 | style_masks = compute_layer_masks(style_masks_origin, args.style_layers, 284 | args.mask_downsample_type) 285 | 286 | # build net 287 | target_net = build_target_net(vgg_weights, args.feature_pooling_type, target_shape) 288 | 289 | 290 | ''' 291 | loss 292 | ''' 293 | if args.content_img: 294 | content_loss = sum_content_loss(target_net, content_features, 295 | args.content_layers, args.content_layers_weights, 296 | args.content_loss_normalization) 297 | else: 298 | content_loss = 0. 299 | 300 | style_masked_loss = sum_masked_style_loss(target_net, style_features, 301 | target_masks, style_masks, 302 | args.style_layers, args.style_layers_weights, 303 | args.mask_normalization_type) 304 | 305 | if args.tv_weight != 0.: 306 | tv_loss = sum_total_variation_loss(target_net['input'], target_shape) 307 | else: 308 | tv_loss = 0. 309 | 310 | total_loss = args.content_weight * content_loss + \ 311 | args.style_weight * style_masked_loss + \ 312 | args.tv_weight * tv_loss 313 | 314 | 315 | ''' 316 | train 317 | ''' 318 | if not os.path.exists(args.output_dir): 319 | os.mkdir(args.output_dir) 320 | 321 | if args.optimizer == 'adam': 322 | optimizer = tf.train.AdamOptimizer(args.learning_rate) 323 | train_op = optimizer.minimize(total_loss) 324 | # init 325 | init_op = tf.global_variables_initializer() # must! Adam has some variables to init
326 | sess = tf.Session() 327 | sess.run(init_op) 328 | sess.run( target_net['input'].assign(init_img) ) 329 | # train 330 | for i in range(args.iteration): 331 | sess.run(train_op) 332 | if i % args.log_iteration == 0: 333 | print('Iteration %d: loss = %f' % (i+1, sess.run(total_loss))) 334 | result = sess.run(target_net['input']) 335 | output_path = os.path.join(args.output_dir, 'result_%s.png' % (str(i).zfill(4))) 336 | write_image(output_path, result) 337 | 338 | elif args.optimizer == 'lbfgs': 339 | optimizer = tf.contrib.opt.ScipyOptimizerInterface( 340 | total_loss, method='L-BFGS-B', 341 | options={'maxiter': args.iteration, 342 | 'disp': args.log_iteration}) 343 | # init 344 | init_op = tf.global_variables_initializer() 345 | sess = tf.Session() 346 | sess.run(init_op) 347 | sess.run( target_net['input'].assign(init_img) ) 348 | # train 349 | optimizer.minimize(sess) 350 | 351 | 352 | ''' 353 | out 354 | ''' 355 | print('Iteration %d: loss = %f' % (args.iteration, sess.run(total_loss))) 356 | result = sess.run(target_net['input']) 357 | output_path = os.path.join(args.output_dir,'result_final.png') 358 | write_image(output_path, result) 359 | 360 | 361 | if __name__ == '__main__': 362 | args = Parser.parse_args() 363 | main(args) 364 | 365 | 366 | 367 | 368 | 369 | -------------------------------------------------------------------------------- /second_segment.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import sys 4 | import json 5 | import datetime 6 | import numpy as np 7 | import skimage.draw 8 | import skimage.io, skimage.color # used by load_shirt(), color_splash() and detect_and_color_splash() 9 | # Root directory of the project 10 | ROOT_DIR = os.path.abspath(r"/home/danish/Project/mix") 11 | 12 | # Import Mask RCNN 13 | sys.path.append(ROOT_DIR) # To find local version of the library 14 | from mrcnn.config import Config 15 | from mrcnn import model as modellib, utils 16 | 17 | # Path to trained weights file 18 | COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "weights.h5") 19 | 20 | # Directory to save logs and model checkpoints, if not provided 21 | # through the command line argument --logs 22 | DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") 23 | 24 | ############################################################ 25 | # Configurations 26 | ############################################################ 27 | 28 | 29 | class ShirtConfig(Config): 30 | """Configuration for training on the shirt dataset. 31 | Derives from the base Config class and overrides some values. 32 | """ 33 | # Give the configuration a recognizable name 34 | NAME = "shirt" 35 | 36 | # A 12GB GPU can typically fit two images, but we use one image per GPU here. 37 | # Adjust down if you use a smaller GPU. 38 | IMAGES_PER_GPU = 1 39 | 40 | # Number of classes (including background) 41 | NUM_CLASSES = 1 + 3 # Background + shirt, tshirt, folded_shirt 42 | 43 | # Number of training steps per epoch 44 | STEPS_PER_EPOCH = 100 45 | 46 | # Skip detections with < 90% confidence 47 | DETECTION_MIN_CONFIDENCE = 0.9 48 | 49 | 50 | ############################################################ 51 | # Dataset 52 | ############################################################ 53 | 54 | class ShirtDataset(utils.Dataset): 55 | 56 | def load_shirt(self, dataset_dir, subset): 57 | """Load a subset of the Shirt dataset. 58 | dataset_dir: Root directory of the dataset. 59 | subset: Subset to load: train or val 60 | """ 61 | # Add classes. We have three classes to add.
62 | self.add_class("shirt", 1, "shirt") 63 | self.add_class("shirt", 2, "tshirt") 64 | self.add_class("shirt", 3, "folded_shirt") 65 | 66 | # Train or validation dataset? 67 | assert subset in ["train", "val"] 68 | dataset_dir = os.path.join(dataset_dir, subset) 69 | 70 | # Load annotations 71 | # VGG Image Annotator (up to version 1.6) saves each image in the form: 72 | # { 'filename': '28503151_5b5b7ec140_b.jpg', 73 | # 'regions': { 74 | # '0': { 75 | # 'region_attributes': {}, 76 | # 'shape_attributes': { 77 | # 'all_points_x': [...], 78 | # 'all_points_y': [...], 79 | # 'name': 'polygon'}}, 80 | # ... more regions ... 81 | # }, 82 | # 'size': 100202 83 | # } 84 | # We mostly care about the x and y coordinates of each region 85 | # Note: In VIA 2.0, regions was changed from a dict to a list. 86 | annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json"))) 87 | annotations = list(annotations.values()) # don't need the dict keys 88 | 89 | # The VIA tool saves images in the JSON even if they don't have any 90 | # annotations. Skip unannotated images. 91 | annotations = [a for a in annotations if a['regions']] 92 | 93 | # Add images 94 | for a in annotations: 95 | # Get the x, y coordinates of points of the polygons that make up 96 | # the outline of each object instance. These are stored in the 97 | # shape_attributes (see json format above) 98 | # The if condition is needed to support VIA versions 1.x and 2.x. 99 | if type(a['regions']) is dict: 100 | polygons = [r['shape_attributes'] for r in a['regions'].values()] 101 | else: 102 | polygons = [r['shape_attributes'] for r in a['regions']] 103 | 104 | # load_mask() needs the image size to convert polygons to masks. 105 | # Unfortunately, VIA doesn't include it in JSON, so we must read 106 | # the image. This is only manageable since the dataset is tiny. 107 | image_path = os.path.join(dataset_dir, a['filename']) 108 | image = skimage.io.imread(image_path) 109 | height, width = image.shape[:2] 110 | 111 | self.add_image( 112 | "shirt", 113 | image_id=a['filename'], # use file name as a unique image id 114 | path=image_path, 115 | width=width, height=height, 116 | polygons=polygons) 117 | 118 | def load_mask(self, image_id): 119 | """Generate instance masks for an image. 120 | Returns: 121 | masks: A bool array of shape [height, width, instance count] with 122 | one mask per instance. 123 | class_ids: a 1D array of class IDs of the instance masks. 124 | """ 125 | # If not a shirt dataset image, delegate to parent class. 126 | image_info = self.image_info[image_id] 127 | if image_info["source"] != "shirt": 128 | return super(self.__class__, self).load_mask(image_id) 129 | 130 | # Convert polygons to a bitmap mask of shape 131 | # [height, width, instance_count] 132 | info = self.image_info[image_id] 133 | mask = np.zeros([info["height"], info["width"], len(info["polygons"])], 134 | dtype=np.uint8) 135 | for i, p in enumerate(info["polygons"]): 136 | # Get indexes of pixels inside the polygon and set them to 1 137 | rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x']) 138 | mask[rr, cc, i] = 1 139 | 140 | # Return mask, and an array of class IDs for each instance. Region 141 | # attributes are not parsed here, so every instance is labeled with class 1 ("shirt") 142 | return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) 143 | 144 | def image_reference(self, image_id): 145 | """Return the path of the image.""" 146 | info = self.image_info[image_id] 147 | if info["source"] == "shirt": 148 | return info["path"] 149 | else: 150 | return super(self.__class__, self).image_reference(image_id) 151 | 152 | 153 | def train(model): 154 | """Train the model.""" 155 | # Training dataset. 156 | dataset_train = ShirtDataset() 157 | dataset_train.load_shirt(args.dataset, "train") 158 | dataset_train.prepare() 159 | 160 | # Validation dataset 161 | dataset_val = ShirtDataset() 162 | dataset_val.load_shirt(args.dataset, "val") 163 | dataset_val.prepare() 164 | 165 | # *** This training schedule is an example. Update to your needs *** 166 | # Since we're using a very small dataset, and starting from 167 | # COCO trained weights, we don't need to train too long. Also, 168 | # no need to train all layers, just the heads should do it. 169 | print("Training network heads") 170 | model.train(dataset_train, dataset_val, 171 | learning_rate=config.LEARNING_RATE, 172 | epochs=5, 173 | layers='heads') 174 | 175 | 176 | def color_splash(image, mask): 177 | """Apply color splash effect. 178 | image: RGB image [height, width, 3] 179 | mask: instance segmentation mask [height, width, instance count] 180 | 181 | Returns result image. 182 | """ 183 | # Build an all-black background (the grayscale copy is multiplied by 0). 184 | # It still has 3 RGB channels, though. 185 | gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 0 186 | # Paint masked pixels white (the original color-splash line is kept below) 187 | if mask.shape[-1] > 0: 188 | # We're treating all instances as one, so collapse the mask into one layer 189 | mask = (np.sum(mask, -1, keepdims=True) >= 1) 190 | # splash = np.where(mask, image, gray).astype(np.uint8) 191 | splash = np.where(mask, [255, 255, 255], gray).astype(np.uint8) 192 | else: 193 | splash = gray.astype(np.uint8) 194 | return splash 195 | 196 | 197 | def detect_and_color_splash(model, image_path=None, video_path=None): 198 | assert image_path or video_path 199 | 200 | # Image or video?
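# Example invocation for inference (illustrative paths):
#   python second_segment.py splash --weights=weights.h5 --image=path/to/photo.jpg
# The splash result for an image is written to static/second_segmented.png (see below).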
201 | if image_path: 202 | # Run model detection and generate the color splash effect 203 | print("Running on {}".format(image_path)) 204 | # Read image 205 | image = skimage.io.imread(image_path) 206 | # Detect objects 207 | r = model.detect([image], verbose=1)[0] 208 | # Color splash 209 | splash = color_splash(image, r['masks']) 210 | # Check if the filenames are already there 211 | # exist = os.path.isfile('image_path/') 212 | # Save output 213 | file_name = r"/home/danish/Project/mix/static/second_segmented.png" 214 | # locations = "" 215 | skimage.io.imsave(file_name, splash) 216 | elif video_path: 217 | import cv2 218 | # Video capture 219 | vcapture = cv2.VideoCapture(video_path) 220 | width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH)) 221 | height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) 222 | fps = vcapture.get(cv2.CAP_PROP_FPS) 223 | 224 | # Define codec and create video writer 225 | file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now()) 226 | vwriter = cv2.VideoWriter(file_name, 227 | cv2.VideoWriter_fourcc(*'MJPG'), 228 | fps, (width, height)) 229 | 230 | count = 0 231 | success = True 232 | while success: 233 | print("frame: ", count) 234 | # Read next image 235 | success, image = vcapture.read() 236 | if success: 237 | # OpenCV returns images as BGR, convert to RGB 238 | image = image[..., ::-1] 239 | # Detect objects 240 | r = model.detect([image], verbose=0)[0] 241 | # Color splash 242 | splash = color_splash(image, r['masks']) 243 | # RGB -> BGR to save image to video 244 | splash = splash[..., ::-1] 245 | # Add image to video writer 246 | vwriter.write(splash) 247 | count += 1 248 | vwriter.release() 249 | print("Saved to ", file_name) 250 | 251 | 252 | ############################################################ 253 | # Training 254 | ############################################################ 255 | 256 | if __name__ == '__main__': 257 | import argparse 258 | 259 | # Parse command line arguments 260 | parser = argparse.ArgumentParser( 261 | description='Train Mask R-CNN to detect shirts.') 262 | parser.add_argument("command", 263 | metavar="<command>", 264 | help="'train' or 'splash'") 265 | parser.add_argument('--dataset', required=False, 266 | metavar="/dataset/", 267 | help='Directory of the Shirt dataset') 268 | parser.add_argument('--weights', required=True, 269 | metavar="/weights.h5", 270 | help="Path to weights .h5 file or 'coco'") 271 | parser.add_argument('--logs', required=False, 272 | default=DEFAULT_LOGS_DIR, 273 | metavar="/path/to/logs/", 274 | help='Logs and checkpoints directory (default=logs/)') 275 | parser.add_argument('--image', required=False, 276 | metavar="path or URL to image", 277 | help='Image to apply the color splash effect on') 278 | parser.add_argument('--video', required=False, 279 | metavar="path or URL to video", 280 | help='Video to apply the color splash effect on') 281 | args = parser.parse_args() 282 | 283 | # Validate arguments 284 | if args.command == "train": 285 | assert args.dataset, "Argument --dataset is required for training" 286 | elif args.command == "splash": 287 | assert args.image or args.video,\ 288 | "Provide --image or --video to apply color splash" 289 | 290 | print("Weights: ", args.weights) 291 | print("Dataset: ", args.dataset) 292 | print("Logs: ", args.logs) 293 | 294 | # Configurations 295 | if args.command == "train": 296 | config = ShirtConfig() 297 | else: 298 | class InferenceConfig(ShirtConfig): 299 | # Set batch size to 1 since we'll be running inference on 300 | # one image at a time.
Batch size = GPU_COUNT * IMAGES_PER_GPU 301 | GPU_COUNT = 1 302 | IMAGES_PER_GPU = 1 303 | config = InferenceConfig() 304 | config.display() 305 | 306 | # Create model 307 | if args.command == "train": 308 | model = modellib.MaskRCNN(mode="training", config=config, 309 | model_dir=args.logs) 310 | else: 311 | model = modellib.MaskRCNN(mode="inference", config=config, 312 | model_dir=args.logs) 313 | 314 | # Select weights file to load 315 | if args.weights.lower() == "coco": 316 | weights_path = COCO_WEIGHTS_PATH 317 | # Download weights file 318 | if not os.path.exists(weights_path): 319 | utils.download_trained_weights(weights_path) 320 | elif args.weights.lower() == "last": 321 | # Find last trained weights 322 | weights_path = model.find_last() 323 | elif args.weights.lower() == "imagenet": 324 | # Start from ImageNet trained weights 325 | weights_path = model.get_imagenet_weights() 326 | else: 327 | weights_path = args.weights 328 | 329 | # Load weights 330 | print("Loading weights ", weights_path) 331 | if args.weights.lower() == "coco": 332 | # Exclude the last layers because they require a matching 333 | # number of classes 334 | model.load_weights(weights_path, by_name=True, exclude=[ 335 | "mrcnn_class_logits", "mrcnn_bbox_fc", 336 | "mrcnn_bbox", "mrcnn_mask"]) 337 | else: 338 | model.load_weights(weights_path, by_name=True) 339 | 340 | # Train or evaluate 341 | if args.command == "train": 342 | train(model) 343 | elif args.command == "splash": 344 | detect_and_color_splash(model, image_path=args.image, 345 | video_path=args.video) 346 | else: 347 | print("'{}' is not recognized. " 348 | "Use 'train' or 'splash'".format(args.command)) 349 | -------------------------------------------------------------------------------- /first_segment.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import datetime 5 | import numpy as np 6 | import skimage.draw 7 | import skimage.io, skimage.color # used by load_shirt(), color_splash() and detect_and_color_splash() 8 | # Root directory of the project 9 | ROOT_DIR = os.path.abspath(r"/home/danish/Project/mix") 10 | 11 | # Import Mask RCNN 12 | sys.path.append(ROOT_DIR) # To find local version of the library 13 | from mrcnn.config import Config 14 | from mrcnn import model as modellib, utils 15 | 16 | # Path to trained weights file 17 | COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "weights.h5") 18 | 19 | # Directory to save logs and model checkpoints, if not provided 20 | # through the command line argument --logs 21 | DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs") 22 | 23 | ############################################################ 24 | # Configurations 25 | ############################################################ 26 | 27 | 28 | class ShirtConfig(Config): 29 | """Configuration for training on the shirt dataset. 30 | Derives from the base Config class and overrides some values. 31 | """ 32 | # Give the configuration a recognizable name 33 | NAME = "shirt" 34 | 35 | # A 12GB GPU can typically fit two images, but we use one image per GPU here. 36 | # Adjust down if you use a smaller GPU.
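# With the default GPU_COUNT = 1 inherited from the base Config, the
# effective batch size (GPU_COUNT * IMAGES_PER_GPU) is 1.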
37 | IMAGES_PER_GPU = 1 38 | 39 | # Number of classes (including background) 40 | NUM_CLASSES = 1 + 3 # Background + shirt, tshirt, folded_shirt 41 | 42 | # Number of training steps per epoch 43 | STEPS_PER_EPOCH = 100 44 | 45 | # Skip detections with < 90% confidence 46 | DETECTION_MIN_CONFIDENCE = 0.9 47 | 48 | 49 | ############################################################ 50 | # Dataset 51 | ############################################################ 52 | 53 | class ShirtDataset(utils.Dataset): 54 | 55 | def load_shirt(self, dataset_dir, subset): 56 | """Load a subset of the Shirt dataset. 57 | dataset_dir: Root directory of the dataset. 58 | subset: Subset to load: train or val 59 | """ 60 | # Add classes. We have three classes to add. 61 | self.add_class("shirt", 1, "shirt") 62 | self.add_class("shirt", 2, "tshirt") 63 | self.add_class("shirt", 3, "folded_shirt") 64 | 65 | # Train or validation dataset? 66 | assert subset in ["train", "val"] 67 | dataset_dir = os.path.join(dataset_dir, subset) 68 | 69 | # Load annotations 70 | # VGG Image Annotator (up to version 1.6) saves each image in the form: 71 | # { 'filename': '28503151_5b5b7ec140_b.jpg', 72 | # 'regions': { 73 | # '0': { 74 | # 'region_attributes': {}, 75 | # 'shape_attributes': { 76 | # 'all_points_x': [...], 77 | # 'all_points_y': [...], 78 | # 'name': 'polygon'}}, 79 | # ... more regions ... 80 | # }, 81 | # 'size': 100202 82 | # } 83 | # We mostly care about the x and y coordinates of each region 84 | # Note: In VIA 2.0, regions was changed from a dict to a list. 85 | annotations = json.load(open(os.path.join(dataset_dir, "via_region_data.json"))) 86 | annotations = list(annotations.values()) # don't need the dict keys 87 | 88 | # The VIA tool saves images in the JSON even if they don't have any 89 | # annotations. Skip unannotated images. 90 | annotations = [a for a in annotations if a['regions']] 91 | 92 | # Add images 93 | for a in annotations: 94 | # Get the x, y coordinates of points of the polygons that make up 95 | # the outline of each object instance. These are stored in the 96 | # shape_attributes (see json format above) 97 | # The if condition is needed to support VIA versions 1.x and 2.x. 98 | if type(a['regions']) is dict: 99 | polygons = [r['shape_attributes'] for r in a['regions'].values()] 100 | else: 101 | polygons = [r['shape_attributes'] for r in a['regions']] 102 | 103 | # load_mask() needs the image size to convert polygons to masks. 104 | # Unfortunately, VIA doesn't include it in JSON, so we must read 105 | # the image. This is only manageable since the dataset is tiny. 106 | image_path = os.path.join(dataset_dir, a['filename']) 107 | image = skimage.io.imread(image_path) 108 | height, width = image.shape[:2] 109 | 110 | self.add_image( 111 | "shirt", 112 | image_id=a['filename'], # use file name as a unique image id 113 | path=image_path, 114 | width=width, height=height, 115 | polygons=polygons) 116 | 117 | def load_mask(self, image_id): 118 | """Generate instance masks for an image. 119 | Returns: 120 | masks: A bool array of shape [height, width, instance count] with 121 | one mask per instance. 122 | class_ids: a 1D array of class IDs of the instance masks. 123 | """ 124 | # If not a shirt dataset image, delegate to parent class.
125 | image_info = self.image_info[image_id] 126 | if image_info["source"] != "shirt": 127 | return super(self.__class__, self).load_mask(image_id) 128 | 129 | # Convert polygons to a bitmap mask of shape 130 | # [height, width, instance_count] 131 | info = self.image_info[image_id] 132 | mask = np.zeros([info["height"], info["width"], len(info["polygons"])], 133 | dtype=np.uint8) 134 | for i, p in enumerate(info["polygons"]): 135 | # Get indexes of pixels inside the polygon and set them to 1 136 | rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x']) 137 | mask[rr, cc, i] = 1 138 | 139 | # Return mask, and an array of class IDs for each instance. Region 140 | # attributes are not parsed here, so every instance is labeled with class 1 ("shirt") 141 | return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32) 142 | 143 | def image_reference(self, image_id): 144 | """Return the path of the image.""" 145 | info = self.image_info[image_id] 146 | if info["source"] == "shirt": 147 | return info["path"] 148 | else: 149 | return super(self.__class__, self).image_reference(image_id) 150 | 151 | 152 | def train(model): 153 | """Train the model.""" 154 | # Training dataset. 155 | dataset_train = ShirtDataset() 156 | dataset_train.load_shirt(args.dataset, "train") 157 | dataset_train.prepare() 158 | 159 | # Validation dataset 160 | dataset_val = ShirtDataset() 161 | dataset_val.load_shirt(args.dataset, "val") 162 | dataset_val.prepare() 163 | 164 | # *** This training schedule is an example. Update to your needs *** 165 | # Since we're using a very small dataset, and starting from 166 | # COCO trained weights, we don't need to train too long. Also, 167 | # no need to train all layers, just the heads should do it. 168 | print("Training network heads") 169 | model.train(dataset_train, dataset_val, 170 | learning_rate=config.LEARNING_RATE, 171 | epochs=5, 172 | layers='heads') 173 | 174 | 175 | def color_splash(image, mask): 176 | """Apply color splash effect. 177 | image: RGB image [height, width, 3] 178 | mask: instance segmentation mask [height, width, instance count] 179 | 180 | Returns result image. 181 | """ 182 | # Build an all-black background (the grayscale copy is multiplied by 0). 183 | # It still has 3 RGB channels, though. 184 | gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 0 185 | # Paint masked pixels white (the original color-splash line is kept below) 186 | if mask.shape[-1] > 0: 187 | # We're treating all instances as one, so collapse the mask into one layer 188 | mask = (np.sum(mask, -1, keepdims=True) >= 1) 189 | # splash = np.where(mask, image, gray).astype(np.uint8) 190 | splash = np.where(mask, [255, 255, 255], gray).astype(np.uint8) 191 | else: 192 | splash = gray.astype(np.uint8) 193 | return splash 194 | 195 | 196 | def detect_and_color_splash(model, image_path=None, video_path=None): 197 | assert image_path or video_path 198 | 199 | # Image or video?
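# Example invocation for training (illustrative dataset path):
#   python first_segment.py train --dataset=path/to/shirt_dataset --weights=coco
# The command-line interface is defined in the __main__ block below.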
200 | if image_path: 201 | # Run model detection and generate the color splash effect 202 | print("Running on {}".format(image_path)) 203 | # Read image 204 | image = skimage.io.imread(image_path) 205 | # Detect objects 206 | r = model.detect([image], verbose=1)[0] 207 | # Color splash 208 | splash = color_splash(image, r['masks']) 209 | # Check if the filenames are already there 210 | # exist = os.path.isfile('image_path/') 211 | # Save output 212 | file_name = r"/home/danish/Project/mix/static/first_segmented.png" 213 | # locations = "/home/danish/Downloads/Pot/rice/static" 214 | skimage.io.imsave(file_name, splash) 215 | elif video_path: 216 | import cv2 217 | # Video capture 218 | vcapture = cv2.VideoCapture(video_path) 219 | width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH)) 220 | height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT)) 221 | fps = vcapture.get(cv2.CAP_PROP_FPS) 222 | 223 | # Define codec and create video writer 224 | file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now()) 225 | vwriter = cv2.VideoWriter(file_name, 226 | cv2.VideoWriter_fourcc(*'MJPG'), 227 | fps, (width, height)) 228 | 229 | count = 0 230 | success = True 231 | while success: 232 | print("frame: ", count) 233 | # Read next image 234 | success, image = vcapture.read() 235 | if success: 236 | # OpenCV returns images as BGR, convert to RGB 237 | image = image[..., ::-1] 238 | # Detect objects 239 | r = model.detect([image], verbose=0)[0] 240 | # Color splash 241 | splash = color_splash(image, r['masks']) 242 | # RGB -> BGR to save image to video 243 | splash = splash[..., ::-1] 244 | # Add image to video writer 245 | vwriter.write(splash) 246 | count += 1 247 | vwriter.release() 248 | print("Saved to ", file_name) 249 | 250 | 251 | ############################################################ 252 | # Training 253 | ############################################################ 254 | 255 | if __name__ == '__main__': 256 | import argparse 257 | 258 | # Parse command line arguments 259 | parser = argparse.ArgumentParser( 260 | description='Train Mask R-CNN to detect shirts.') 261 | parser.add_argument("command", 262 | metavar="<command>", 263 | help="'train' or 'splash'") 264 | parser.add_argument('--dataset', required=False, 265 | metavar="/dataset/", 266 | help='Directory of the Shirt dataset') 267 | parser.add_argument('--weights', required=True, 268 | metavar="/weights.h5", 269 | help="Path to weights .h5 file or 'coco'") 270 | parser.add_argument('--logs', required=False, 271 | default=DEFAULT_LOGS_DIR, 272 | metavar="/path/to/logs/", 273 | help='Logs and checkpoints directory (default=logs/)') 274 | parser.add_argument('--image', required=False, 275 | metavar="path or URL to image", 276 | help='Image to apply the color splash effect on') 277 | parser.add_argument('--video', required=False, 278 | metavar="path or URL to video", 279 | help='Video to apply the color splash effect on') 280 | args = parser.parse_args() 281 | 282 | # Validate arguments 283 | if args.command == "train": 284 | assert args.dataset, "Argument --dataset is required for training" 285 | elif args.command == "splash": 286 | assert args.image or args.video,\ 287 | "Provide --image or --video to apply color splash" 288 | 289 | print("Weights: ", args.weights) 290 | print("Dataset: ", args.dataset) 291 | print("Logs: ", args.logs) 292 | 293 | # Configurations 294 | if args.command == "train": 295 | config = ShirtConfig() 296 | else: 297 | class InferenceConfig(ShirtConfig): 298 | # Set batch size to 1 since we'll be running inference on 299 | # one
image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU 300 | GPU_COUNT = 1 301 | IMAGES_PER_GPU = 1 302 | config = InferenceConfig() 303 | config.display() 304 | 305 | # Create model 306 | if args.command == "train": 307 | model = modellib.MaskRCNN(mode="training", config=config, 308 | model_dir=args.logs) 309 | else: 310 | model = modellib.MaskRCNN(mode="inference", config=config, 311 | model_dir=args.logs) 312 | 313 | # Select weights file to load 314 | if args.weights.lower() == "coco": 315 | weights_path = COCO_WEIGHTS_PATH 316 | # Download weights file 317 | if not os.path.exists(weights_path): 318 | utils.download_trained_weights(weights_path) 319 | elif args.weights.lower() == "last": 320 | # Find last trained weights 321 | weights_path = model.find_last() 322 | elif args.weights.lower() == "imagenet": 323 | # Start from ImageNet trained weights 324 | weights_path = model.get_imagenet_weights() 325 | else: 326 | weights_path = args.weights 327 | 328 | # Load weights 329 | print("Loading weights ", weights_path) 330 | if args.weights.lower() == "coco": 331 | # Exclude the last layers because they require a matching 332 | # number of classes 333 | model.load_weights(weights_path, by_name=True, exclude=[ 334 | "mrcnn_class_logits", "mrcnn_bbox_fc", 335 | "mrcnn_bbox", "mrcnn_mask"]) 336 | else: 337 | model.load_weights(weights_path, by_name=True) 338 | 339 | # Train or evaluate 340 | if args.command == "train": 341 | train(model) 342 | elif args.command == "splash": 343 | detect_and_color_splash(model, image_path=args.image, 344 | video_path=args.video) 345 | else: 346 | print("'{}' is not recognized. " 347 | "Use 'train' or 'splash'".format(args.command)) 348 | -------------------------------------------------------------------------------- /static/css/glyphicon.css: -------------------------------------------------------------------------------- 1 | @font-face { 2 | font-family: 'Glyphicons Halflings'; 3 | 4 | src: url('../fonts/glyphicons-halflings-regular.eot'); 5 | src: url('../fonts/glyphicons-halflings-regular.eot?#iefix') format('embedded-opentype'), url('../fonts/glyphicons-halflings-regular.woff2') format('woff2'), url('../fonts/glyphicons-halflings-regular.woff') format('woff'), url('../fonts/glyphicons-halflings-regular.ttf') format('truetype'), url('../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular') format('svg'); 6 | } 7 | .glyphicon { 8 | position: relative; 9 | top: 1px; 10 | display: inline-block; 11 | font-family: 'Glyphicons Halflings'; 12 | font-style: normal; 13 | font-weight: normal; 14 | line-height: 1; 15 | 16 | -webkit-font-smoothing: antialiased; 17 | -moz-osx-font-smoothing: grayscale; 18 | } 19 | .glyphicon-asterisk:before { 20 | content: "\002a"; 21 | } 22 | .glyphicon-plus:before { 23 | content: "\002b"; 24 | } 25 | .glyphicon-euro:before, 26 | .glyphicon-eur:before { 27 | content: "\20ac"; 28 | } 29 | .glyphicon-minus:before { 30 | content: "\2212"; 31 | } 32 | .glyphicon-cloud:before { 33 | content: "\2601"; 34 | } 35 | .glyphicon-envelope:before { 36 | content: "\2709"; 37 | } 38 | .glyphicon-pencil:before { 39 | content: "\270f"; 40 | } 41 | .glyphicon-glass:before { 42 | content: "\e001"; 43 | } 44 | .glyphicon-music:before { 45 | content: "\e002"; 46 | } 47 | .glyphicon-search:before { 48 | content: "\e003"; 49 | } 50 | .glyphicon-heart:before { 51 | content: "\e005"; 52 | } 53 | .glyphicon-star:before { 54 | content: "\e006"; 55 | } 56 | .glyphicon-star-empty:before { 57 | content: "\e007"; 58 | } 59 | .glyphicon-user:before { 
60 | content: "\e008"; 61 | } 62 | .glyphicon-film:before { 63 | content: "\e009"; 64 | } 65 | .glyphicon-th-large:before { 66 | content: "\e010"; 67 | } 68 | .glyphicon-th:before { 69 | content: "\e011"; 70 | } 71 | .glyphicon-th-list:before { 72 | content: "\e012"; 73 | } 74 | .glyphicon-ok:before { 75 | content: "\e013"; 76 | } 77 | .glyphicon-remove:before { 78 | content: "\e014"; 79 | } 80 | .glyphicon-zoom-in:before { 81 | content: "\e015"; 82 | } 83 | .glyphicon-zoom-out:before { 84 | content: "\e016"; 85 | } 86 | .glyphicon-off:before { 87 | content: "\e017"; 88 | } 89 | .glyphicon-signal:before { 90 | content: "\e018"; 91 | } 92 | .glyphicon-cog:before { 93 | content: "\e019"; 94 | } 95 | .glyphicon-trash:before { 96 | content: "\e020"; 97 | } 98 | .glyphicon-home:before { 99 | content: "\e021"; 100 | } 101 | .glyphicon-file:before { 102 | content: "\e022"; 103 | } 104 | .glyphicon-time:before { 105 | content: "\e023"; 106 | } 107 | .glyphicon-road:before { 108 | content: "\e024"; 109 | } 110 | .glyphicon-download-alt:before { 111 | content: "\e025"; 112 | } 113 | .glyphicon-download:before { 114 | content: "\e026"; 115 | } 116 | .glyphicon-upload:before { 117 | content: "\e027"; 118 | } 119 | .glyphicon-inbox:before { 120 | content: "\e028"; 121 | } 122 | .glyphicon-play-circle:before { 123 | content: "\e029"; 124 | } 125 | .glyphicon-repeat:before { 126 | content: "\e030"; 127 | } 128 | .glyphicon-refresh:before { 129 | content: "\e031"; 130 | } 131 | .glyphicon-list-alt:before { 132 | content: "\e032"; 133 | } 134 | .glyphicon-lock:before { 135 | content: "\e033"; 136 | } 137 | .glyphicon-flag:before { 138 | content: "\e034"; 139 | } 140 | .glyphicon-headphones:before { 141 | content: "\e035"; 142 | } 143 | .glyphicon-volume-off:before { 144 | content: "\e036"; 145 | } 146 | .glyphicon-volume-down:before { 147 | content: "\e037"; 148 | } 149 | .glyphicon-volume-up:before { 150 | content: "\e038"; 151 | } 152 | .glyphicon-qrcode:before { 153 | content: "\e039"; 154 | } 155 | .glyphicon-barcode:before { 156 | content: "\e040"; 157 | } 158 | .glyphicon-tag:before { 159 | content: "\e041"; 160 | } 161 | .glyphicon-tags:before { 162 | content: "\e042"; 163 | } 164 | .glyphicon-book:before { 165 | content: "\e043"; 166 | } 167 | .glyphicon-bookmark:before { 168 | content: "\e044"; 169 | } 170 | .glyphicon-print:before { 171 | content: "\e045"; 172 | } 173 | .glyphicon-camera:before { 174 | content: "\e046"; 175 | } 176 | .glyphicon-font:before { 177 | content: "\e047"; 178 | } 179 | .glyphicon-bold:before { 180 | content: "\e048"; 181 | } 182 | .glyphicon-italic:before { 183 | content: "\e049"; 184 | } 185 | .glyphicon-text-height:before { 186 | content: "\e050"; 187 | } 188 | .glyphicon-text-width:before { 189 | content: "\e051"; 190 | } 191 | .glyphicon-align-left:before { 192 | content: "\e052"; 193 | } 194 | .glyphicon-align-center:before { 195 | content: "\e053"; 196 | } 197 | .glyphicon-align-right:before { 198 | content: "\e054"; 199 | } 200 | .glyphicon-align-justify:before { 201 | content: "\e055"; 202 | } 203 | .glyphicon-list:before { 204 | content: "\e056"; 205 | } 206 | .glyphicon-indent-left:before { 207 | content: "\e057"; 208 | } 209 | .glyphicon-indent-right:before { 210 | content: "\e058"; 211 | } 212 | .glyphicon-facetime-video:before { 213 | content: "\e059"; 214 | } 215 | .glyphicon-picture:before { 216 | content: "\e060"; 217 | } 218 | .glyphicon-map-marker:before { 219 | content: "\e062"; 220 | } 221 | .glyphicon-adjust:before { 222 | content: "\e063"; 223 | } 
224 | .glyphicon-tint:before { 225 | content: "\e064"; 226 | } 227 | .glyphicon-edit:before { 228 | content: "\e065"; 229 | } 230 | .glyphicon-share:before { 231 | content: "\e066"; 232 | } 233 | .glyphicon-check:before { 234 | content: "\e067"; 235 | } 236 | .glyphicon-move:before { 237 | content: "\e068"; 238 | } 239 | .glyphicon-step-backward:before { 240 | content: "\e069"; 241 | } 242 | .glyphicon-fast-backward:before { 243 | content: "\e070"; 244 | } 245 | .glyphicon-backward:before { 246 | content: "\e071"; 247 | } 248 | .glyphicon-play:before { 249 | content: "\e072"; 250 | } 251 | .glyphicon-pause:before { 252 | content: "\e073"; 253 | } 254 | .glyphicon-stop:before { 255 | content: "\e074"; 256 | } 257 | .glyphicon-forward:before { 258 | content: "\e075"; 259 | } 260 | .glyphicon-fast-forward:before { 261 | content: "\e076"; 262 | } 263 | .glyphicon-step-forward:before { 264 | content: "\e077"; 265 | } 266 | .glyphicon-eject:before { 267 | content: "\e078"; 268 | } 269 | .glyphicon-chevron-left:before { 270 | content: "\e079"; 271 | } 272 | .glyphicon-chevron-right:before { 273 | content: "\e080"; 274 | } 275 | .glyphicon-plus-sign:before { 276 | content: "\e081"; 277 | } 278 | .glyphicon-minus-sign:before { 279 | content: "\e082"; 280 | } 281 | .glyphicon-remove-sign:before { 282 | content: "\e083"; 283 | } 284 | .glyphicon-ok-sign:before { 285 | content: "\e084"; 286 | } 287 | .glyphicon-question-sign:before { 288 | content: "\e085"; 289 | } 290 | .glyphicon-info-sign:before { 291 | content: "\e086"; 292 | } 293 | .glyphicon-screenshot:before { 294 | content: "\e087"; 295 | } 296 | .glyphicon-remove-circle:before { 297 | content: "\e088"; 298 | } 299 | .glyphicon-ok-circle:before { 300 | content: "\e089"; 301 | } 302 | .glyphicon-ban-circle:before { 303 | content: "\e090"; 304 | } 305 | .glyphicon-arrow-left:before { 306 | content: "\e091"; 307 | } 308 | .glyphicon-arrow-right:before { 309 | content: "\e092"; 310 | } 311 | .glyphicon-arrow-up:before { 312 | content: "\e093"; 313 | } 314 | .glyphicon-arrow-down:before { 315 | content: "\e094"; 316 | } 317 | .glyphicon-share-alt:before { 318 | content: "\e095"; 319 | } 320 | .glyphicon-resize-full:before { 321 | content: "\e096"; 322 | } 323 | .glyphicon-resize-small:before { 324 | content: "\e097"; 325 | } 326 | .glyphicon-exclamation-sign:before { 327 | content: "\e101"; 328 | } 329 | .glyphicon-gift:before { 330 | content: "\e102"; 331 | } 332 | .glyphicon-leaf:before { 333 | content: "\e103"; 334 | } 335 | .glyphicon-fire:before { 336 | content: "\e104"; 337 | } 338 | .glyphicon-eye-open:before { 339 | content: "\e105"; 340 | } 341 | .glyphicon-eye-close:before { 342 | content: "\e106"; 343 | } 344 | .glyphicon-warning-sign:before { 345 | content: "\e107"; 346 | } 347 | .glyphicon-plane:before { 348 | content: "\e108"; 349 | } 350 | .glyphicon-calendar:before { 351 | content: "\e109"; 352 | } 353 | .glyphicon-random:before { 354 | content: "\e110"; 355 | } 356 | .glyphicon-comment:before { 357 | content: "\e111"; 358 | } 359 | .glyphicon-magnet:before { 360 | content: "\e112"; 361 | } 362 | .glyphicon-chevron-up:before { 363 | content: "\e113"; 364 | } 365 | .glyphicon-chevron-down:before { 366 | content: "\e114"; 367 | } 368 | .glyphicon-retweet:before { 369 | content: "\e115"; 370 | } 371 | .glyphicon-shopping-cart:before { 372 | content: "\e116"; 373 | } 374 | .glyphicon-folder-close:before { 375 | content: "\e117"; 376 | } 377 | .glyphicon-folder-open:before { 378 | content: "\e118"; 379 | } 380 | 
.glyphicon-resize-vertical:before { 381 | content: "\e119"; 382 | } 383 | .glyphicon-resize-horizontal:before { 384 | content: "\e120"; 385 | } 386 | .glyphicon-hdd:before { 387 | content: "\e121"; 388 | } 389 | .glyphicon-bullhorn:before { 390 | content: "\e122"; 391 | } 392 | .glyphicon-bell:before { 393 | content: "\e123"; 394 | } 395 | .glyphicon-certificate:before { 396 | content: "\e124"; 397 | } 398 | .glyphicon-thumbs-up:before { 399 | content: "\e125"; 400 | } 401 | .glyphicon-thumbs-down:before { 402 | content: "\e126"; 403 | } 404 | .glyphicon-hand-right:before { 405 | content: "\e127"; 406 | } 407 | .glyphicon-hand-left:before { 408 | content: "\e128"; 409 | } 410 | .glyphicon-hand-up:before { 411 | content: "\e129"; 412 | } 413 | .glyphicon-hand-down:before { 414 | content: "\e130"; 415 | } 416 | .glyphicon-circle-arrow-right:before { 417 | content: "\e131"; 418 | } 419 | .glyphicon-circle-arrow-left:before { 420 | content: "\e132"; 421 | } 422 | .glyphicon-circle-arrow-up:before { 423 | content: "\e133"; 424 | } 425 | .glyphicon-circle-arrow-down:before { 426 | content: "\e134"; 427 | } 428 | .glyphicon-globe:before { 429 | content: "\e135"; 430 | } 431 | .glyphicon-wrench:before { 432 | content: "\e136"; 433 | } 434 | .glyphicon-tasks:before { 435 | content: "\e137"; 436 | } 437 | .glyphicon-filter:before { 438 | content: "\e138"; 439 | } 440 | .glyphicon-briefcase:before { 441 | content: "\e139"; 442 | } 443 | .glyphicon-fullscreen:before { 444 | content: "\e140"; 445 | } 446 | .glyphicon-dashboard:before { 447 | content: "\e141"; 448 | } 449 | .glyphicon-paperclip:before { 450 | content: "\e142"; 451 | } 452 | .glyphicon-heart-empty:before { 453 | content: "\e143"; 454 | } 455 | .glyphicon-link:before { 456 | content: "\e144"; 457 | } 458 | .glyphicon-phone:before { 459 | content: "\e145"; 460 | } 461 | .glyphicon-pushpin:before { 462 | content: "\e146"; 463 | } 464 | .glyphicon-usd:before { 465 | content: "\e148"; 466 | } 467 | .glyphicon-gbp:before { 468 | content: "\e149"; 469 | } 470 | .glyphicon-sort:before { 471 | content: "\e150"; 472 | } 473 | .glyphicon-sort-by-alphabet:before { 474 | content: "\e151"; 475 | } 476 | .glyphicon-sort-by-alphabet-alt:before { 477 | content: "\e152"; 478 | } 479 | .glyphicon-sort-by-order:before { 480 | content: "\e153"; 481 | } 482 | .glyphicon-sort-by-order-alt:before { 483 | content: "\e154"; 484 | } 485 | .glyphicon-sort-by-attributes:before { 486 | content: "\e155"; 487 | } 488 | .glyphicon-sort-by-attributes-alt:before { 489 | content: "\e156"; 490 | } 491 | .glyphicon-unchecked:before { 492 | content: "\e157"; 493 | } 494 | .glyphicon-expand:before { 495 | content: "\e158"; 496 | } 497 | .glyphicon-collapse-down:before { 498 | content: "\e159"; 499 | } 500 | .glyphicon-collapse-up:before { 501 | content: "\e160"; 502 | } 503 | .glyphicon-log-in:before { 504 | content: "\e161"; 505 | } 506 | .glyphicon-flash:before { 507 | content: "\e162"; 508 | } 509 | .glyphicon-log-out:before { 510 | content: "\e163"; 511 | } 512 | .glyphicon-new-window:before { 513 | content: "\e164"; 514 | } 515 | .glyphicon-record:before { 516 | content: "\e165"; 517 | } 518 | .glyphicon-save:before { 519 | content: "\e166"; 520 | } 521 | .glyphicon-open:before { 522 | content: "\e167"; 523 | } 524 | .glyphicon-saved:before { 525 | content: "\e168"; 526 | } 527 | .glyphicon-import:before { 528 | content: "\e169"; 529 | } 530 | .glyphicon-export:before { 531 | content: "\e170"; 532 | } 533 | .glyphicon-send:before { 534 | content: "\e171"; 535 | } 536 | 
.glyphicon-floppy-disk:before { 537 | content: "\e172"; 538 | } 539 | .glyphicon-floppy-saved:before { 540 | content: "\e173"; 541 | } 542 | .glyphicon-floppy-remove:before { 543 | content: "\e174"; 544 | } 545 | .glyphicon-floppy-save:before { 546 | content: "\e175"; 547 | } 548 | .glyphicon-floppy-open:before { 549 | content: "\e176"; 550 | } 551 | .glyphicon-credit-card:before { 552 | content: "\e177"; 553 | } 554 | .glyphicon-transfer:before { 555 | content: "\e178"; 556 | } 557 | .glyphicon-cutlery:before { 558 | content: "\e179"; 559 | } 560 | .glyphicon-header:before { 561 | content: "\e180"; 562 | } 563 | .glyphicon-compressed:before { 564 | content: "\e181"; 565 | } 566 | .glyphicon-earphone:before { 567 | content: "\e182"; 568 | } 569 | .glyphicon-phone-alt:before { 570 | content: "\e183"; 571 | } 572 | .glyphicon-tower:before { 573 | content: "\e184"; 574 | } 575 | .glyphicon-stats:before { 576 | content: "\e185"; 577 | } 578 | .glyphicon-sd-video:before { 579 | content: "\e186"; 580 | } 581 | .glyphicon-hd-video:before { 582 | content: "\e187"; 583 | } 584 | .glyphicon-subtitles:before { 585 | content: "\e188"; 586 | } 587 | .glyphicon-sound-stereo:before { 588 | content: "\e189"; 589 | } 590 | .glyphicon-sound-dolby:before { 591 | content: "\e190"; 592 | } 593 | .glyphicon-sound-5-1:before { 594 | content: "\e191"; 595 | } 596 | .glyphicon-sound-6-1:before { 597 | content: "\e192"; 598 | } 599 | .glyphicon-sound-7-1:before { 600 | content: "\e193"; 601 | } 602 | .glyphicon-copyright-mark:before { 603 | content: "\e194"; 604 | } 605 | .glyphicon-registration-mark:before { 606 | content: "\e195"; 607 | } 608 | .glyphicon-cloud-download:before { 609 | content: "\e197"; 610 | } 611 | .glyphicon-cloud-upload:before { 612 | content: "\e198"; 613 | } 614 | .glyphicon-tree-conifer:before { 615 | content: "\e199"; 616 | } 617 | .glyphicon-tree-deciduous:before { 618 | content: "\e200"; 619 | } 620 | .glyphicon-cd:before { 621 | content: "\e201"; 622 | } 623 | .glyphicon-save-file:before { 624 | content: "\e202"; 625 | } 626 | .glyphicon-open-file:before { 627 | content: "\e203"; 628 | } 629 | .glyphicon-level-up:before { 630 | content: "\e204"; 631 | } 632 | .glyphicon-copy:before { 633 | content: "\e205"; 634 | } 635 | .glyphicon-paste:before { 636 | content: "\e206"; 637 | } 638 | .glyphicon-alert:before { 639 | content: "\e209"; 640 | } 641 | .glyphicon-equalizer:before { 642 | content: "\e210"; 643 | } 644 | .glyphicon-king:before { 645 | content: "\e211"; 646 | } 647 | .glyphicon-queen:before { 648 | content: "\e212"; 649 | } 650 | .glyphicon-pawn:before { 651 | content: "\e213"; 652 | } 653 | .glyphicon-bishop:before { 654 | content: "\e214"; 655 | } 656 | .glyphicon-knight:before { 657 | content: "\e215"; 658 | } 659 | .glyphicon-baby-formula:before { 660 | content: "\e216"; 661 | } 662 | .glyphicon-tent:before { 663 | content: "\26fa"; 664 | } 665 | .glyphicon-blackboard:before { 666 | content: "\e218"; 667 | } 668 | .glyphicon-bed:before { 669 | content: "\e219"; 670 | } 671 | .glyphicon-apple:before { 672 | content: "\f8ff"; 673 | } 674 | .glyphicon-erase:before { 675 | content: "\e221"; 676 | } 677 | .glyphicon-hourglass:before { 678 | content: "\231b"; 679 | } 680 | .glyphicon-lamp:before { 681 | content: "\e223"; 682 | } 683 | .glyphicon-duplicate:before { 684 | content: "\e224"; 685 | } 686 | .glyphicon-piggy-bank:before { 687 | content: "\e225"; 688 | } 689 | .glyphicon-scissors:before { 690 | content: "\e226"; 691 | } 692 | .glyphicon-bitcoin:before { 693 | content: 
"\e227"; 694 | } 695 | .glyphicon-btc:before { 696 | content: "\e227"; 697 | } 698 | .glyphicon-xbt:before { 699 | content: "\e227"; 700 | } 701 | .glyphicon-yen:before { 702 | content: "\00a5"; 703 | } 704 | .glyphicon-jpy:before { 705 | content: "\00a5"; 706 | } 707 | .glyphicon-ruble:before { 708 | content: "\20bd"; 709 | } 710 | .glyphicon-rub:before { 711 | content: "\20bd"; 712 | } 713 | .glyphicon-scale:before { 714 | content: "\e230"; 715 | } 716 | .glyphicon-ice-lolly:before { 717 | content: "\e231"; 718 | } 719 | .glyphicon-ice-lolly-tasted:before { 720 | content: "\e232"; 721 | } 722 | .glyphicon-education:before { 723 | content: "\e233"; 724 | } 725 | .glyphicon-option-horizontal:before { 726 | content: "\e234"; 727 | } 728 | .glyphicon-option-vertical:before { 729 | content: "\e235"; 730 | } 731 | .glyphicon-menu-hamburger:before { 732 | content: "\e236"; 733 | } 734 | .glyphicon-modal-window:before { 735 | content: "\e237"; 736 | } 737 | .glyphicon-oil:before { 738 | content: "\e238"; 739 | } 740 | .glyphicon-grain:before { 741 | content: "\e239"; 742 | } 743 | .glyphicon-sunglasses:before { 744 | content: "\e240"; 745 | } 746 | .glyphicon-text-size:before { 747 | content: "\e241"; 748 | } 749 | .glyphicon-text-color:before { 750 | content: "\e242"; 751 | } 752 | .glyphicon-text-background:before { 753 | content: "\e243"; 754 | } 755 | .glyphicon-object-align-top:before { 756 | content: "\e244"; 757 | } 758 | .glyphicon-object-align-bottom:before { 759 | content: "\e245"; 760 | } 761 | .glyphicon-object-align-horizontal:before { 762 | content: "\e246"; 763 | } 764 | .glyphicon-object-align-left:before { 765 | content: "\e247"; 766 | } 767 | .glyphicon-object-align-vertical:before { 768 | content: "\e248"; 769 | } 770 | .glyphicon-object-align-right:before { 771 | content: "\e249"; 772 | } 773 | .glyphicon-triangle-right:before { 774 | content: "\e250"; 775 | } 776 | .glyphicon-triangle-left:before { 777 | content: "\e251"; 778 | } 779 | .glyphicon-triangle-bottom:before { 780 | content: "\e252"; 781 | } 782 | .glyphicon-triangle-top:before { 783 | content: "\e253"; 784 | } 785 | .glyphicon-console:before { 786 | content: "\e254"; 787 | } 788 | .glyphicon-superscript:before { 789 | content: "\e255"; 790 | } 791 | .glyphicon-subscript:before { 792 | content: "\e256"; 793 | } 794 | .glyphicon-menu-left:before { 795 | content: "\e257"; 796 | } 797 | .glyphicon-menu-right:before { 798 | content: "\e258"; 799 | } 800 | .glyphicon-menu-down:before { 801 | content: "\e259"; 802 | } 803 | .glyphicon-menu-up:before { 804 | content: "\e260"; 805 | } -------------------------------------------------------------------------------- /mrcnn/visualize.py: -------------------------------------------------------------------------------- 1 | """ 2 | Mask R-CNN 3 | Display and Visualization Functions. 4 | 5 | Copyright (c) 2017 Matterport, Inc. 
6 | Licensed under the MIT License (see LICENSE for details) 7 | Written by Waleed Abdulla 8 | """ 9 | 10 | import os 11 | import sys 12 | import random 13 | import itertools 14 | import colorsys 15 | 16 | import numpy as np 17 | from skimage.measure import find_contours 18 | import matplotlib.pyplot as plt 19 | from matplotlib import patches, lines 20 | from matplotlib.patches import Polygon 21 | import IPython.display 22 | 23 | # Root directory of the project 24 | ROOT_DIR = os.path.abspath("../") 25 | 26 | # Import Mask RCNN 27 | sys.path.append(ROOT_DIR) # To find local version of the library 28 | from mrcnn import utils 29 | 30 | 31 | ############################################################ 32 | # Visualization 33 | ############################################################ 34 | 35 | def display_images(images, titles=None, cols=4, cmap=None, norm=None, 36 | interpolation=None): 37 | """Display the given set of images, optionally with titles. 38 | images: list or array of image tensors in HWC format. 39 | titles: optional. A list of titles to display with each image. 40 | cols: number of images per row 41 | cmap: Optional. Color map to use. For example, "Blues". 42 | norm: Optional. A Normalize instance to map values to colors. 43 | interpolation: Optional. Image interpolation to use for display. 44 | """ 45 | titles = titles if titles is not None else [""] * len(images) 46 | rows = len(images) // cols + 1 47 | plt.figure(figsize=(14, 14 * rows // cols)) 48 | i = 1 49 | for image, title in zip(images, titles): 50 | plt.subplot(rows, cols, i) 51 | plt.title(title, fontsize=9) 52 | plt.axis('off') 53 | plt.imshow(image.astype(np.uint8), cmap=cmap, 54 | norm=norm, interpolation=interpolation) 55 | i += 1 56 | plt.show() 57 | 58 | 59 | def random_colors(N, bright=True): 60 | """ 61 | Generate random colors. 62 | To get visually distinct colors, generate them in HSV space then 63 | convert to RGB. 64 | """ 65 | brightness = 1.0 if bright else 0.7 66 | hsv = [(i / N, 1, brightness) for i in range(N)] 67 | colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)) 68 | random.shuffle(colors) 69 | return colors 70 | 71 | 72 | def apply_mask(image, mask, color, alpha=0.5): 73 | """Apply the given mask to the image. 74 | """ 75 | for c in range(3): 76 | image[:, :, c] = np.where(mask == 1, 77 | image[:, :, c] * 78 | (1 - alpha) + alpha * color[c] * 255, 79 | image[:, :, c]) 80 | return image 81 | 82 | 83 | def display_instances(image, boxes, masks, class_ids, class_names, 84 | scores=None, title="", 85 | figsize=(16, 16), ax=None, 86 | show_mask=True, show_bbox=True, 87 | colors=None, captions=None): 88 | """ 89 | boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates. 
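(In practice these come straight from the results dict of mrcnn.model.MaskRCNN.detect(): boxes=r['rois'], masks=r['masks'], class_ids=r['class_ids'], scores=r['scores']; a hedged usage sketch is appended at the very end of this document.)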
90 | masks: [height, width, num_instances] 91 | class_ids: [num_instances] 92 | class_names: list of class names of the dataset 93 | scores: (optional) confidence scores for each box 94 | title: (optional) Figure title 95 | show_mask, show_bbox: To show masks and bounding boxes or not 96 | figsize: (optional) the size of the image 97 | colors: (optional) An array of colors to use with each object 98 | captions: (optional) A list of strings to use as captions for each object 99 | """ 100 | # Number of instances 101 | N = boxes.shape[0] 102 | if not N: 103 | print("\n*** No instances to display *** \n") 104 | else: 105 | assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0] 106 | 107 | # If no axis is passed, create one and automatically call show() 108 | auto_show = False 109 | if not ax: 110 | _, ax = plt.subplots(1, figsize=figsize) 111 | auto_show = True 112 | 113 | # Generate random colors 114 | colors = colors or random_colors(N) 115 | 116 | # Show area outside image boundaries. 117 | height, width = image.shape[:2] 118 | ax.set_ylim(height + 10, -10) 119 | ax.set_xlim(-10, width + 10) 120 | ax.axis('off') 121 | ax.set_title(title) 122 | 123 | masked_image = image.astype(np.uint32).copy() 124 | for i in range(N): 125 | color = colors[i] 126 | 127 | # Bounding box 128 | if not np.any(boxes[i]): 129 | # Skip this instance. Has no bbox. Likely lost in image cropping. 130 | continue 131 | y1, x1, y2, x2 = boxes[i] 132 | if show_bbox: 133 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 134 | alpha=0.7, linestyle="dashed", 135 | edgecolor=color, facecolor='none') 136 | ax.add_patch(p) 137 | 138 | # Label 139 | if not captions: 140 | class_id = class_ids[i] 141 | score = scores[i] if scores is not None else None 142 | label = class_names[class_id] 143 | caption = "{} {:.3f}".format(label, score) if score else label 144 | else: 145 | caption = captions[i] 146 | ax.text(x1, y1 + 8, caption, 147 | color='w', size=11, backgroundcolor="none") 148 | 149 | # Mask 150 | mask = masks[:, :, i] 151 | if show_mask: 152 | masked_image = apply_mask(masked_image, mask, color) 153 | 154 | # Mask Polygon 155 | # Pad to ensure proper polygons for masks that touch image edges. 156 | padded_mask = np.zeros( 157 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 158 | padded_mask[1:-1, 1:-1] = mask 159 | contours = find_contours(padded_mask, 0.5) 160 | for verts in contours: 161 | # Subtract the padding and flip (y, x) to (x, y) 162 | verts = np.fliplr(verts) - 1 163 | p = Polygon(verts, facecolor="none", edgecolor=color) 164 | ax.add_patch(p) 165 | ax.imshow(masked_image.astype(np.uint8)) 166 | if auto_show: 167 | plt.show() 168 | 169 | 170 | def display_differences(image, 171 | gt_box, gt_class_id, gt_mask, 172 | pred_box, pred_class_id, pred_score, pred_mask, 173 | class_names, title="", ax=None, 174 | show_mask=True, show_box=True, 175 | iou_threshold=0.5, score_threshold=0.5): 176 | """Display ground truth and prediction instances on the same image.""" 177 | # Match predictions to ground truth 178 | gt_match, pred_match, overlaps = utils.compute_matches( 179 | gt_box, gt_class_id, gt_mask, 180 | pred_box, pred_class_id, pred_score, pred_mask, 181 | iou_threshold=iou_threshold, score_threshold=score_threshold) 182 | # Ground truth = green.
Predictions = red 183 | colors = [(0, 1, 0, .8)] * len(gt_match)\ 184 | + [(1, 0, 0, 1)] * len(pred_match) 185 | # Concatenate GT and predictions 186 | class_ids = np.concatenate([gt_class_id, pred_class_id]) 187 | scores = np.concatenate([np.zeros([len(gt_match)]), pred_score]) 188 | boxes = np.concatenate([gt_box, pred_box]) 189 | masks = np.concatenate([gt_mask, pred_mask], axis=-1) 190 | # Captions per instance show score/IoU 191 | captions = ["" for m in gt_match] + ["{:.2f} / {:.2f}".format( 192 | pred_score[i], 193 | (overlaps[i, int(pred_match[i])] 194 | if pred_match[i] > -1 else overlaps[i].max())) 195 | for i in range(len(pred_match))] 196 | # Set title if not provided 197 | title = title or "Ground Truth and Detections\n GT=green, pred=red, captions: score/IoU" 198 | # Display 199 | display_instances( 200 | image, 201 | boxes, masks, class_ids, 202 | class_names, scores, ax=ax, 203 | show_bbox=show_box, show_mask=show_mask, 204 | colors=colors, captions=captions, 205 | title=title) 206 | 207 | 208 | def draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10): 209 | """ 210 | rois: [n, (y1, x1, y2, x2)] list of ROIs in image coordinates. 211 | refined_rois: [n, 4] the same ROIs refined to fit objects better. 212 | """ 213 | masked_image = image.copy() 214 | 215 | # Pick random anchors in case there are too many. 216 | ids = np.arange(rois.shape[0], dtype=np.int32) 217 | ids = np.random.choice( 218 | ids, limit, replace=False) if ids.shape[0] > limit else ids 219 | 220 | fig, ax = plt.subplots(1, figsize=(12, 12)) 221 | if rois.shape[0] > limit: 222 | plt.title("Showing {} random ROIs out of {}".format( 223 | len(ids), rois.shape[0])) 224 | else: 225 | plt.title("{} ROIs".format(len(ids))) 226 | 227 | # Show area outside image boundaries. 228 | ax.set_ylim(image.shape[0] + 20, -20) 229 | ax.set_xlim(-50, image.shape[1] + 20) 230 | ax.axis('off') 231 | 232 | for i, id in enumerate(ids): 233 | color = np.random.rand(3) 234 | class_id = class_ids[id] 235 | # ROI 236 | y1, x1, y2, x2 = rois[id] 237 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 238 | edgecolor=color if class_id else "gray", 239 | facecolor='none', linestyle="dashed") 240 | ax.add_patch(p) 241 | # Refined ROI 242 | if class_id: 243 | ry1, rx1, ry2, rx2 = refined_rois[id] 244 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 245 | edgecolor=color, facecolor='none') 246 | ax.add_patch(p) 247 | # Connect the top-left corners of the anchor and proposal for easy visualization 248 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 249 | 250 | # Label 251 | label = class_names[class_id] 252 | ax.text(rx1, ry1 + 8, "{}".format(label), 253 | color='w', size=11, backgroundcolor="none") 254 | 255 | # Mask 256 | m = utils.unmold_mask(mask[id], rois[id] 257 | [:4].astype(np.int32), image.shape) 258 | masked_image = apply_mask(masked_image, m, color) 259 | 260 | ax.imshow(masked_image) 261 | 262 | # Print stats 263 | print("Positive ROIs: ", class_ids[class_ids > 0].shape[0]) 264 | print("Negative ROIs: ", class_ids[class_ids == 0].shape[0]) 265 | print("Positive Ratio: {:.2f}".format( 266 | class_ids[class_ids > 0].shape[0] / class_ids.shape[0])) 267 | 268 | 269 | # TODO: Replace with matplotlib equivalent? 270 | def draw_box(image, box, color): 271 | """Draw 2-pixel width bounding boxes on the given image array. 272 | color: list of 3 int values for RGB.
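Example (editor's sketch): draw_box(image, np.array([20, 20, 80, 120]), [255, 0, 0])
paints a red outline in place over rows 20..80 / columns 20..120 and returns image.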
273 | """ 274 | y1, x1, y2, x2 = box 275 | image[y1:y1 + 2, x1:x2] = color 276 | image[y2:y2 + 2, x1:x2] = color 277 | image[y1:y2, x1:x1 + 2] = color 278 | image[y1:y2, x2:x2 + 2] = color 279 | return image 280 | 281 | 282 | def display_top_masks(image, mask, class_ids, class_names, limit=4): 283 | """Display the given image and the top few class masks.""" 284 | to_display = [] 285 | titles = [] 286 | to_display.append(image) 287 | titles.append("H x W={}x{}".format(image.shape[0], image.shape[1])) 288 | # Pick top prominent classes in this image 289 | unique_class_ids = np.unique(class_ids) 290 | mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) 291 | for i in unique_class_ids] 292 | top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area), 293 | key=lambda r: r[1], reverse=True) if v[1] > 0] 294 | # Generate images and titles 295 | for i in range(limit): 296 | class_id = top_ids[i] if i < len(top_ids) else -1 297 | # Pull masks of instances belonging to the same class. 298 | m = mask[:, :, np.where(class_ids == class_id)[0]] 299 | m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1) 300 | to_display.append(m) 301 | titles.append(class_names[class_id] if class_id != -1 else "-") 302 | display_images(to_display, titles=titles, cols=limit + 1, cmap="Blues_r") 303 | 304 | 305 | def plot_precision_recall(AP, precisions, recalls): 306 | """Draw the precision-recall curve. 307 | 308 | AP: Average precision at IoU >= 0.5 309 | precisions: list of precision values 310 | recalls: list of recall values 311 | """ 312 | # Plot the Precision-Recall curve 313 | _, ax = plt.subplots(1) 314 | ax.set_title("Precision-Recall Curve. AP@50 = {:.3f}".format(AP)) 315 | ax.set_ylim(0, 1.1) 316 | ax.set_xlim(0, 1.1) 317 | _ = ax.plot(recalls, precisions) 318 | 319 | 320 | def plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, 321 | overlaps, class_names, threshold=0.5): 322 | """Draw a grid showing how ground truth objects are classified. 323 | gt_class_ids: [N] int. Ground truth class IDs 324 | pred_class_id: [N] int. Predicted class IDs 325 | pred_scores: [N] float. The probability scores of predicted classes 326 | overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes. 327 | class_names: list of all class names in the dataset 328 | threshold: Float. The prediction probability required to predict a class 329 | """ 330 | gt_class_ids = gt_class_ids[gt_class_ids != 0] 331 | pred_class_ids = pred_class_ids[pred_class_ids != 0] 332 | 333 | plt.figure(figsize=(12, 10)) 334 | plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues) 335 | plt.yticks(np.arange(len(pred_class_ids)), 336 | ["{} ({:.2f})".format(class_names[int(id)], pred_scores[i]) 337 | for i, id in enumerate(pred_class_ids)]) 338 | plt.xticks(np.arange(len(gt_class_ids)), 339 | [class_names[int(id)] for id in gt_class_ids], rotation=90) 340 | 341 | thresh = overlaps.max() / 2. 
342 | for i, j in itertools.product(range(overlaps.shape[0]), 343 | range(overlaps.shape[1])): 344 | text = "" 345 | if overlaps[i, j] > threshold: 346 | text = "match" if gt_class_ids[j] == pred_class_ids[i] else "wrong" 347 | color = ("white" if overlaps[i, j] > thresh 348 | else "black" if overlaps[i, j] > 0 349 | else "grey") 350 | plt.text(j, i, "{:.3f}\n{}".format(overlaps[i, j], text), 351 | horizontalalignment="center", verticalalignment="center", 352 | fontsize=9, color=color) 353 | 354 | plt.tight_layout() 355 | plt.xlabel("Ground Truth") 356 | plt.ylabel("Predictions") 357 | 358 | 359 | def draw_boxes(image, boxes=None, refined_boxes=None, 360 | masks=None, captions=None, visibilities=None, 361 | title="", ax=None): 362 | """Draw bounding boxes and segmentation masks with different 363 | customizations. 364 | 365 | boxes: [N, (y1, x1, y2, x2)] in image coordinates. 366 | refined_boxes: Like boxes, but draw with solid lines to show 367 | that they're the result of refining 'boxes'. 368 | masks: [height, width, N] 369 | captions: List of N titles to display on each box 370 | visibilities: (optional) List of values of 0, 1, or 2. Determine how 371 | prominent each bounding box should be. 372 | title: An optional title to show over the image 373 | ax: (optional) Matplotlib axis to draw on. 374 | """ 375 | # Number of boxes 376 | assert boxes is not None or refined_boxes is not None 377 | N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0] 378 | 379 | # Matplotlib Axis 380 | if not ax: 381 | _, ax = plt.subplots(1, figsize=(12, 12)) 382 | 383 | # Generate random colors 384 | colors = random_colors(N) 385 | 386 | # Show area outside image boundaries. 387 | margin = image.shape[0] // 10 388 | ax.set_ylim(image.shape[0] + margin, -margin) 389 | ax.set_xlim(-margin, image.shape[1] + margin) 390 | ax.axis('off') 391 | 392 | ax.set_title(title) 393 | 394 | masked_image = image.astype(np.uint32).copy() 395 | for i in range(N): 396 | # Box visibility 397 | visibility = visibilities[i] if visibilities is not None else 1 398 | if visibility == 0: 399 | color = "gray" 400 | style = "dotted" 401 | alpha = 0.5 402 | elif visibility == 1: 403 | color = colors[i] 404 | style = "dotted" 405 | alpha = 1 406 | elif visibility == 2: 407 | color = colors[i] 408 | style = "solid" 409 | alpha = 1 410 | 411 | # Boxes 412 | if boxes is not None: 413 | if not np.any(boxes[i]): 414 | # Skip this instance. Has no bbox. Likely lost in cropping.
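                # (padded/dropped instances are encoded as all-zero rows, so np.any() is False)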
415 | continue 416 | y1, x1, y2, x2 = boxes[i] 417 | p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, 418 | alpha=alpha, linestyle=style, 419 | edgecolor=color, facecolor='none') 420 | ax.add_patch(p) 421 | 422 | # Refined boxes 423 | if refined_boxes is not None and visibility > 0: 424 | ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32) 425 | p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, 426 | edgecolor=color, facecolor='none') 427 | ax.add_patch(p) 428 | # Connect the top-left corners of the anchor and proposal 429 | if boxes is not None: 430 | ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color)) 431 | 432 | # Captions 433 | if captions is not None: 434 | caption = captions[i] 435 | # If there are refined boxes, display captions on them 436 | if refined_boxes is not None: 437 | y1, x1, y2, x2 = ry1, rx1, ry2, rx2 438 | ax.text(x1, y1, caption, size=11, verticalalignment='top', 439 | color='w', backgroundcolor="none", 440 | bbox={'facecolor': color, 'alpha': 0.5, 441 | 'pad': 2, 'edgecolor': 'none'}) 442 | 443 | # Masks 444 | if masks is not None: 445 | mask = masks[:, :, i] 446 | masked_image = apply_mask(masked_image, mask, color) 447 | # Mask Polygon 448 | # Pad to ensure proper polygons for masks that touch image edges. 449 | padded_mask = np.zeros( 450 | (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8) 451 | padded_mask[1:-1, 1:-1] = mask 452 | contours = find_contours(padded_mask, 0.5) 453 | for verts in contours: 454 | # Subtract the padding and flip (y, x) to (x, y) 455 | verts = np.fliplr(verts) - 1 456 | p = Polygon(verts, facecolor="none", edgecolor=color) 457 | ax.add_patch(p) 458 | ax.imshow(masked_image.astype(np.uint8)) 459 | 460 | 461 | def display_table(table): 462 | """Display values in a table format. 463 | table: an iterable of rows, and each row is an iterable of values. 464 | """ 465 | html = "" 466 | for row in table: 467 | row_html = "" 468 | for col in row: 469 | row_html += "<td>{:40}</td>".format(str(col)) 470 | html += "<tr>" + row_html + "</tr>" 471 | html = "<table>" + html + "</table>"
472 | IPython.display.display(IPython.display.HTML(html)) 473 | 474 | 475 | def display_weight_stats(model): 476 | """Scans all the weights in the model and returns a list of tuples 477 | that contain stats about each weight. 478 | """ 479 | layers = model.get_trainable_layers() 480 | table = [["WEIGHT NAME", "SHAPE", "MIN", "MAX", "STD"]] 481 | for l in layers: 482 | weight_values = l.get_weights() # list of Numpy arrays 483 | weight_tensors = l.weights # list of TF tensors 484 | for i, w in enumerate(weight_values): 485 | weight_name = weight_tensors[i].name 486 | # Detect problematic layers. Exclude biases of conv layers. 487 | alert = "" 488 | if w.min() == w.max() and not (l.__class__.__name__ == "Conv2D" and i == 1): 489 | alert += "*** dead?" 490 | if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000: 491 | alert += "*** Overflow?" 492 | # Add row 493 | table.append([ 494 | weight_name + alert, 495 | str(w.shape), 496 | "{:+9.4f}".format(w.min()), 497 | "{:+10.4f}".format(w.max()), 498 | "{:+9.4f}".format(w.std()), 499 | ]) 500 | display_table(table) 501 | -------------------------------------------------------------------------------- /static/css/style.css: -------------------------------------------------------------------------------- 1 | /* 2 | Theme Name: Regna 3 | Theme URL: https://bootstrapmade.com/regna-bootstrap-onepage-template/ 4 | Author: BootstrapMade.com 5 | License: https://bootstrapmade.com/license/ 6 | */ 7 | /*-------------------------------------------------------------- 8 | # General 9 | --------------------------------------------------------------*/ 10 | body { 11 | background: #fff; 12 | color: #666666; 13 | font-family: "Open Sans", sans-serif; 14 | } 15 | 16 | a { 17 | color: #2dc997; 18 | } 19 | 20 | a:hover, a:active, a:focus { 21 | color: #2dca98; 22 | outline: none; 23 | text-decoration: none; 24 | } 25 | 26 | p { 27 | padding: 0; 28 | margin: 0 0 30px 0; 29 | } 30 | 31 | h1, h2, h3, h4, h5, h6 { 32 | font-family: "Tw Cen MT", sans-serif; 33 | font-weight: 400; 34 | margin: 0 0 20px 0; 35 | padding: 0; 36 | } 37 | 38 | /* Preloader */ 39 | #preloader { 40 | position: fixed; 41 | left: 0; 42 | top: 0; 43 | z-index: 999; 44 | width: 100%; 45 | height: 100%; 46 | overflow: visible; 47 | background: #fff url("../img/preloader.svg") no-repeat center center; 48 | } 49 | 50 | /* Back to top button */ 51 | .back-to-top { 52 | position: fixed; 53 | display: none; 54 | background: rgba(0, 0, 0, 0.4); 55 | color: #fff; 56 | padding: 6px 12px 9px 12px; 57 | font-size: 16px; 58 | border-radius: 2px; 59 | right: 15px; 60 | bottom: 15px; 61 | transition: background 0.5s; 62 | } 63 | 64 | @media (max-width: 768px) { 65 | .back-to-top { 66 | bottom: 15px; 67 | } 68 | } 69 | 70 | .back-to-top:focus { 71 | background: rgba(0, 0, 0, 0.2); 72 | color: #fff; 73 | outline: none; 74 | } 75 | 76 | .back-to-top:hover { 77 | background: #2dc997; 78 | color: #fff; 79 | } 80 | 81 | /*-------------------------------------------------------------- 82 | # Header 83 | --------------------------------------------------------------*/ 84 | #header { 85 | padding: 30px 0; 86 | height: 92px; 87 | position: fixed; 88 | left: 0; 89 | top: 0; 90 | right: 0; 91 | transition: all 0.5s; 92 | z-index: 997; 93 | } 94 | 95 | #header #logo { 96 | float: left; 97 | } 98 | 99 | #header #logo h1 { 100 | font-size: 36px; 101 | margin: 0; 102 | padding: 6px 0; 103 | line-height: 1; 104 | font-family: "Tw Cen MT", sans-serif; 105 | font-weight: 700; 106 | letter-spacing: 3px; 107 | text-transform:
uppercase; 108 | } 109 | 110 | #header #logo h1 a, #header #logo h1 a:hover { 111 | color: #fff; 112 | } 113 | 114 | #header #logo img { 115 | padding: 0; 116 | margin: 0; 117 | } 118 | 119 | @media (max-width: 768px) { 120 | #header #logo h1 { 121 | font-size: 26px; 122 | } 123 | #header #logo img { 124 | max-height: 40px; 125 | } 126 | } 127 | 128 | #header.header-fixed { 129 | background: rgba(52, 59, 64, 0.9); 130 | padding: 20px 0; 131 | height: 72px; 132 | transition: all 0.5s; 133 | } 134 | 135 | /*-------------------------------------------------------------- 136 | # Hero Section 137 | --------------------------------------------------------------*/ 138 | #hero { 139 | width: 100%; 140 | height: 100vh; 141 | /*background: url({{ url_for('static', filename='/images/hero-bg.jpg') }}) top center; 142 | background: url(static/images/hero-bg.jpg) top center; 143 | background-size: cover; 144 | position: relative;*/ 145 | } 146 | 147 | @media (min-width: 1024px) { 148 | #hero { 149 | background-attachment: fixed; 150 | } 151 | } 152 | 153 | #hero:before { 154 | content: ""; 155 | background: rgba(0, 0, 0, 0.5); 156 | position: absolute; 157 | bottom: 0; 158 | top: 0; 159 | left: 0; 160 | right: 0; 161 | } 162 | 163 | #hero .hero-container { 164 | position: absolute; 165 | bottom: 0; 166 | top: 0; 167 | left: 0; 168 | right: 0; 169 | display: flex; 170 | justify-content: center; 171 | align-items: center; 172 | flex-direction: column; 173 | text-align: center; 174 | } 175 | 176 | #hero h1 { 177 | margin: 30px 0 10px 0; 178 | font-size: 48px; 179 | font-weight: 700; 180 | line-height: 56px; 181 | /*text-transform: uppercase; */ 182 | color: #2dc997; 183 | } 184 | 185 | @media (max-width: 768px) { 186 | #hero h1 { 187 | font-size: 28px; 188 | line-height: 36px; 189 | } 190 | } 191 | 192 | #hero h2 { 193 | color: #2dc997; 194 | margin-bottom: 50px; 195 | font-size: 24px; 196 | } 197 | 198 | @media (max-width: 768px) { 199 | #hero h2 { 200 | font-size: 18px; 201 | line-height: 24px; 202 | margin-bottom: 30px; 203 | } 204 | } 205 | 206 | #hero .btn-get-started { 207 | font-family: "Poppins", sans-serif; 208 | text-transform: uppercase; 209 | font-weight: 500; 210 | font-size: 16px; 211 | letter-spacing: 1px; 212 | display: inline-block; 213 | padding: 8px 28px; 214 | border-radius: 50px; 215 | transition: 0.5s; 216 | margin: 10px; 217 | border: 2px solid #fff; 218 | color: #fff; 219 | } 220 | 221 | #hero .btn-get-started:hover { 222 | background: #2dc997; 223 | border: 2px solid #2dc997; 224 | } 225 | 226 | /*-------------------------------------------------------------- 227 | # Navigation Menu 228 | --------------------------------------------------------------*/ 229 | /* Nav Menu Essentials */ 230 | .nav-menu, .nav-menu * { 231 | margin: 0; 232 | padding: 0; 233 | list-style: none; 234 | } 235 | 236 | .nav-menu ul { 237 | position: absolute; 238 | display: none; 239 | top: 100%; 240 | left: 0; 241 | z-index: 99; 242 | } 243 | 244 | .nav-menu li { 245 | position: relative; 246 | white-space: nowrap; 247 | } 248 | 249 | .nav-menu > li { 250 | float: left; 251 | } 252 | 253 | .nav-menu li:hover > ul, 254 | .nav-menu li.sfHover > ul { 255 | display: block; 256 | } 257 | 258 | .nav-menu ul ul { 259 | top: 0; 260 | left: 100%; 261 | } 262 | 263 | .nav-menu ul li { 264 | min-width: 180px; 265 | } 266 | 267 | /* Nav Menu Arrows */ 268 | .sf-arrows .sf-with-ul { 269 | padding-right: 30px; 270 | } 271 | 272 | .sf-arrows .sf-with-ul:after { 273 | content: "\f107"; 274 | position: absolute; 275 | right: 
15px; 276 | font-family: FontAwesome; 277 | font-style: normal; 278 | font-weight: normal; 279 | } 280 | 281 | .sf-arrows ul .sf-with-ul:after { 282 | content: "\f105"; 283 | } 284 | 285 | /* Nav Meu Container */ 286 | #nav-menu-container { 287 | float: right; 288 | margin: 0; 289 | } 290 | 291 | @media (max-width: 768px) { 292 | #nav-menu-container { 293 | display: none; 294 | } 295 | } 296 | 297 | /* Nav Meu Styling */ 298 | .nav-menu a { 299 | padding: 0 8px 10px 8px; 300 | text-decoration: none; 301 | display: inline-block; 302 | color: #fff; 303 | font-family: "Poppins", sans-serif; 304 | font-weight: 400; 305 | text-transform: uppercase; 306 | font-size: 13px; 307 | outline: none; 308 | } 309 | 310 | .nav-menu > li { 311 | margin-left: 10px; 312 | } 313 | 314 | .nav-menu > li > a:before { 315 | content: ""; 316 | position: absolute; 317 | width: 100%; 318 | height: 2px; 319 | bottom: 0; 320 | left: 0; 321 | background-color: #2dc997; 322 | visibility: hidden; 323 | -webkit-transform: scaleX(0); 324 | transform: scaleX(0); 325 | -webkit-transition: all 0.3s ease-in-out 0s; 326 | transition: all 0.3s ease-in-out 0s; 327 | } 328 | 329 | .nav-menu a:hover:before, .nav-menu li:hover > a:before, .nav-menu .menu-active > a:before { 330 | visibility: visible; 331 | -webkit-transform: scaleX(1); 332 | transform: scaleX(1); 333 | } 334 | 335 | .nav-menu ul { 336 | margin: 4px 0 0 0; 337 | border: 1px solid #e7e7e7; 338 | } 339 | 340 | .nav-menu ul li { 341 | background: #fff; 342 | } 343 | 344 | .nav-menu ul li:first-child { 345 | border-top: 0; 346 | } 347 | 348 | .nav-menu ul li a { 349 | padding: 10px; 350 | color: #333; 351 | transition: 0.3s; 352 | display: block; 353 | font-size: 13px; 354 | text-transform: none; 355 | } 356 | 357 | .nav-menu ul li a:hover { 358 | background: #2dc997; 359 | color: #fff; 360 | } 361 | 362 | .nav-menu ul ul { 363 | margin: 0; 364 | } 365 | 366 | /* Mobile Nav Toggle */ 367 | #mobile-nav-toggle { 368 | position: fixed; 369 | right: 0; 370 | top: 0; 371 | z-index: 999; 372 | margin: 20px 20px 0 0; 373 | border: 0; 374 | background: none; 375 | font-size: 24px; 376 | display: none; 377 | transition: all 0.4s; 378 | outline: none; 379 | cursor: pointer; 380 | } 381 | 382 | #mobile-nav-toggle i { 383 | color: #fff; 384 | } 385 | 386 | @media (max-width: 768px) { 387 | #mobile-nav-toggle { 388 | display: inline; 389 | } 390 | } 391 | 392 | /* Mobile Nav Styling */ 393 | #mobile-nav { 394 | position: fixed; 395 | top: 0; 396 | padding-top: 18px; 397 | bottom: 0; 398 | z-index: 998; 399 | background: rgba(52, 59, 64, 0.9); 400 | left: -260px; 401 | width: 260px; 402 | overflow-y: auto; 403 | transition: 0.4s; 404 | } 405 | 406 | #mobile-nav ul { 407 | padding: 0; 408 | margin: 0; 409 | list-style: none; 410 | } 411 | 412 | #mobile-nav ul li { 413 | position: relative; 414 | } 415 | 416 | #mobile-nav ul li a { 417 | color: #fff; 418 | font-size: 16px; 419 | overflow: hidden; 420 | padding: 10px 22px 10px 15px; 421 | position: relative; 422 | text-decoration: none; 423 | width: 100%; 424 | display: block; 425 | outline: none; 426 | } 427 | 428 | #mobile-nav ul li a:hover { 429 | color: #fff; 430 | } 431 | 432 | #mobile-nav ul li li { 433 | padding-left: 30px; 434 | } 435 | 436 | #mobile-nav ul .menu-has-children i { 437 | position: absolute; 438 | right: 0; 439 | z-index: 99; 440 | padding: 15px; 441 | cursor: pointer; 442 | color: #fff; 443 | } 444 | 445 | #mobile-nav ul .menu-has-children i.fa-chevron-up { 446 | color: #2dc997; 447 | } 448 | 449 | #mobile-nav ul 
.menu-item-active { 450 | color: #2dc997; 451 | } 452 | 453 | #mobile-body-overly { 454 | width: 100%; 455 | height: 100%; 456 | z-index: 997; 457 | top: 0; 458 | left: 0; 459 | position: fixed; 460 | background: rgba(52, 59, 64, 0.9); 461 | display: none; 462 | } 463 | 464 | /* Mobile Nav body classes */ 465 | body.mobile-nav-active { 466 | overflow: hidden; 467 | } 468 | 469 | body.mobile-nav-active #mobile-nav { 470 | left: 0; 471 | } 472 | 473 | body.mobile-nav-active #mobile-nav-toggle { 474 | color: #fff; 475 | } 476 | 477 | /*-------------------------------------------------------------- 478 | # Sections 479 | --------------------------------------------------------------*/ 480 | /* Sections Header 481 | --------------------------------*/ 482 | .section-header .section-title { 483 | font-size: 32px; 484 | color: #111; 485 | text-transform: uppercase; 486 | text-align: center; 487 | font-weight: 700; 488 | margin-bottom: 5px; 489 | } 490 | 491 | .section-header .section-description { 492 | text-align: center; 493 | padding-bottom: 40px; 494 | color: #999; 495 | } 496 | 497 | /* About Us Section 498 | --------------------------------*/ 499 | #about { 500 | background: #fff; 501 | padding: 80px 0; 502 | } 503 | 504 | #about .about-container .background { 505 | min-height: 300px; 506 | background: url(../img/about-img.jpg) center top no-repeat; 507 | margin-bottom: 10px; 508 | } 509 | 510 | #about .about-container .content { 511 | background: #fff; 512 | } 513 | 514 | #about .about-container .title { 515 | color: #333; 516 | font-weight: 700; 517 | font-size: 32px; 518 | } 519 | 520 | @media (max-width: 768px) { 521 | #about .about-container .title { 522 | padding-top: 15px; 523 | } 524 | } 525 | 526 | #about .about-container p { 527 | line-height: 26px; 528 | } 529 | 530 | #about .about-container p:last-child { 531 | margin-bottom: 0; 532 | } 533 | 534 | #about .about-container .icon-box { 535 | background: #fff; 536 | background-size: cover; 537 | padding: 0 0 30px 0; 538 | } 539 | 540 | #about .about-container .icon-box .icon { 541 | float: left; 542 | background: #fff; 543 | width: 64px; 544 | height: 64px; 545 | display: flex; 546 | justify-content: center; 547 | align-items: center; 548 | flex-direction: column; 549 | text-align: center; 550 | border-radius: 50%; 551 | border: 2px solid #2dc997; 552 | } 553 | 554 | #about .about-container .icon-box .icon i { 555 | color: #2dc997; 556 | font-size: 24px; 557 | } 558 | 559 | #about .about-container .icon-box .title { 560 | margin-left: 80px; 561 | font-weight: 500; 562 | margin-bottom: 5px; 563 | font-size: 18px; 564 | text-transform: uppercase; 565 | } 566 | 567 | #about .about-container .icon-box .title a { 568 | color: #111; 569 | } 570 | 571 | #about .about-container .icon-box .description { 572 | margin-left: 80px; 573 | line-height: 24px; 574 | font-size: 14px; 575 | } 576 | 577 | /* Facts Section 578 | --------------------------------*/ 579 | #facts { 580 | background: #f7f7f7; 581 | padding: 80px 0 60px 0; 582 | } 583 | 584 | #facts .counters span { 585 | font-size: 48px; 586 | display: block; 587 | color: #2dc997; 588 | } 589 | 590 | #facts .counters p { 591 | padding: 0; 592 | margin: 0 0 20px 0; 593 | font-family: "Poppins", sans-serif; 594 | font-size: 14px; 595 | } 596 | 597 | /* Services Section 598 | --------------------------------*/ 599 | #services { 600 | background: #fff; 601 | background-size: cover; 602 | padding: 80px 0 60px 0; 603 | } 604 | 605 | #services .box { 606 | padding: 50px 20px; 607 | margin-bottom: 
50px; 608 | text-align: center; 609 | border: 1px solid #e6e6e6; 610 | height: 200px; 611 | position: relative; 612 | background: #fafafa; 613 | } 614 | 615 | #services .icon { 616 | position: absolute; 617 | top: -36px; 618 | left: calc(50% - 36px); 619 | transition: 0.2s; 620 | border-radius: 50%; 621 | border: 6px solid #fff; 622 | display: flex; 623 | justify-content: center; 624 | align-items: center; 625 | flex-direction: column; 626 | text-align: center; 627 | width: 72px; 628 | height: 72px; 629 | background: #2dc997; 630 | } 631 | 632 | #services .icon a { 633 | display: inline-block; 634 | } 635 | 636 | #services .icon i { 637 | color: #fff; 638 | font-size: 24px; 639 | } 640 | 641 | #services .box:hover .icon { 642 | background: #fff; 643 | border: 2px solid #2dc997; 644 | } 645 | 646 | #services .box:hover .icon i { 647 | color: #2dc997; 648 | } 649 | 650 | #services .box:hover .icon a { 651 | color: #2dc997; 652 | } 653 | 654 | #services .title { 655 | font-weight: 700; 656 | font-size: 18px; 657 | margin-bottom: 15px; 658 | text-transform: uppercase; 659 | } 660 | 661 | #services .title a { 662 | color: #111; 663 | } 664 | 665 | #services .description { 666 | font-size: 14px; 667 | line-height: 24px; 668 | } 669 | 670 | /* Call To Action Section 671 | --------------------------------*/ 672 | #call-to-action { 673 | background: linear-gradient(rgba(0, 0, 0, 0.6), rgba(0, 0, 0, 0.6)), url(../images/call-to-action-bg.jpg) fixed center center; 674 | background-size: cover; 675 | padding: 80px 0; 676 | } 677 | 678 | #call-to-action .cta-title { 679 | color: #fff; 680 | font-size: 28px; 681 | font-weight: 700; 682 | } 683 | 684 | #call-to-action .cta-text { 685 | color: #fff; 686 | } 687 | 688 | @media (min-width: 769px) { 689 | #call-to-action .cta-btn-container { 690 | display: flex; 691 | align-items: center; 692 | justify-content: flex-end; 693 | } 694 | } 695 | 696 | #call-to-action .cta-btn { 697 | font-family: "Poppins", sans-serif; 698 | text-transform: uppercase; 699 | font-weight: 500; 700 | font-size: 16px; 701 | letter-spacing: 1px; 702 | display: inline-block; 703 | padding: 8px 30px; 704 | border-radius: 50px; 705 | transition: 0.5s; 706 | margin: 10px; 707 | border: 2px solid #fff; 708 | color: #fff; 709 | } 710 | 711 | #call-to-action .cta-btn:hover { 712 | background: #2dc997; 713 | border: 2px solid #2dc997; 714 | } 715 | 716 | /* Portfolio Section 717 | --------------------------------*/ 718 | #portfolio { 719 | background: #f7f7f7; 720 | padding: 80px 0; 721 | } 722 | 723 | #portfolio #portfolio-wrapper { 724 | padding-right: 15px; 725 | } 726 | 727 | #portfolio #portfolio-flters { 728 | padding: 0; 729 | margin: 0 0 45px 0; 730 | list-style: none; 731 | text-align: center; 732 | } 733 | 734 | #portfolio #portfolio-flters li { 735 | cursor: pointer; 736 | margin: 0 10px; 737 | display: inline-block; 738 | padding: 10px 22px; 739 | font-size: 12px; 740 | line-height: 20px; 741 | color: #666666; 742 | border-radius: 4px; 743 | text-transform: uppercase; 744 | background: #fff; 745 | margin-bottom: 5px; 746 | transition: all 0.3s ease-in-out; 747 | } 748 | 749 | #portfolio #portfolio-flters li:hover, #portfolio #portfolio-flters li.filter-active { 750 | background: #2dc997; 751 | color: #fff; 752 | } 753 | 754 | #portfolio .portfolio-item { 755 | position: relative; 756 | height: 200px; 757 | overflow: hidden !important; 758 | margin-bottom: 15px; 759 | transition: all 350ms ease; 760 | transform: scale(1); 761 | } 762 | 763 | 
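/* Editor's note: Flask serves everything under static/ verbatim and never runs
   it through Jinja, so a url_for() placeholder cannot work inside this file;
   static stylesheets must use paths relative to the CSS file, e.g.
   url(../images/call-to-action-bg.jpg) as in #call-to-action above. */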
#portfolio .portfolio-item a { 764 | display: block; 765 | margin-right: 15px; 766 | } 767 | 768 | #portfolio .portfolio-item img { 769 | position: relative; 770 | top: 0; 771 | transition: all 600ms cubic-bezier(0.645, 0.045, 0.355, 1); 772 | } 773 | 774 | #portfolio .portfolio-item .details { 775 | height: 50px; 776 | background: #2dc997; 777 | position: absolute; 778 | width: 100%; 779 | height: 50px; 780 | bottom: -50px; 781 | transition: all 300ms cubic-bezier(0.645, 0.045, 0.355, 1); 782 | } 783 | 784 | #portfolio .portfolio-item .details h4 { 785 | font-size: 14px; 786 | font-weight: 700; 787 | color: #fff; 788 | padding: 8px 0 2px 8px; 789 | margin: 0; 790 | } 791 | 792 | #portfolio .portfolio-item .details span { 793 | display: block; 794 | color: #fff; 795 | font-size: 13px; 796 | padding-left: 8px; 797 | } 798 | 799 | #portfolio .portfolio-item:hover .details { 800 | bottom: 0; 801 | } 802 | 803 | #portfolio .portfolio-item:hover img { 804 | top: -30px; 805 | } 806 | 807 | /* Team Section 808 | --------------------------------*/ 809 | #team { 810 | background: #fff; 811 | padding: 80px 0 60px 0; 812 | } 813 | 814 | #team .member { 815 | text-align: center; 816 | margin-bottom: 20px; 817 | } 818 | 819 | #team .member .pic { 820 | margin-bottom: 15px; 821 | overflow: hidden; 822 | height: 260px; 823 | } 824 | 825 | #team .member .pic img { 826 | max-width: 100%; 827 | } 828 | 829 | #team .member h4 { 830 | font-weight: 700; 831 | margin-bottom: 2px; 832 | font-size: 18px; 833 | } 834 | 835 | #team .member span { 836 | font-style: italic; 837 | display: block; 838 | font-size: 13px; 839 | } 840 | 841 | #team .member .social { 842 | margin-top: 15px; 843 | } 844 | 845 | #team .member .social a { 846 | color: #b3b3b3; 847 | } 848 | 849 | #team .member .social a:hover { 850 | color: #2dc997; 851 | } 852 | 853 | #team .member .social i { 854 | font-size: 18px; 855 | margin: 0 2px; 856 | } 857 | 858 | /* Contact Section 859 | --------------------------------*/ 860 | #contact { 861 | background: #f7f7f7; 862 | padding: 80px 0 40px 0; 863 | } 864 | 865 | #contact #google-map { 866 | height: 300px; 867 | margin-bottom: 20px; 868 | } 869 | 870 | #contact .info { 871 | color: #333333; 872 | } 873 | 874 | #contact .info i { 875 | font-size: 32px; 876 | color: #2dc997; 877 | float: left; 878 | } 879 | 880 | #contact .info p { 881 | padding: 0 0 10px 50px; 882 | margin-bottom: 20px; 883 | line-height: 22px; 884 | font-size: 14px; 885 | } 886 | 887 | #contact .info .email p { 888 | padding-top: 5px; 889 | } 890 | 891 | #contact .social-links { 892 | padding-bottom: 20px; 893 | } 894 | 895 | #contact .social-links a { 896 | font-size: 18px; 897 | display: inline-block; 898 | background: #333; 899 | color: #fff; 900 | line-height: 1; 901 | padding: 8px 0; 902 | border-radius: 50%; 903 | text-align: center; 904 | width: 36px; 905 | height: 36px; 906 | transition: 0.3s; 907 | } 908 | 909 | #contact .social-links a:hover { 910 | background: #2dc997; 911 | color: #fff; 912 | } 913 | 914 | #contact .form #sendmessage { 915 | color: #2dc997; 916 | border: 1px solid #2dc997; 917 | display: none; 918 | text-align: center; 919 | padding: 15px; 920 | font-weight: 600; 921 | margin-bottom: 15px; 922 | } 923 | 924 | #contact .form #errormessage { 925 | color: red; 926 | display: none; 927 | border: 1px solid red; 928 | text-align: center; 929 | padding: 15px; 930 | font-weight: 600; 931 | margin-bottom: 15px; 932 | } 933 | 934 | #contact .form #sendmessage.show, #contact .form #errormessage.show, #contact 
.form .show { 935 | display: block; 936 | } 937 | 938 | #contact .form .validation { 939 | color: red; 940 | display: none; 941 | margin: 0 0 20px; 942 | font-weight: 400; 943 | font-size: 13px; 944 | } 945 | 946 | #contact .form input, #contact .form textarea { 947 | border-radius: 0; 948 | box-shadow: none; 949 | font-size: 14px; 950 | } 951 | 952 | #contact .form button[type="submit"] { 953 | background: #2dc997; 954 | border: 0; 955 | padding: 10px 24px; 956 | color: #fff; 957 | transition: 0.4s; 958 | } 959 | 960 | #contact .form button[type="submit"]:hover { 961 | background: #51d8ad; 962 | } 963 | 964 | /*-------------------------------------------------------------- 965 | # Footer 966 | --------------------------------------------------------------*/ 967 | #footer { 968 | background: #343b40; 969 | padding: 30px 0; 970 | color: #fff; 971 | font-size: 14px; 972 | } 973 | 974 | #footer .copyright { 975 | text-align: center; 976 | } 977 | 978 | #footer .credits { 979 | padding-top: 10px; 980 | text-align: center; 981 | font-size: 13px; 982 | color: #ccc; 983 | } 984 | -------------------------------------------------------------------------------- /static/css/bootstrap-reboot.min.css.map: -------------------------------------------------------------------------------- 1 | [machine-generated source map for bootstrap-reboot.min.css (Bootstrap Reboot v4.1.3): minified VLQ mappings plus embedded SCSS sources; not human-readable and truncated in this dump, so its body is omitted]
--------------------------------------------------------------------------------
h2, h3, h4, h5, h6 {\n margin-top: 0;\n margin-bottom: 0.5rem;\n}\n\np {\n margin-top: 0;\n margin-bottom: 1rem;\n}\n\nabbr[title],\nabbr[data-original-title] {\n text-decoration: underline;\n -webkit-text-decoration: underline dotted;\n text-decoration: underline dotted;\n cursor: help;\n border-bottom: 0;\n}\n\naddress {\n margin-bottom: 1rem;\n font-style: normal;\n line-height: inherit;\n}\n\nol,\nul,\ndl {\n margin-top: 0;\n margin-bottom: 1rem;\n}\n\nol ol,\nul ul,\nol ul,\nul ol {\n margin-bottom: 0;\n}\n\ndt {\n font-weight: 700;\n}\n\ndd {\n margin-bottom: .5rem;\n margin-left: 0;\n}\n\nblockquote {\n margin: 0 0 1rem;\n}\n\ndfn {\n font-style: italic;\n}\n\nb,\nstrong {\n font-weight: bolder;\n}\n\nsmall {\n font-size: 80%;\n}\n\nsub,\nsup {\n position: relative;\n font-size: 75%;\n line-height: 0;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -.25em;\n}\n\nsup {\n top: -.5em;\n}\n\na {\n color: #007bff;\n text-decoration: none;\n background-color: transparent;\n -webkit-text-decoration-skip: objects;\n}\n\na:hover {\n color: #0056b3;\n text-decoration: underline;\n}\n\na:not([href]):not([tabindex]) {\n color: inherit;\n text-decoration: none;\n}\n\na:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus {\n color: inherit;\n text-decoration: none;\n}\n\na:not([href]):not([tabindex]):focus {\n outline: 0;\n}\n\npre,\ncode,\nkbd,\nsamp {\n font-family: SFMono-Regular, Menlo, Monaco, Consolas, \"Liberation Mono\", \"Courier New\", monospace;\n font-size: 1em;\n}\n\npre {\n margin-top: 0;\n margin-bottom: 1rem;\n overflow: auto;\n -ms-overflow-style: scrollbar;\n}\n\nfigure {\n margin: 0 0 1rem;\n}\n\nimg {\n vertical-align: middle;\n border-style: none;\n}\n\nsvg {\n overflow: hidden;\n vertical-align: middle;\n}\n\ntable {\n border-collapse: collapse;\n}\n\ncaption {\n padding-top: 0.75rem;\n padding-bottom: 0.75rem;\n color: #6c757d;\n text-align: left;\n caption-side: bottom;\n}\n\nth {\n text-align: inherit;\n}\n\nlabel {\n display: inline-block;\n margin-bottom: 0.5rem;\n}\n\nbutton {\n border-radius: 0;\n}\n\nbutton:focus {\n outline: 1px dotted;\n outline: 5px auto -webkit-focus-ring-color;\n}\n\ninput,\nbutton,\nselect,\noptgroup,\ntextarea {\n margin: 0;\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\n\nbutton,\ninput {\n overflow: visible;\n}\n\nbutton,\nselect {\n text-transform: none;\n}\n\nbutton,\nhtml [type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n padding: 0;\n border-style: none;\n}\n\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n box-sizing: border-box;\n padding: 0;\n}\n\ninput[type=\"date\"],\ninput[type=\"time\"],\ninput[type=\"datetime-local\"],\ninput[type=\"month\"] {\n -webkit-appearance: listbox;\n}\n\ntextarea {\n overflow: auto;\n resize: vertical;\n}\n\nfieldset {\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\n\nlegend {\n display: block;\n width: 100%;\n max-width: 100%;\n padding: 0;\n margin-bottom: .5rem;\n font-size: 1.5rem;\n line-height: inherit;\n color: inherit;\n white-space: normal;\n}\n\nprogress {\n vertical-align: baseline;\n}\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n[type=\"search\"] {\n outline-offset: -2px;\n -webkit-appearance: 
none;\n}\n\n[type=\"search\"]::-webkit-search-cancel-button,\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n::-webkit-file-upload-button {\n font: inherit;\n -webkit-appearance: button;\n}\n\noutput {\n display: inline-block;\n}\n\nsummary {\n display: list-item;\n cursor: pointer;\n}\n\ntemplate {\n display: none;\n}\n\n[hidden] {\n display: none !important;\n}\n/*# sourceMappingURL=bootstrap-reboot.css.map */","/*!\n * Bootstrap Reboot v4.1.3 (https://getbootstrap.com/)\n * Copyright 2011-2018 The Bootstrap Authors\n * Copyright 2011-2018 Twitter, Inc.\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)\n * Forked from Normalize.css, licensed MIT (https://github.com/necolas/normalize.css/blob/master/LICENSE.md)\n */\n*,\n*::before,\n*::after {\n box-sizing: border-box;\n}\n\nhtml {\n font-family: sans-serif;\n line-height: 1.15;\n -webkit-text-size-adjust: 100%;\n -ms-text-size-adjust: 100%;\n -ms-overflow-style: scrollbar;\n -webkit-tap-highlight-color: rgba(0, 0, 0, 0);\n}\n\n@-ms-viewport {\n width: device-width;\n}\n\narticle, aside, figcaption, figure, footer, header, hgroup, main, nav, section {\n display: block;\n}\n\nbody {\n margin: 0;\n font-family: -apple-system, BlinkMacSystemFont, \"Segoe UI\", Roboto, \"Helvetica Neue\", Arial, sans-serif, \"Apple Color Emoji\", \"Segoe UI Emoji\", \"Segoe UI Symbol\", \"Noto Color Emoji\";\n font-size: 1rem;\n font-weight: 400;\n line-height: 1.5;\n color: #212529;\n text-align: left;\n background-color: #fff;\n}\n\n[tabindex=\"-1\"]:focus {\n outline: 0 !important;\n}\n\nhr {\n box-sizing: content-box;\n height: 0;\n overflow: visible;\n}\n\nh1, h2, h3, h4, h5, h6 {\n margin-top: 0;\n margin-bottom: 0.5rem;\n}\n\np {\n margin-top: 0;\n margin-bottom: 1rem;\n}\n\nabbr[title],\nabbr[data-original-title] {\n text-decoration: underline;\n text-decoration: underline dotted;\n cursor: help;\n border-bottom: 0;\n}\n\naddress {\n margin-bottom: 1rem;\n font-style: normal;\n line-height: inherit;\n}\n\nol,\nul,\ndl {\n margin-top: 0;\n margin-bottom: 1rem;\n}\n\nol ol,\nul ul,\nol ul,\nul ol {\n margin-bottom: 0;\n}\n\ndt {\n font-weight: 700;\n}\n\ndd {\n margin-bottom: .5rem;\n margin-left: 0;\n}\n\nblockquote {\n margin: 0 0 1rem;\n}\n\ndfn {\n font-style: italic;\n}\n\nb,\nstrong {\n font-weight: bolder;\n}\n\nsmall {\n font-size: 80%;\n}\n\nsub,\nsup {\n position: relative;\n font-size: 75%;\n line-height: 0;\n vertical-align: baseline;\n}\n\nsub {\n bottom: -.25em;\n}\n\nsup {\n top: -.5em;\n}\n\na {\n color: #007bff;\n text-decoration: none;\n background-color: transparent;\n -webkit-text-decoration-skip: objects;\n}\n\na:hover {\n color: #0056b3;\n text-decoration: underline;\n}\n\na:not([href]):not([tabindex]) {\n color: inherit;\n text-decoration: none;\n}\n\na:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus {\n color: inherit;\n text-decoration: none;\n}\n\na:not([href]):not([tabindex]):focus {\n outline: 0;\n}\n\npre,\ncode,\nkbd,\nsamp {\n font-family: SFMono-Regular, Menlo, Monaco, Consolas, \"Liberation Mono\", \"Courier New\", monospace;\n font-size: 1em;\n}\n\npre {\n margin-top: 0;\n margin-bottom: 1rem;\n overflow: auto;\n -ms-overflow-style: scrollbar;\n}\n\nfigure {\n margin: 0 0 1rem;\n}\n\nimg {\n vertical-align: middle;\n border-style: none;\n}\n\nsvg {\n overflow: hidden;\n vertical-align: middle;\n}\n\ntable {\n border-collapse: collapse;\n}\n\ncaption {\n padding-top: 0.75rem;\n padding-bottom: 0.75rem;\n color: #6c757d;\n text-align: 
left;\n caption-side: bottom;\n}\n\nth {\n text-align: inherit;\n}\n\nlabel {\n display: inline-block;\n margin-bottom: 0.5rem;\n}\n\nbutton {\n border-radius: 0;\n}\n\nbutton:focus {\n outline: 1px dotted;\n outline: 5px auto -webkit-focus-ring-color;\n}\n\ninput,\nbutton,\nselect,\noptgroup,\ntextarea {\n margin: 0;\n font-family: inherit;\n font-size: inherit;\n line-height: inherit;\n}\n\nbutton,\ninput {\n overflow: visible;\n}\n\nbutton,\nselect {\n text-transform: none;\n}\n\nbutton,\nhtml [type=\"button\"],\n[type=\"reset\"],\n[type=\"submit\"] {\n -webkit-appearance: button;\n}\n\nbutton::-moz-focus-inner,\n[type=\"button\"]::-moz-focus-inner,\n[type=\"reset\"]::-moz-focus-inner,\n[type=\"submit\"]::-moz-focus-inner {\n padding: 0;\n border-style: none;\n}\n\ninput[type=\"radio\"],\ninput[type=\"checkbox\"] {\n box-sizing: border-box;\n padding: 0;\n}\n\ninput[type=\"date\"],\ninput[type=\"time\"],\ninput[type=\"datetime-local\"],\ninput[type=\"month\"] {\n -webkit-appearance: listbox;\n}\n\ntextarea {\n overflow: auto;\n resize: vertical;\n}\n\nfieldset {\n min-width: 0;\n padding: 0;\n margin: 0;\n border: 0;\n}\n\nlegend {\n display: block;\n width: 100%;\n max-width: 100%;\n padding: 0;\n margin-bottom: .5rem;\n font-size: 1.5rem;\n line-height: inherit;\n color: inherit;\n white-space: normal;\n}\n\nprogress {\n vertical-align: baseline;\n}\n\n[type=\"number\"]::-webkit-inner-spin-button,\n[type=\"number\"]::-webkit-outer-spin-button {\n height: auto;\n}\n\n[type=\"search\"] {\n outline-offset: -2px;\n -webkit-appearance: none;\n}\n\n[type=\"search\"]::-webkit-search-cancel-button,\n[type=\"search\"]::-webkit-search-decoration {\n -webkit-appearance: none;\n}\n\n::-webkit-file-upload-button {\n font: inherit;\n -webkit-appearance: button;\n}\n\noutput {\n display: inline-block;\n}\n\nsummary {\n display: list-item;\n cursor: pointer;\n}\n\ntemplate {\n display: none;\n}\n\n[hidden] {\n display: none !important;\n}\n\n/*# sourceMappingURL=bootstrap-reboot.css.map */","// Hover mixin and `$enable-hover-media-query` are deprecated.\n//\n// Originally added during our alphas and maintained during betas, this mixin was\n// designed to prevent `:hover` stickiness on iOS-an issue where hover styles\n// would persist after initial touch.\n//\n// For backward compatibility, we've kept these mixins and updated them to\n// always return their regular pseudo-classes instead of a shimmed media query.\n//\n// Issue: https://github.com/twbs/bootstrap/issues/25195\n\n@mixin hover {\n &:hover { @content; }\n}\n\n@mixin hover-focus {\n &:hover,\n &:focus {\n @content;\n }\n}\n\n@mixin plain-hover-focus {\n &,\n &:hover,\n &:focus {\n @content;\n }\n}\n\n@mixin hover-focus-active {\n &:hover,\n &:focus,\n &:active {\n @content;\n }\n}\n"]} -------------------------------------------------------------------------------- /static/css/bootstrap-grid.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bootstrap Grid v4.1.3 (https://getbootstrap.com/) 3 | * Copyright 2011-2018 The Bootstrap Authors 4 | * Copyright 2011-2018 Twitter, Inc. 
5 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) 6 | */@-ms-viewport{width:device-width}html{box-sizing:border-box;-ms-overflow-style:scrollbar}*,::after,::before{box-sizing:inherit}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:576px){.container{max-width:540px}}@media (min-width:768px){.container{max-width:720px}}@media (min-width:992px){.container{max-width:960px}}@media (min-width:1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*=col-]{padding-right:0;padding-left:0}.col,.col-1,.col-10,.col-11,.col-12,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-auto,.col-lg,.col-lg-1,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-auto,.col-md,.col-md-1,.col-md-10,.col-md-11,.col-md-12,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-auto,.col-sm,.col-sm-1,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-auto,.col-xl,.col-xl-1,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-auto{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}.col{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-first{-ms-flex-order:-1;order:-1}.order-last{-ms-flex-order:13;order:13}.order-0{-ms-flex-order:0;order:0}.order-1{-ms-flex-order:1;order:1}.order-2{-ms-flex-order:2;order:2}.order-3{-ms-flex-order:3;order:3}.order-4{-ms-flex-order:4;order:4}.order-5{-ms-flex-order:5;order:5}.order-6{-ms-flex-order:6;order:6}.order-7{-ms-flex-order:7;order:7}.order-8{-ms-flex-order:8;order:8}.order-9{-ms-flex-order:9;order:9}.order-10{-ms-flex-order:10;order:10}.order-11{-ms-flex-order:11;order:11}.order-12{-ms-flex-order:12;order:12}.offset-1{margin-left:8.333333%}.offset-2{margin-left:16.666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.333333%}.offset-5{margin-left:41.666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.333333%}.offset-8{margin-left:66.666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.333333%}.offset-11{margin-left:91.666667%}@media 
(min-width:576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-sm-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-sm-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-sm-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-sm-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-sm-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-sm-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-sm-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-sm-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-sm-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-sm-first{-ms-flex-order:-1;order:-1}.order-sm-last{-ms-flex-order:13;order:13}.order-sm-0{-ms-flex-order:0;order:0}.order-sm-1{-ms-flex-order:1;order:1}.order-sm-2{-ms-flex-order:2;order:2}.order-sm-3{-ms-flex-order:3;order:3}.order-sm-4{-ms-flex-order:4;order:4}.order-sm-5{-ms-flex-order:5;order:5}.order-sm-6{-ms-flex-order:6;order:6}.order-sm-7{-ms-flex-order:7;order:7}.order-sm-8{-ms-flex-order:8;order:8}.order-sm-9{-ms-flex-order:9;order:9}.order-sm-10{-ms-flex-order:10;order:10}.order-sm-11{-ms-flex-order:11;order:11}.order-sm-12{-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.333333%}.offset-sm-2{margin-left:16.666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.333333%}.offset-sm-5{margin-left:41.666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.333333%}.offset-sm-8{margin-left:66.666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.333333%}.offset-sm-11{margin-left:91.666667%}}@media (min-width:768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-md-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-md-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-md-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-md-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-md-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-md-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-md-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-md-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-md-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-md-12{-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-md-first{-ms-flex-order:-1;order:-1}.order-md-last{-ms-flex-order:13;order:13}.order-md-0{-ms-flex-order:0;order:0}.order-md-1{-ms-flex-order:1;order:1}.order-md-2{-ms-flex-order:2;order:2}.order-md-3{-ms-flex-order:3;order:3}.order-md-4{-ms-flex-order:4;order:4}.order-md-5{-ms-flex-order:5;order:5}.order-md-6{-ms-flex-order:6;order:6}.order-md-7{-ms-flex-order:7;order:7}.order-md-8{-ms-flex-order:8;order:8}.order-md-9{-ms-flex-order:9;order:9}.order-md-10{-ms-flex-order:10;order:10}.order-md-11{-ms-flex-order:11;order:11}.order-md-12{-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.333333%}.offset-md-2{margin-left:16.666667%}.offset-md-3{margin-left:25%}.offset-md-4{margin-left:33.333333%}.offset-md-5{margin-left:41.666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.333333%}.offset-md-8{margin-left:66.666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.333333%}.offset-md-11{margin-left:91.666667%}}@media (min-width:992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-lg-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-lg-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-lg-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-lg-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-lg-6{-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-lg-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-lg-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-lg-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-lg-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-lg-first{-ms-flex-order:-1;order:-1}.order-lg-last{-ms-flex-order:13;order:13}.order-lg-0{-ms-flex-order:0;order:0}.order-lg-1{-ms-flex-order:1;order:1}.order-lg-2{-ms-flex-order:2;order:2}.order-lg-3{-ms-flex-order:3;order:3}.order-lg-4{-ms-flex-order:4;order:4}.order-lg-5{-ms-flex-order:5;order:5}.order-lg-6{-ms-flex-order:6;order:6}.order-lg-7{-ms-flex-order:7;order:7}.order-lg-8{-ms-flex-order:8;order:8}.order-lg-9{-ms-flex-order:9;order:9}.order-lg-10{-ms-flex-order:10;order:10}.order-lg-11{-ms-flex-order:11;order:11}.order-lg-12{-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.333333%}.offset-lg-2{margin-left:16.666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.333333%}.offset-lg-5{margin-left:41.666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.333333%}.offset-lg-8{margin-left:66.666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.333333%}.offset-lg-11{margin-left:91.666667%}}@media (min-width:1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-xl-1{-ms-flex:0 0 8.333333%;flex:0 0 8.333333%;max-width:8.333333%}.col-xl-2{-ms-flex:0 0 16.666667%;flex:0 0 16.666667%;max-width:16.666667%}.col-xl-3{-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-xl-4{-ms-flex:0 0 33.333333%;flex:0 0 33.333333%;max-width:33.333333%}.col-xl-5{-ms-flex:0 0 41.666667%;flex:0 0 41.666667%;max-width:41.666667%}.col-xl-6{-ms-flex:0 0 
50%;flex:0 0 50%;max-width:50%}.col-xl-7{-ms-flex:0 0 58.333333%;flex:0 0 58.333333%;max-width:58.333333%}.col-xl-8{-ms-flex:0 0 66.666667%;flex:0 0 66.666667%;max-width:66.666667%}.col-xl-9{-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-ms-flex:0 0 83.333333%;flex:0 0 83.333333%;max-width:83.333333%}.col-xl-11{-ms-flex:0 0 91.666667%;flex:0 0 91.666667%;max-width:91.666667%}.col-xl-12{-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-xl-first{-ms-flex-order:-1;order:-1}.order-xl-last{-ms-flex-order:13;order:13}.order-xl-0{-ms-flex-order:0;order:0}.order-xl-1{-ms-flex-order:1;order:1}.order-xl-2{-ms-flex-order:2;order:2}.order-xl-3{-ms-flex-order:3;order:3}.order-xl-4{-ms-flex-order:4;order:4}.order-xl-5{-ms-flex-order:5;order:5}.order-xl-6{-ms-flex-order:6;order:6}.order-xl-7{-ms-flex-order:7;order:7}.order-xl-8{-ms-flex-order:8;order:8}.order-xl-9{-ms-flex-order:9;order:9}.order-xl-10{-ms-flex-order:10;order:10}.order-xl-11{-ms-flex-order:11;order:11}.order-xl-12{-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.333333%}.offset-xl-2{margin-left:16.666667%}.offset-xl-3{margin-left:25%}.offset-xl-4{margin-left:33.333333%}.offset-xl-5{margin-left:41.666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.333333%}.offset-xl-8{margin-left:66.666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.333333%}.offset-xl-11{margin-left:91.666667%}}.d-none{display:none!important}.d-inline{display:inline!important}.d-inline-block{display:inline-block!important}.d-block{display:block!important}.d-table{display:table!important}.d-table-row{display:table-row!important}.d-table-cell{display:table-cell!important}.d-flex{display:-ms-flexbox!important;display:flex!important}.d-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}@media (min-width:576px){.d-sm-none{display:none!important}.d-sm-inline{display:inline!important}.d-sm-inline-block{display:inline-block!important}.d-sm-block{display:block!important}.d-sm-table{display:table!important}.d-sm-table-row{display:table-row!important}.d-sm-table-cell{display:table-cell!important}.d-sm-flex{display:-ms-flexbox!important;display:flex!important}.d-sm-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:768px){.d-md-none{display:none!important}.d-md-inline{display:inline!important}.d-md-inline-block{display:inline-block!important}.d-md-block{display:block!important}.d-md-table{display:table!important}.d-md-table-row{display:table-row!important}.d-md-table-cell{display:table-cell!important}.d-md-flex{display:-ms-flexbox!important;display:flex!important}.d-md-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media (min-width:992px){.d-lg-none{display:none!important}.d-lg-inline{display:inline!important}.d-lg-inline-block{display:inline-block!important}.d-lg-block{display:block!important}.d-lg-table{display:table!important}.d-lg-table-row{display:table-row!important}.d-lg-table-cell{display:table-cell!important}.d-lg-flex{display:-ms-flexbox!important;display:flex!important}.d-lg-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media 
(min-width:1200px){.d-xl-none{display:none!important}.d-xl-inline{display:inline!important}.d-xl-inline-block{display:inline-block!important}.d-xl-block{display:block!important}.d-xl-table{display:table!important}.d-xl-table-row{display:table-row!important}.d-xl-table-cell{display:table-cell!important}.d-xl-flex{display:-ms-flexbox!important;display:flex!important}.d-xl-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}@media print{.d-print-none{display:none!important}.d-print-inline{display:inline!important}.d-print-inline-block{display:inline-block!important}.d-print-block{display:block!important}.d-print-table{display:table!important}.d-print-table-row{display:table-row!important}.d-print-table-cell{display:table-cell!important}.d-print-flex{display:-ms-flexbox!important;display:flex!important}.d-print-inline-flex{display:-ms-inline-flexbox!important;display:inline-flex!important}}.flex-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-center{-ms-flex-align:center!important;align-items:center!important}.align-items-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-center{-ms-flex-item-align:center!impor
tant;align-self:center!important}.align-self-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}@media (min-width:576px){.flex-sm-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-sm-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-sm-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-sm-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-sm-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-sm-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-sm-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-sm-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-sm-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-sm-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-sm-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-sm-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-sm-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-sm-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-sm-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-sm-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-sm-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-sm-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-sm-center{-ms-flex-align:center!important;align-items:center!important}.align-items-sm-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-sm-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-sm-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-sm-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-sm-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-sm-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-sm-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-sm-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-sm-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-sm-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-sm-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-sm-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-sm-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-sm-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:768px){.flex-md-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-md-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-md-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-md-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-md-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-md-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-md-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-md-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-md-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-md-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-md-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-md-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-md-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-md-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-md-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-md-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-md-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-md-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-md-center{-ms-flex-align:center!important;align-items:center!important}.align-items-md-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-md-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-md-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-md-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-md-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-md-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-md-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-md-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-md-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-md-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-md-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-md-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-md-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-md-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:992px){.flex-lg-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-lg-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-lg-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-lg-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-lg-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-lg-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-lg-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-lg-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-lg-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-lg-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-lg-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-lg-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-lg-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-lg-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-lg-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-lg-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-lg-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-lg-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-lg-center{-ms-flex-align:center!important;align-items:center!important}.align-items-lg-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-lg-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-lg-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-lg-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-lg-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-lg-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-lg-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-lg-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-lg-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-lg-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-lg-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-lg-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-lg-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-lg-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}}@media 
(min-width:1200px){.flex-xl-row{-ms-flex-direction:row!important;flex-direction:row!important}.flex-xl-column{-ms-flex-direction:column!important;flex-direction:column!important}.flex-xl-row-reverse{-ms-flex-direction:row-reverse!important;flex-direction:row-reverse!important}.flex-xl-column-reverse{-ms-flex-direction:column-reverse!important;flex-direction:column-reverse!important}.flex-xl-wrap{-ms-flex-wrap:wrap!important;flex-wrap:wrap!important}.flex-xl-nowrap{-ms-flex-wrap:nowrap!important;flex-wrap:nowrap!important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse!important;flex-wrap:wrap-reverse!important}.flex-xl-fill{-ms-flex:1 1 auto!important;flex:1 1 auto!important}.flex-xl-grow-0{-ms-flex-positive:0!important;flex-grow:0!important}.flex-xl-grow-1{-ms-flex-positive:1!important;flex-grow:1!important}.flex-xl-shrink-0{-ms-flex-negative:0!important;flex-shrink:0!important}.flex-xl-shrink-1{-ms-flex-negative:1!important;flex-shrink:1!important}.justify-content-xl-start{-ms-flex-pack:start!important;justify-content:flex-start!important}.justify-content-xl-end{-ms-flex-pack:end!important;justify-content:flex-end!important}.justify-content-xl-center{-ms-flex-pack:center!important;justify-content:center!important}.justify-content-xl-between{-ms-flex-pack:justify!important;justify-content:space-between!important}.justify-content-xl-around{-ms-flex-pack:distribute!important;justify-content:space-around!important}.align-items-xl-start{-ms-flex-align:start!important;align-items:flex-start!important}.align-items-xl-end{-ms-flex-align:end!important;align-items:flex-end!important}.align-items-xl-center{-ms-flex-align:center!important;align-items:center!important}.align-items-xl-baseline{-ms-flex-align:baseline!important;align-items:baseline!important}.align-items-xl-stretch{-ms-flex-align:stretch!important;align-items:stretch!important}.align-content-xl-start{-ms-flex-line-pack:start!important;align-content:flex-start!important}.align-content-xl-end{-ms-flex-line-pack:end!important;align-content:flex-end!important}.align-content-xl-center{-ms-flex-line-pack:center!important;align-content:center!important}.align-content-xl-between{-ms-flex-line-pack:justify!important;align-content:space-between!important}.align-content-xl-around{-ms-flex-line-pack:distribute!important;align-content:space-around!important}.align-content-xl-stretch{-ms-flex-line-pack:stretch!important;align-content:stretch!important}.align-self-xl-auto{-ms-flex-item-align:auto!important;align-self:auto!important}.align-self-xl-start{-ms-flex-item-align:start!important;align-self:flex-start!important}.align-self-xl-end{-ms-flex-item-align:end!important;align-self:flex-end!important}.align-self-xl-center{-ms-flex-item-align:center!important;align-self:center!important}.align-self-xl-baseline{-ms-flex-item-align:baseline!important;align-self:baseline!important}.align-self-xl-stretch{-ms-flex-item-align:stretch!important;align-self:stretch!important}} 7 | /*# sourceMappingURL=bootstrap-grid.min.css.map */ --------------------------------------------------------------------------------
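Note on usage: the grid file above defines the .container/.row/.col-* layout classes plus the d-*, flex-*, justify-content-* and align-* utilities, with responsive variants at the sm (576px), md (768px), lg (992px) and xl (1200px) breakpoints visible in its media queries. A minimal sketch of how a template might consume it is below; the page itself and the image names are hypothetical, not files from this repo (Flask serves the repo's static/ directory at the /static/ URL path by default):

<!DOCTYPE html>
<html>
  <head>
    <link rel="stylesheet" href="/static/css/bootstrap-grid.min.css">
  </head>
  <body>
    <div class="container">
      <!-- .row is a flex container; the columns stack full-width on phones
           and split 50/50 from the md (min-width: 768px) breakpoint up -->
      <div class="row">
        <div class="col-12 col-md-6"><img src="/static/before.jpg" alt="input photo"></div>
        <div class="col-12 col-md-6"><img src="/static/after.png" alt="processed output"></div>
      </div>
    </div>
  </body>
</html>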