├── web ├── images │ ├── 1_color.png │ ├── 1_line.png │ ├── 2_color.png │ ├── 2_line.png │ ├── 3_color.png │ ├── 3_line.png │ ├── 4_color.png │ ├── 4_line.png │ ├── 5_color.png │ ├── 5_line.png │ ├── 6_color.png │ ├── 6_line.png │ ├── 7_color.png │ ├── 7_line.png │ └── a_line.png ├── image_examples │ ├── picasso.png │ ├── sanae.png │ ├── armscross.jpg │ └── armscross.png ├── colorpicker │ ├── img │ │ └── bootstrap-colorpicker │ │ │ ├── hue.png │ │ │ ├── alpha.png │ │ │ ├── saturation.png │ │ │ ├── hue-horizontal.png │ │ │ └── alpha-horizontal.png │ ├── css │ │ ├── bootstrap-colorpicker.min.css.map │ │ ├── bootstrap-colorpicker.min.css │ │ ├── bootstrap-colorpicker.css │ │ └── bootstrap-colorpicker.css.map │ └── js │ │ ├── bootstrap-colorpicker.min.js │ │ └── bootstrap-colorpicker.js ├── css │ └── grid.css ├── index.html ├── draw.html └── sketch.js ├── .gitignore ├── README.md ├── test_processing.py ├── download_images.py ├── utils.py ├── server.py ├── guess_colors.py └── main.py /web/images/1_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/1_color.png -------------------------------------------------------------------------------- /web/images/1_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/1_line.png -------------------------------------------------------------------------------- /web/images/2_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/2_color.png -------------------------------------------------------------------------------- /web/images/2_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/2_line.png -------------------------------------------------------------------------------- /web/images/3_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/3_color.png -------------------------------------------------------------------------------- /web/images/3_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/3_line.png -------------------------------------------------------------------------------- /web/images/4_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/4_color.png -------------------------------------------------------------------------------- /web/images/4_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/4_line.png -------------------------------------------------------------------------------- /web/images/5_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/5_color.png -------------------------------------------------------------------------------- /web/images/5_line.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/5_line.png -------------------------------------------------------------------------------- /web/images/6_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/6_color.png -------------------------------------------------------------------------------- /web/images/6_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/6_line.png -------------------------------------------------------------------------------- /web/images/7_color.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/7_color.png -------------------------------------------------------------------------------- /web/images/7_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/7_line.png -------------------------------------------------------------------------------- /web/images/a_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/images/a_line.png -------------------------------------------------------------------------------- /web/image_examples/picasso.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/image_examples/picasso.png -------------------------------------------------------------------------------- /web/image_examples/sanae.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/image_examples/sanae.png -------------------------------------------------------------------------------- /web/image_examples/armscross.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/image_examples/armscross.jpg -------------------------------------------------------------------------------- /web/image_examples/armscross.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/image_examples/armscross.png -------------------------------------------------------------------------------- /web/colorpicker/img/bootstrap-colorpicker/hue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/colorpicker/img/bootstrap-colorpicker/hue.png -------------------------------------------------------------------------------- /web/colorpicker/img/bootstrap-colorpicker/alpha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/colorpicker/img/bootstrap-colorpicker/alpha.png -------------------------------------------------------------------------------- /web/colorpicker/img/bootstrap-colorpicker/saturation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/colorpicker/img/bootstrap-colorpicker/saturation.png 
-------------------------------------------------------------------------------- /web/colorpicker/img/bootstrap-colorpicker/hue-horizontal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/colorpicker/img/bootstrap-colorpicker/hue-horizontal.png -------------------------------------------------------------------------------- /web/colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kvfrans/deepcolor/HEAD/web/colorpicker/img/bootstrap-colorpicker/alpha-horizontal.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | imgs/ 2 | results_straight/ 3 | results_colortrain/ 4 | oldres/ 5 | checkpoint/ 6 | checkpoint_old/ 7 | old_results/ 8 | uploaded/ 9 | main.pyc 10 | utils.pyc 11 | server.pyc 12 | ops.pyc 13 | cv.py 14 | cv2.so 15 | *.pyc 16 | -------------------------------------------------------------------------------- /web/css/grid.css: -------------------------------------------------------------------------------- 1 | h4 { 2 | margin-top: 25px; 3 | } 4 | .row { 5 | margin-bottom: 20px; 6 | } 7 | .row .row { 8 | margin-top: 10px; 9 | margin-bottom: 0; 10 | } 11 | [class*="col-"] { 12 | padding-top: 15px; 13 | padding-bottom: 15px; 14 | background-color: #eee; 15 | background-color: rgba(86,61,124,.15); 16 | border: 1px solid #ddd; 17 | border: 1px solid rgba(86,61,124,.2); 18 | } 19 | 20 | hr { 21 | margin-top: 40px; 22 | margin-bottom: 40px; 23 | } 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # deepcolor 2 | 3 | Automatic coloring and shading of manga-style lineart, using Tensorflow + cGANs 4 | 5 | ![](http://kvfrans.com/content/images/2017/03/Screen-Shot-2017-03-01-at-11-09-09-PM-1.png) 6 | 7 | ![](http://kvfrans.com/content/images/2017/03/Screen-Shot-2017-03-01-at-11-09-13-PM.png) 8 | 9 | Read the writeup: 10 | http://kvfrans.com/coloring-and-shading-line-art-automatically-through-conditional-gans/ 11 | 12 | Try the demo: 13 | http://color.kvfrans.com 14 | 15 | ## Setup 16 | 17 | ### Prereqs 18 | - Python 2.7, numpy 19 | - Tensorflow 0.12 20 | - OpenCV 21 | 22 | ### Running it 23 | 1. make a folder called "results" 24 | 2. make a folder called "imgs" 25 | 3. Fill the "imgs" folder with your own .jpg images, or run "download_images.py" to download from Safebooru. 26 | 4. Run "python main.py train". I trained for ~20 epochs, taking about 16 hours on one GPU. 27 | 5. To sample, run "python main.py sample" 28 | 6. To start the server, run "python server.py". It will host on port 8000. 29 | 30 | ### Pre-trained 31 | 32 | Get the pretrained model: 33 | https://drive.google.com/file/d/0BydPPLNieijIdDlUYWxhelEwRnM/view?usp=sharing 34 | 35 | Folder structure should go: 36 | ``` 37 | main.py 38 | server.py 39 | checkpoint/ 40 | tr/ 41 | checkpoint 42 | model-1101500.index 43 | model-1101500.data-00000-of-00001 44 | model-1101500.meta 45 | ``` 46 | 47 | 48 | Code based off [this pix2pix implementation](https://github.com/yenchenlin/pix2pix-tensorflow). 
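For quick experiments outside the web UI, the trained generator can also be driven directly from Python. The snippet below is only a sketch, not part of the repo: it mirrors how `server.py` feeds the `Color` model, assumes `main.py` exposes `Color` exactly as `server.py` uses it plus the checkpoint layout above, uses `my_lineart.png` as a placeholder filename, and stands in an all-white hint image for "no color hint".

```
# Sketch only: single-image colorization, mirroring server.py's use of Color.
import cv2
import numpy as np
from main import Color

c = Color(512, 1)                # 512x512 model, batch size 1 (as in server.py)
c.loadmodel(load_discrim=False)  # load generator weights only

line = cv2.imread("my_lineart.png", 0)       # grayscale line art (placeholder path)
line = cv2.resize(line, (512, 512)) / 255.0  # white paper ~1.0, black lines ~0.0
line = line.reshape(1, 512, 512, 1)

hint = np.ones((1, 512, 512, 3))             # plain white hint = let the net pick colors

out = c.sess.run(c.generated_images,
                 feed_dict={c.line_images: line, c.color_images: hint})
cv2.imwrite("colored.png", out[0] * 255)     # scale back to 0-255 for saving, as server.py does
```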
49 | -------------------------------------------------------------------------------- /web/colorpicker/css/bootstrap-colorpicker.min.css.map: -------------------------------------------------------------------------------- 1 | {"version":3,"sources":["src/less/colorpicker.less"],"names":[],"mappings":";;;;;;;;AAqBA,wBACE,MAAA,MACA,OAAA,MAXA,iBAAsB,iDAatB,OAAA,UACA,MAAA,KACA,0BACE,QAAA,MACA,OAAA,IACA,MAAA,IACA,OAAA,IAAA,MAAA,KAfF,sBAAA,IACA,mBAAA,IACA,cAAA,IAeE,SAAA,SACA,IAAA,EACA,KAAA,EACA,OAAA,KAAA,EAAA,EAAA,KACA,4BACE,QAAA,MACA,OAAA,IACA,MAAA,IACA,OAAA,IAAA,MAAA,KAzBJ,sBAAA,IACA,mBAAA,IACA,cAAA,IA8BF,mBADA,iBAEE,MAAA,KACA,OAAA,MACA,MAAA,KACA,OAAA,WACA,YAAA,IACA,cAAA,IAIiB,qBADF,mBAEf,QAAA,MACA,OAAA,IACA,WAAA,KACA,WAAA,IAAA,MAAA,KACA,SAAA,SACA,IAAA,EACA,KAAA,EACA,MAAA,KACA,WAAA,KAGF,iBA1DE,iBAAsB,0CA8DxB,mBA9DE,iBAAsB,4CAgEtB,QAAA,KAKF,mBADA,iBADA,wBAGE,gBAAA,QAGF,aACE,QAAA,IACA,UAAA,MACA,WAAA,IAxEA,sBAAA,IACA,mBAAA,IACA,cAAA,IAwEA,QAAA,KAIU,mBADA,oBAEV,QAAA,MACA,QAAA,GACA,YAAA,EAGU,mBACV,MAAA,KAGU,oBACV,QAAA,GACA,QAAA,aACA,YAAA,IAAA,MAAA,YACA,aAAA,IAAA,MAAA,YACA,cAAA,IAAA,MAAA,KACA,oBAAA,eACA,SAAA,SACA,IAAA,KACA,KAAA,IAGU,mBACV,QAAA,GACA,QAAA,aACA,YAAA,IAAA,MAAA,YACA,aAAA,IAAA,MAAA,YACA,cAAA,IAAA,MAAA,KACA,SAAA,SACA,IAAA,KACA,KAAA,IAGW,iBACX,SAAA,SAGU,oCACV,UAAA,MAGkC,uDAClC,QAAA,MAGF,mBACE,OAAA,KACA,WAAA,IACA,MAAA,KAlIA,iBAAsB,4CAoItB,oBAAA,EAAA,KAGiB,uBACjB,OAAA,KAGF,uBACE,QAAA,KACA,OAAA,KACA,WAAA,IACA,MAAA,KAGqB,yBACrB,OAAA,QACA,MAAA,KACA,OAAA,KACA,MAAA,KAGuB,2BACvB,YAAA,IAI2B,+BADW,0CAEtC,QAAA,aACA,OAAA,QACA,OAAA,KACA,eAAA,SACA,MAAA,KAGU,gCACV,SAAA,SACA,QAAA,aACA,MAAA,KACA,QAAA,KAGU,oCACV,MAAA,MACA,UAAA,MACA,OAAA,KAGkC,4DAClC,cAAA,IAGkC,uDAClC,MAAA,MAIkC,uDADA,qDAElC,MAAA,MACA,OAAA,KACA,MAAA,KACA,OAAA,WACA,YAAA,EACA,cAAA,IAIqD,yDADF,uDAEnD,QAAA,MACA,OAAA,KACA,WAAA,KACA,SAAA,SACA,IAAA,EACA,KAAA,EACA,MAAA,IACA,OAAA,KACA,WAAA,EAGkC,qDAlNlC,iBAAsB,qDAsNY,uDAtNlC,iBAAsB,uDA0NN,0BAChB,KAAA,KACA,MAAA,IAGgB,yBAChB,KAAA,KACA,MAAA,IAGmB,6BACnB,aAAA,EACA,YAAA,EAGmB,4BACnB,aAAA,EACA,YAAA,EAQC,uCAAA,qCAAA,4CAAA,2CAAA,iCACC,QAAA,MASD,sCAAA,oCAAA,2CAAA,0CAAA,gCACC,QAAA,KAIe,wCACjB,QAAA"} -------------------------------------------------------------------------------- /test_processing.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from matplotlib import pyplot as plt 4 | from glob import glob 5 | from random import randint 6 | 7 | data = glob("imgs/*.jpg") 8 | for imname in data: 9 | 10 | cimg = cv2.imread(imname,1) 11 | cimg = np.fliplr(cimg.reshape(-1,3)).reshape(cimg.shape) 12 | cimg = cv2.resize(cimg, (256,256)) 13 | 14 | img = cv2.imread(imname,0) 15 | 16 | # kernel = np.ones((5,5),np.float32)/25 17 | seg = np.ones_like(cimg) 18 | 19 | num_segs = 8 20 | seg_len = 256/num_segs 21 | 22 | for x in xrange(num_segs): 23 | for y in xrange(num_segs): 24 | seg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 0] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 0]) 25 | seg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 1] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 1]) 26 | seg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 2] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 2]) 27 | 28 | 29 | # img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) 30 | img_edge = cv2.adaptiveThreshold(img, 255, 31 | cv2.ADAPTIVE_THRESH_MEAN_C, 32 | cv2.THRESH_BINARY, 33 | blockSize=9, 34 | C=2) 35 | # img_edge = cv2.cvtColor(img_edge, cv2.COLOR_GRAY2RGB) 36 | # img_cartoon = 
cv2.bitwise_and(img, img_edge) 37 | 38 | plt.subplot(131),plt.imshow(cimg) 39 | plt.title('Original Image'), plt.xticks([]), plt.yticks([]) 40 | 41 | plt.subplot(132),plt.imshow(seg) 42 | plt.title('Edge Image'), plt.xticks([]), plt.yticks([]) 43 | 44 | plt.subplot(133),plt.imshow(img_edge,cmap = 'gray') 45 | plt.title('Edge Image'), plt.xticks([]), plt.yticks([]) 46 | 47 | plt.show() 48 | -------------------------------------------------------------------------------- /download_images.py: -------------------------------------------------------------------------------- 1 | import urllib2 2 | import urllib 3 | import json 4 | import numpy as np 5 | import cv2 6 | import untangle 7 | 8 | maxsize = 512 9 | 10 | # tags = ["asu_tora","puuakachan","mankun","hammer_%28sunset_beach%29",""] 11 | 12 | # for tag in tags: 13 | 14 | count = 0 15 | 16 | for i in xrange(10000): 17 | stringreturn = urllib2.urlopen("http://safebooru.org/index.php?page=dapi&s=post&q=index&tags=1girl%20solo&pid="+str(i+3000)).read() 18 | xmlreturn = untangle.parse(stringreturn) 19 | for post in xmlreturn.posts.post: 20 | imgurl = "http:" + post["sample_url"] 21 | print imgurl 22 | if ("png" in imgurl) or ("jpg" in imgurl): 23 | 24 | resp = urllib.urlopen(imgurl) 25 | image = np.asarray(bytearray(resp.read()), dtype="uint8") 26 | image = cv2.imdecode(image, cv2.IMREAD_COLOR) 27 | height, width = image.shape[:2] 28 | if height > width: 29 | scalefactor = (maxsize*1.0) / width 30 | res = cv2.resize(image,(int(width * scalefactor), int(height*scalefactor)), interpolation = cv2.INTER_CUBIC) 31 | cropped = res[0:maxsize,0:maxsize] 32 | if width > height: 33 | scalefactor = (maxsize*1.0) / height 34 | res = cv2.resize(image,(int(width * scalefactor), int(height*scalefactor)), interpolation = cv2.INTER_CUBIC) 35 | center_x = int(round(width*scalefactor*0.5)) 36 | print center_x 37 | cropped = res[0:maxsize,center_x - maxsize/2:center_x + maxsize/2] 38 | 39 | # img_edge = cv2.adaptiveThreshold(cropped, 255, 40 | # cv2.ADAPTIVE_THRESH_MEAN_C, 41 | # cv2.THRESH_BINARY, 42 | # blockSize=9, 43 | # C=2) 44 | 45 | count += 1 46 | cv2.imwrite("imgs/"+str(count)+".jpg",cropped) 47 | # cv2.imwrite("imgs/"+str(post["id"])+"-edge.jpg",img_edge) 48 | -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | the color move 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
[index.html: the HTML markup was stripped in this dump; only the page's visible text survives. It is an examples gallery titled "the color move" with the following captions:]
Examples
Only line image [source]
Line image with color hints
Fine art [source (Picasso)]
Picasso with color hint
Confusing background
Heterochromia
My bad attempt at drawing
100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /web/colorpicker/css/bootstrap-colorpicker.min.css: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bootstrap Colorpicker v2.5.1 3 | * https://itsjavi.com/bootstrap-colorpicker/ 4 | * 5 | * Originally written by (c) 2012 Stefan Petre 6 | * Licensed under the Apache License v2.0 7 | * http://www.apache.org/licenses/LICENSE-2.0.txt 8 | * 9 | */.colorpicker-saturation{width:100px;height:100px;background-image:url(../img/bootstrap-colorpicker/saturation.png);cursor:crosshair;float:left}.colorpicker-saturation i{display:block;height:5px;width:5px;border:1px solid #000;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px;position:absolute;top:0;left:0;margin:-4px 0 0 -4px}.colorpicker-saturation i b{display:block;height:5px;width:5px;border:1px solid #fff;-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.colorpicker-alpha,.colorpicker-hue{width:15px;height:100px;float:left;cursor:row-resize;margin-left:4px;margin-bottom:4px}.colorpicker-alpha i,.colorpicker-hue i{display:block;height:1px;background:#000;border-top:1px solid #fff;position:absolute;top:0;left:0;width:100%;margin-top:-1px}.colorpicker-hue{background-image:url(../img/bootstrap-colorpicker/hue.png)}.colorpicker-alpha{background-image:url(../img/bootstrap-colorpicker/alpha.png);display:none}.colorpicker-alpha,.colorpicker-hue,.colorpicker-saturation{background-size:contain}.colorpicker{padding:4px;min-width:130px;margin-top:1px;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px;z-index:2500}.colorpicker:after,.colorpicker:before{display:table;content:"";line-height:0}.colorpicker:after{clear:both}.colorpicker:before{content:'';display:inline-block;border-left:7px solid transparent;border-right:7px solid transparent;border-bottom:7px solid #ccc;border-bottom-color:rgba(0,0,0,.2);position:absolute;top:-7px;left:6px}.colorpicker:after{content:'';display:inline-block;border-left:6px solid transparent;border-right:6px solid transparent;border-bottom:6px solid #fff;position:absolute;top:-6px;left:7px}.colorpicker div{position:relative}.colorpicker.colorpicker-with-alpha{min-width:140px}.colorpicker.colorpicker-with-alpha .colorpicker-alpha{display:block}.colorpicker-color{height:10px;margin-top:5px;clear:both;background-image:url(../img/bootstrap-colorpicker/alpha.png);background-position:0 100%}.colorpicker-color div{height:10px}.colorpicker-selectors{display:none;height:10px;margin-top:5px;clear:both}.colorpicker-selectors i{cursor:pointer;float:left;height:10px;width:10px}.colorpicker-selectors i+i{margin-left:3px}.colorpicker-element .add-on i,.colorpicker-element .input-group-addon i{display:inline-block;cursor:pointer;height:16px;vertical-align:text-top;width:16px}.colorpicker.colorpicker-inline{position:relative;display:inline-block;float:none;z-index:auto}.colorpicker.colorpicker-horizontal{width:110px;min-width:110px;height:auto}.colorpicker.colorpicker-horizontal .colorpicker-saturation{margin-bottom:4px}.colorpicker.colorpicker-horizontal .colorpicker-color{width:100px}.colorpicker.colorpicker-horizontal .colorpicker-alpha,.colorpicker.colorpicker-horizontal .colorpicker-hue{width:100px;height:15px;float:left;cursor:col-resize;margin-left:0;margin-bottom:4px}.colorpicker.colorpicker-horizontal .colorpicker-alpha i,.colorpicker.colorpicker-horizontal .colorpicker-hue 
i{display:block;height:15px;background:#fff;position:absolute;top:0;left:0;width:1px;border:none;margin-top:0}.colorpicker.colorpicker-horizontal .colorpicker-hue{background-image:url(../img/bootstrap-colorpicker/hue-horizontal.png)}.colorpicker.colorpicker-horizontal .colorpicker-alpha{background-image:url(../img/bootstrap-colorpicker/alpha-horizontal.png)}.colorpicker-right:before{left:auto;right:6px}.colorpicker-right:after{left:auto;right:7px}.colorpicker-no-arrow:before{border-right:0;border-left:0}.colorpicker-no-arrow:after{border-right:0;border-left:0}.colorpicker-alpha.colorpicker-visible,.colorpicker-hue.colorpicker-visible,.colorpicker-saturation.colorpicker-visible,.colorpicker-selectors.colorpicker-visible,.colorpicker.colorpicker-visible{display:block}.colorpicker-alpha.colorpicker-hidden,.colorpicker-hue.colorpicker-hidden,.colorpicker-saturation.colorpicker-hidden,.colorpicker-selectors.colorpicker-hidden,.colorpicker.colorpicker-hidden{display:none}.colorpicker-inline.colorpicker-visible{display:inline-block} 10 | /*# sourceMappingURL=bootstrap-colorpicker.min.css.map */ -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import cv2 4 | 5 | class batch_norm(object): 6 | # h1 = lrelu(tf.contrib.layers.batch_norm(conv2d(h0, self.df_dim*2, name='d_h1_conv'),decay=0.9,updates_collections=None,epsilon=0.00001,scale=True,scope="d_h1_conv")) 7 | def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"): 8 | with tf.variable_scope(name): 9 | self.epsilon = epsilon 10 | self.momentum = momentum 11 | self.name = name 12 | 13 | def __call__(self, x, train=True): 14 | return tf.contrib.layers.batch_norm(x, decay=self.momentum, updates_collections=None, epsilon=self.epsilon, scale=True, scope=self.name) 15 | 16 | batchnorm_count = 0 17 | def bnreset(): 18 | global batchnorm_count 19 | batchnorm_count = 0 20 | def bn(x): 21 | global batchnorm_count 22 | batch_object = batch_norm(name=("bn" + str(batchnorm_count))) 23 | batchnorm_count += 1 24 | return batch_object(x) 25 | 26 | def conv2d(input_, output_dim, 27 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, 28 | name="conv2d"): 29 | with tf.variable_scope(name): 30 | w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], 31 | initializer=tf.truncated_normal_initializer(stddev=stddev)) 32 | conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME') 33 | 34 | biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0)) 35 | conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) 36 | 37 | return conv 38 | 39 | def deconv2d(input_, output_shape, 40 | k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02, 41 | name="deconv2d", with_w=False): 42 | with tf.variable_scope(name): 43 | # filter : [height, width, output_channels, in_channels] 44 | w = tf.get_variable('w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], initializer=tf.random_normal_initializer(stddev=stddev)) 45 | deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) 46 | biases = tf.get_variable('biases', [output_shape[-1]], initializer=tf.constant_initializer(0.0)) 47 | deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) 48 | if with_w: 49 | return deconv, w, biases 50 | else: 51 | return deconv 52 | 53 | 54 | def lrelu(x, leak=0.2, name="lrelu"): 55 | return tf.maximum(x, 
leak*x) 56 | 57 | def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False): 58 | shape = input_.get_shape().as_list() 59 | with tf.variable_scope(scope or "Linear"): 60 | matrix = tf.get_variable("Matrix", [shape[1], output_size], tf.float32, 61 | tf.random_normal_initializer(stddev=stddev)) 62 | bias = tf.get_variable("bias", [output_size], 63 | initializer=tf.constant_initializer(bias_start)) 64 | if with_w: 65 | return tf.matmul(input_, matrix) + bias, matrix, bias 66 | else: 67 | return tf.matmul(input_, matrix) + bias 68 | 69 | def get_image(image_path): 70 | return transform(imread(image_path)) 71 | 72 | def transform(image, npx=512, is_crop=True): 73 | cropped_image = cv2.resize(image, (256,256)) 74 | 75 | return np.array(cropped_image) 76 | 77 | def imread(path): 78 | readimage = cv2.imread(path, 1) 79 | return readimage 80 | 81 | def merge_color(images, size): 82 | h, w = images.shape[1], images.shape[2] 83 | img = np.zeros((h * size[0], w * size[1], 3)) 84 | 85 | for idx, image in enumerate(images): 86 | i = idx % size[1] 87 | j = idx / size[1] 88 | img[j*h:j*h+h, i*w:i*w+w, :] = image 89 | 90 | return img 91 | 92 | def merge(images, size): 93 | h, w = images.shape[1], images.shape[2] 94 | img = np.zeros((h * size[0], w * size[1], 1)) 95 | 96 | for idx, image in enumerate(images): 97 | i = idx % size[1] 98 | j = idx / size[1] 99 | img[j*h:j*h+h, i*w:i*w+w] = image 100 | 101 | return img[:,:,0] 102 | 103 | def ims(name, img): 104 | print "saving img " + name 105 | cv2.imwrite(name, img*255) 106 | -------------------------------------------------------------------------------- /web/colorpicker/css/bootstrap-colorpicker.css: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Bootstrap Colorpicker v2.5.1 3 | * https://itsjavi.com/bootstrap-colorpicker/ 4 | * 5 | * Originally written by (c) 2012 Stefan Petre 6 | * Licensed under the Apache License v2.0 7 | * http://www.apache.org/licenses/LICENSE-2.0.txt 8 | * 9 | */ 10 | .colorpicker-saturation { 11 | width: 100px; 12 | height: 100px; 13 | background-image: url("../img/bootstrap-colorpicker/saturation.png"); 14 | cursor: crosshair; 15 | float: left; 16 | } 17 | .colorpicker-saturation i { 18 | display: block; 19 | height: 5px; 20 | width: 5px; 21 | border: 1px solid #000; 22 | -webkit-border-radius: 5px; 23 | -moz-border-radius: 5px; 24 | border-radius: 5px; 25 | position: absolute; 26 | top: 0; 27 | left: 0; 28 | margin: -4px 0 0 -4px; 29 | } 30 | .colorpicker-saturation i b { 31 | display: block; 32 | height: 5px; 33 | width: 5px; 34 | border: 1px solid #fff; 35 | -webkit-border-radius: 5px; 36 | -moz-border-radius: 5px; 37 | border-radius: 5px; 38 | } 39 | .colorpicker-hue, 40 | .colorpicker-alpha { 41 | width: 15px; 42 | height: 100px; 43 | float: left; 44 | cursor: row-resize; 45 | margin-left: 4px; 46 | margin-bottom: 4px; 47 | } 48 | .colorpicker-hue i, 49 | .colorpicker-alpha i { 50 | display: block; 51 | height: 1px; 52 | background: #000; 53 | border-top: 1px solid #fff; 54 | position: absolute; 55 | top: 0; 56 | left: 0; 57 | width: 100%; 58 | margin-top: -1px; 59 | } 60 | .colorpicker-hue { 61 | background-image: url("../img/bootstrap-colorpicker/hue.png"); 62 | } 63 | .colorpicker-alpha { 64 | background-image: url("../img/bootstrap-colorpicker/alpha.png"); 65 | display: none; 66 | } 67 | .colorpicker-saturation, 68 | .colorpicker-hue, 69 | .colorpicker-alpha { 70 | background-size: contain; 71 | } 72 | .colorpicker { 73 | padding: 4px; 74 | min-width: 130px; 75 | margin-top: 1px; 76 | -webkit-border-radius: 4px; 77 | -moz-border-radius: 4px; 78 | border-radius: 4px; 79 | z-index: 2500; 80 | } 81 | .colorpicker:before, 82 | .colorpicker:after { 83 | display: table; 84 | content: ""; 85 | line-height: 0; 86 | } 87 | .colorpicker:after { 88 | clear: both; 89 | } 90 | .colorpicker:before { 91 | content: ''; 92 | display: inline-block; 93 | border-left: 7px solid transparent; 94 | border-right: 7px solid transparent; 95 | border-bottom: 7px solid #ccc; 96 | border-bottom-color: rgba(0, 0, 0, 0.2); 97 | position: absolute; 98 | top: -7px; 99 | left: 6px; 100 | } 101 | .colorpicker:after { 102 | content: ''; 103 | display: inline-block; 104 | border-left: 6px solid transparent; 105 | border-right: 6px solid transparent; 106 | border-bottom: 6px solid #ffffff; 107 | position: absolute; 108 | top: -6px; 109 | left: 7px; 110 | } 111 | .colorpicker div { 112 | position: relative; 113 | } 114 | .colorpicker.colorpicker-with-alpha { 115 | min-width: 140px; 116 | } 117 | .colorpicker.colorpicker-with-alpha .colorpicker-alpha { 118 | display: block; 119 | } 120 | .colorpicker-color { 121 | height: 10px; 122 | margin-top: 5px; 123 | clear: both; 124 | background-image: url("../img/bootstrap-colorpicker/alpha.png"); 125 | background-position: 0 100%; 126 | } 127 | .colorpicker-color div { 128 | height: 10px; 129 | } 130 | .colorpicker-selectors { 131 | display: none; 132 | height: 10px; 133 | margin-top: 5px; 134 | clear: both; 135 | } 136 | .colorpicker-selectors i { 137 | cursor: pointer; 138 | float: left; 139 | height: 10px; 140 | width: 10px; 141 | } 142 | .colorpicker-selectors i + i { 143 | margin-left: 3px; 144 | } 145 | .colorpicker-element .input-group-addon i, 146 | .colorpicker-element .add-on i 
{ 147 | display: inline-block; 148 | cursor: pointer; 149 | height: 16px; 150 | vertical-align: text-top; 151 | width: 16px; 152 | } 153 | .colorpicker.colorpicker-inline { 154 | position: relative; 155 | display: inline-block; 156 | float: none; 157 | z-index: auto; 158 | } 159 | .colorpicker.colorpicker-horizontal { 160 | width: 110px; 161 | min-width: 110px; 162 | height: auto; 163 | } 164 | .colorpicker.colorpicker-horizontal .colorpicker-saturation { 165 | margin-bottom: 4px; 166 | } 167 | .colorpicker.colorpicker-horizontal .colorpicker-color { 168 | width: 100px; 169 | } 170 | .colorpicker.colorpicker-horizontal .colorpicker-hue, 171 | .colorpicker.colorpicker-horizontal .colorpicker-alpha { 172 | width: 100px; 173 | height: 15px; 174 | float: left; 175 | cursor: col-resize; 176 | margin-left: 0px; 177 | margin-bottom: 4px; 178 | } 179 | .colorpicker.colorpicker-horizontal .colorpicker-hue i, 180 | .colorpicker.colorpicker-horizontal .colorpicker-alpha i { 181 | display: block; 182 | height: 15px; 183 | background: #ffffff; 184 | position: absolute; 185 | top: 0; 186 | left: 0; 187 | width: 1px; 188 | border: none; 189 | margin-top: 0px; 190 | } 191 | .colorpicker.colorpicker-horizontal .colorpicker-hue { 192 | background-image: url("../img/bootstrap-colorpicker/hue-horizontal.png"); 193 | } 194 | .colorpicker.colorpicker-horizontal .colorpicker-alpha { 195 | background-image: url("../img/bootstrap-colorpicker/alpha-horizontal.png"); 196 | } 197 | .colorpicker-right:before { 198 | left: auto; 199 | right: 6px; 200 | } 201 | .colorpicker-right:after { 202 | left: auto; 203 | right: 7px; 204 | } 205 | .colorpicker-no-arrow:before { 206 | border-right: 0; 207 | border-left: 0; 208 | } 209 | .colorpicker-no-arrow:after { 210 | border-right: 0; 211 | border-left: 0; 212 | } 213 | .colorpicker.colorpicker-visible, 214 | .colorpicker-alpha.colorpicker-visible, 215 | .colorpicker-saturation.colorpicker-visible, 216 | .colorpicker-hue.colorpicker-visible, 217 | .colorpicker-selectors.colorpicker-visible { 218 | display: block; 219 | } 220 | .colorpicker.colorpicker-hidden, 221 | .colorpicker-alpha.colorpicker-hidden, 222 | .colorpicker-saturation.colorpicker-hidden, 223 | .colorpicker-hue.colorpicker-hidden, 224 | .colorpicker-selectors.colorpicker-hidden { 225 | display: none; 226 | } 227 | .colorpicker-inline.colorpicker-visible { 228 | display: inline-block; 229 | } 230 | /*# sourceMappingURL=bootstrap-colorpicker.css.map */ -------------------------------------------------------------------------------- /server.py: -------------------------------------------------------------------------------- 1 | from bottle import route, run, template, static_file, get, post, request, BaseRequest 2 | import urllib2 3 | import cv2 4 | import numpy as np 5 | import re 6 | import base64 7 | import tensorflow as tf 8 | 9 | import main 10 | from main import * 11 | import guess_colors 12 | from guess_colors import * 13 | 14 | BaseRequest.MEMFILE_MAX = 1000 * 1000 15 | 16 | c = Color(512, 1) 17 | p = Palette(256, 1) 18 | 19 | c.loadmodel(load_discrim=False) 20 | p.loadmodel(c.sess, False) 21 | 22 | 23 | @route('/') 24 | def send_static(filename): 25 | return static_file(filename, root='web/') 26 | 27 | @route('/draw') 28 | def send_static(): 29 | return static_file("draw.html", root='web/') 30 | 31 | @route('/') 32 | def send_static(): 33 | return static_file("index.html", root='web/') 34 | 35 | def imageblur(cimg, sampling=False): 36 | if sampling: 37 | cimg = cimg * 0.3 + np.ones_like(cimg) * 0.7 * 255 38 
| else: 39 | for i in xrange(30): 40 | randx = randint(0,205) 41 | randy = randint(0,205) 42 | cimg[randx:randx+50, randy:randy+50] = 255 43 | return cv2.blur(cimg,(100,100)) 44 | 45 | @route("/standard_sanae", method="POST") 46 | def do_uploadtl(): 47 | lines_img = cv2.imread("web/image_examples/sanae.png", 1) 48 | lines_img = np.array(cv2.resize(lines_img, (512,512))) 49 | lines_img = cv2.adaptiveThreshold(cv2.cvtColor(lines_img, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) 50 | lines_img = cv2.merge((lines_img,lines_img,lines_img,255 - lines_img)) 51 | cnt = cv2.imencode(".png",lines_img)[1] 52 | return base64.b64encode(cnt) 53 | 54 | @route("/standard_armscross", method="POST") 55 | def do_uploadtl(): 56 | lines_img = cv2.imread("web/image_examples/armscross.png", 1) 57 | lines_img = np.array(cv2.resize(lines_img, (512,512))) 58 | lines_img = cv2.adaptiveThreshold(cv2.cvtColor(lines_img, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) 59 | lines_img = cv2.merge((lines_img,lines_img,lines_img,255 - lines_img)) 60 | cnt = cv2.imencode(".png",lines_img)[1] 61 | return base64.b64encode(cnt) 62 | 63 | @route("/standard_picasso", method="POST") 64 | def do_uploadtl(): 65 | lines_img = cv2.imread("web/image_examples/picasso.png", 1) 66 | lines_img = np.array(cv2.resize(lines_img, (512,512))) 67 | lines_img = cv2.adaptiveThreshold(cv2.cvtColor(lines_img, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) 68 | lines_img = cv2.merge((lines_img,lines_img,lines_img,255 - lines_img)) 69 | cnt = cv2.imencode(".png",lines_img)[1] 70 | return base64.b64encode(cnt) 71 | 72 | @route("/upload_toline", method="POST") 73 | def do_uploadtl(): 74 | print "Parsing line" 75 | img = request.files.get('img') 76 | lines_img = cv2.imdecode(np.fromstring(img.file.read(), np.uint8), cv2.CV_LOAD_IMAGE_UNCHANGED) 77 | lines_img = np.array(cv2.resize(lines_img, (512,512))) 78 | lines_img = cv2.adaptiveThreshold(cv2.cvtColor(lines_img, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) 79 | lines_img = cv2.merge((lines_img,lines_img,lines_img,255 - lines_img)) 80 | cnt = cv2.imencode(".png",lines_img)[1] 81 | return base64.b64encode(cnt) 82 | 83 | def imageblur(cimg, sampling=False): 84 | if sampling: 85 | cimg = cimg * 0.3 + np.ones_like(cimg) * 0.7 * 255 86 | else: 87 | for i in xrange(30): 88 | randx = randint(0,205) 89 | randy = randint(0,205) 90 | cimg[randx:randx+50, randy:randy+50] = 255 91 | return cv2.blur(cimg,(100,100)) 92 | 93 | @route('/upload_canvas', method='POST') 94 | def do_uploadc(): 95 | print "Got it" 96 | # lines = request.files.get('lines') 97 | # colors = request.files.get('colors') 98 | line_data = request.forms.get("lines") 99 | line_data = re.sub('^data:image/.+;base64,', '', line_data) 100 | line_s = base64.b64decode(line_data) 101 | line_img = np.fromstring(line_s, dtype=np.uint8) 102 | line_img = cv2.imdecode(line_img, -1) 103 | 104 | color_data = request.forms.get("colors") 105 | color_data = re.sub('^data:image/.+;base64,', '', color_data) 106 | color_s = base64.b64decode(color_data) 107 | color_img = np.fromstring(color_s, dtype=np.uint8) 108 | color_img = cv2.imdecode(color_img, -1) 109 | 110 | lines_img = np.array(cv2.resize(line_img, (512,512))) 111 | lines_img = np.array([lines_img]) / 255.0 112 | lines_img = lines_img[:,:,:,0] 113 | lines_img = np.expand_dims(lines_img, 3) 114 | 115 | color_img = color_img[:,:,:] * 
lines_img[0,:,:,:] 116 | colors_img = imageblur(color_img, True) 117 | colors_img = np.array([colors_img]) / 255.0 118 | colors_img = colors_img[:,:,:,0:3] 119 | generated = c.sess.run(c.generated_images, feed_dict={c.line_images: lines_img, c.color_images: colors_img}) 120 | cnt = cv2.imencode(".png",generated[0]*255)[1] 121 | return base64.b64encode(cnt) 122 | 123 | @route('/upload_lineonly', method='POST') 124 | def do_uploadc(): 125 | print "Got it" 126 | # lines = request.files.get('lines') 127 | # colors = request.files.get('colors') 128 | line_data = request.forms.get("lines") 129 | line_data = re.sub('^data:image/.+;base64,', '', line_data) 130 | line_s = base64.b64decode(line_data) 131 | line_img = np.fromstring(line_s, dtype=np.uint8) 132 | line_img = cv2.imdecode(line_img, -1) 133 | 134 | lines_img = np.array(cv2.resize(line_img, (512,512))) 135 | lines_img = np.array([lines_img]) / 255.0 136 | lines_img = lines_img[:,:,:,0] 137 | lines_img = np.expand_dims(lines_img, 3) 138 | 139 | lines_img_sm = np.array(cv2.resize(line_img, (256,256))) 140 | lines_img_sm = np.array([lines_img_sm]) / 255.0 141 | lines_img_sm = lines_img_sm[:,:,:,0] 142 | lines_img_sm = np.expand_dims(lines_img_sm, 3) 143 | 144 | random_z = np.random.normal(0, 1, [p.batch_size, p.z_dim]) 145 | 146 | color_img = p.sess.run(p.generated_images, feed_dict={p.line_images: lines_img_sm, p.guessed_z: random_z}) 147 | color_img = np.array([cv2.resize(x, (512,512), interpolation=cv2.INTER_NEAREST) for x in color_img])[0] 148 | 149 | color_img = color_img * 255.0 150 | colors_img = imageblur(color_img, True) 151 | colors_img = np.array([colors_img]) / 255.0 152 | colors_img = colors_img[:,:,:,0:3] 153 | generated = c.sess.run(c.generated_images, feed_dict={c.line_images: lines_img, c.color_images: colors_img}) 154 | cnt = cv2.imencode(".png",generated[0]*255)[1] 155 | return base64.b64encode(cnt) 156 | 157 | run(host="0.0.0.0", port=8000) 158 | -------------------------------------------------------------------------------- /web/colorpicker/css/bootstrap-colorpicker.css.map: -------------------------------------------------------------------------------- 1 | 
{"version":3,"sources":["src/less/colorpicker.less"],"names":[],"mappings":";;;;;;;;;AAqBA;EACE,YAAA;EACA,aAAA;EAXA,sBAAsB,8CAAtB;EAaA,iBAAA;EACA,WAAA;;AALF,uBAME;EACE,cAAA;EACA,WAAA;EACA,UAAA;EACA,sBAAA;EAfF,0BAAA;EACA,uBAAA;EACA,kBAAA;EAeE,kBAAA;EACA,MAAA;EACA,OAAA;EACA,qBAAA;;AAfJ,uBAME,EAUE;EACE,cAAA;EACA,WAAA;EACA,UAAA;EACA,sBAAA;EAzBJ,0BAAA;EACA,uBAAA;EACA,kBAAA;;AA6BF;AACA;EACE,WAAA;EACA,aAAA;EACA,WAAA;EACA,kBAAA;EACA,gBAAA;EACA,kBAAA;;AAGF,gBAAiB;AACjB,kBAAmB;EACjB,cAAA;EACA,WAAA;EACA,gBAAA;EACA,0BAAA;EACA,kBAAA;EACA,MAAA;EACA,OAAA;EACA,WAAA;EACA,gBAAA;;AAGF;EA1DE,sBAAsB,uCAAtB;;AA8DF;EA9DE,sBAAsB,yCAAtB;EAgEA,aAAA;;AAGF;AACA;AACA;EACE,wBAAA;;AAGF;EACE,YAAA;EACA,gBAAA;EACA,eAAA;EAxEA,0BAAA;EACA,uBAAA;EACA,kBAAA;EAwEA,aAAA;;AAGF,YAAY;AACZ,YAAY;EACV,cAAA;EACA,SAAS,EAAT;EACA,cAAA;;AAGF,YAAY;EACV,WAAA;;AAGF,YAAY;EACV,SAAS,EAAT;EACA,qBAAA;EACA,kCAAA;EACA,mCAAA;EACA,6BAAA;EACA,uCAAA;EACA,kBAAA;EACA,SAAA;EACA,SAAA;;AAGF,YAAY;EACV,SAAS,EAAT;EACA,qBAAA;EACA,kCAAA;EACA,mCAAA;EACA,gCAAA;EACA,kBAAA;EACA,SAAA;EACA,SAAA;;AAGF,YAAa;EACX,kBAAA;;AAGF,YAAY;EACV,gBAAA;;AAGF,YAAY,uBAAwB;EAClC,cAAA;;AAGF;EACE,YAAA;EACA,eAAA;EACA,WAAA;EAlIA,sBAAsB,yCAAtB;EAoIA,2BAAA;;AAGF,kBAAmB;EACjB,YAAA;;AAGF;EACE,aAAA;EACA,YAAA;EACA,eAAA;EACA,WAAA;;AAGF,sBAAuB;EACrB,eAAA;EACA,WAAA;EACA,YAAA;EACA,WAAA;;AAGF,sBAAuB,EAAE;EACvB,gBAAA;;AAGF,oBAAqB,mBAAmB;AACxC,oBAAqB,QAAQ;EAC3B,qBAAA;EACA,eAAA;EACA,YAAA;EACA,wBAAA;EACA,WAAA;;AAGF,YAAY;EACV,kBAAA;EACA,qBAAA;EACA,WAAA;EACA,aAAA;;AAGF,YAAY;EACV,YAAA;EACA,gBAAA;EACA,YAAA;;AAGF,YAAY,uBAAwB;EAClC,kBAAA;;AAGF,YAAY,uBAAwB;EAClC,YAAA;;AAGF,YAAY,uBAAwB;AACpC,YAAY,uBAAwB;EAClC,YAAA;EACA,YAAA;EACA,WAAA;EACA,kBAAA;EACA,gBAAA;EACA,kBAAA;;AAGF,YAAY,uBAAwB,iBAAiB;AACrD,YAAY,uBAAwB,mBAAmB;EACrD,cAAA;EACA,YAAA;EACA,mBAAA;EACA,kBAAA;EACA,MAAA;EACA,OAAA;EACA,UAAA;EACA,YAAA;EACA,eAAA;;AAGF,YAAY,uBAAwB;EAlNlC,sBAAsB,kDAAtB;;AAsNF,YAAY,uBAAwB;EAtNlC,sBAAsB,oDAAtB;;AA0NF,kBAAkB;EAChB,UAAA;EACA,UAAA;;AAGF,kBAAkB;EAChB,UAAA;EACA,UAAA;;AAGF,qBAAqB;EACnB,eAAA;EACA,cAAA;;AAGF,qBAAqB;EACnB,eAAA;EACA,cAAA;;AAQA,YAAC;AAAD,kBAAC;AAAD,uBAAC;AAAD,gBAAC;AAAD,sBAAC;EACC,cAAA;;AASF,YAAC;AAAD,kBAAC;AAAD,uBAAC;AAAD,gBAAC;AAAD,sBAAC;EACC,aAAA;;AAIJ,mBAAmB;EACjB,qBAAA","sourcesContent":["/*!\n * Bootstrap Colorpicker v2.5.1\n * https://itsjavi.com/bootstrap-colorpicker/\n *\n * Originally written by (c) 2012 Stefan Petre\n * Licensed under the Apache License v2.0\n * http://www.apache.org/licenses/LICENSE-2.0.txt\n *\n */\n@colorpicker-img-path: \"../img/bootstrap-colorpicker/\";\n\n.bgImg(@imgFilename) {\n background-image: url(\"@{colorpicker-img-path}@{imgFilename}\");\n}\n\n.borderRadius(@size) {\n -webkit-border-radius: @size;\n -moz-border-radius: @size;\n border-radius: @size;\n}\n\n.colorpicker-saturation {\n width: 100px;\n height: 100px;\n .bgImg('saturation.png');\n cursor: crosshair;\n float: left;\n i {\n display: block;\n height: 5px;\n width: 5px;\n border: 1px solid #000;\n .borderRadius(5px);\n position: absolute;\n top: 0;\n left: 0;\n margin: -4px 0 0 -4px;\n b {\n display: block;\n height: 5px;\n width: 5px;\n border: 1px solid #fff;\n .borderRadius(5px);\n }\n }\n}\n\n.colorpicker-hue,\n.colorpicker-alpha {\n width: 15px;\n height: 100px;\n float: left;\n cursor: row-resize;\n margin-left: 4px;\n margin-bottom: 4px;\n}\n\n.colorpicker-hue i,\n.colorpicker-alpha i {\n display: block;\n height: 1px;\n background: #000;\n border-top: 1px solid #fff;\n position: absolute;\n top: 0;\n left: 0;\n width: 100%;\n margin-top: -1px;\n}\n\n.colorpicker-hue {\n 
.bgImg('hue.png');\n}\n\n.colorpicker-alpha {\n .bgImg('alpha.png');\n display: none;\n}\n\n.colorpicker-saturation,\n.colorpicker-hue,\n.colorpicker-alpha {\n background-size: contain;\n}\n\n.colorpicker {\n padding: 4px;\n min-width: 130px;\n margin-top: 1px;\n .borderRadius(4px);\n z-index: 2500;\n}\n\n.colorpicker:before,\n.colorpicker:after {\n display: table;\n content: \"\";\n line-height: 0;\n}\n\n.colorpicker:after {\n clear: both;\n}\n\n.colorpicker:before {\n content: '';\n display: inline-block;\n border-left: 7px solid transparent;\n border-right: 7px solid transparent;\n border-bottom: 7px solid #ccc;\n border-bottom-color: rgba(0, 0, 0, 0.2);\n position: absolute;\n top: -7px;\n left: 6px;\n}\n\n.colorpicker:after {\n content: '';\n display: inline-block;\n border-left: 6px solid transparent;\n border-right: 6px solid transparent;\n border-bottom: 6px solid #ffffff;\n position: absolute;\n top: -6px;\n left: 7px;\n}\n\n.colorpicker div {\n position: relative;\n}\n\n.colorpicker.colorpicker-with-alpha {\n min-width: 140px;\n}\n\n.colorpicker.colorpicker-with-alpha .colorpicker-alpha {\n display: block;\n}\n\n.colorpicker-color {\n height: 10px;\n margin-top: 5px;\n clear: both;\n .bgImg('alpha.png');\n background-position: 0 100%;\n}\n\n.colorpicker-color div {\n height: 10px;\n}\n\n.colorpicker-selectors {\n display: none;\n height: 10px;\n margin-top: 5px;\n clear: both;\n}\n\n.colorpicker-selectors i {\n cursor: pointer;\n float: left;\n height: 10px;\n width: 10px;\n}\n\n.colorpicker-selectors i + i {\n margin-left: 3px;\n}\n\n.colorpicker-element .input-group-addon i,\n.colorpicker-element .add-on i {\n display: inline-block;\n cursor: pointer;\n height: 16px;\n vertical-align: text-top;\n width: 16px;\n}\n\n.colorpicker.colorpicker-inline {\n position: relative;\n display: inline-block;\n float: none;\n z-index: auto;\n}\n\n.colorpicker.colorpicker-horizontal {\n width: 110px;\n min-width: 110px;\n height: auto;\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-saturation {\n margin-bottom: 4px;\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-color {\n width: 100px;\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-hue,\n.colorpicker.colorpicker-horizontal .colorpicker-alpha {\n width: 100px;\n height: 15px;\n float: left;\n cursor: col-resize;\n margin-left: 0px;\n margin-bottom: 4px;\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-hue i,\n.colorpicker.colorpicker-horizontal .colorpicker-alpha i {\n display: block;\n height: 15px;\n background: #ffffff;\n position: absolute;\n top: 0;\n left: 0;\n width: 1px;\n border: none;\n margin-top: 0px;\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-hue {\n .bgImg('hue-horizontal.png');\n}\n\n.colorpicker.colorpicker-horizontal .colorpicker-alpha {\n .bgImg('alpha-horizontal.png');\n}\n\n.colorpicker-right:before {\n left: auto;\n right: 6px;\n}\n\n.colorpicker-right:after {\n left: auto;\n right: 7px;\n}\n\n.colorpicker-no-arrow:before {\n border-right: 0;\n border-left: 0;\n}\n\n.colorpicker-no-arrow:after {\n border-right: 0;\n border-left: 0;\n}\n\n.colorpicker,\n.colorpicker-alpha,\n.colorpicker-saturation,\n.colorpicker-hue,\n.colorpicker-selectors {\n &.colorpicker-visible {\n display: block;\n }\n}\n\n.colorpicker,\n.colorpicker-alpha,\n.colorpicker-saturation,\n.colorpicker-hue,\n.colorpicker-selectors {\n &.colorpicker-hidden {\n display: none;\n }\n}\n\n.colorpicker-inline.colorpicker-visible {\n display: inline-block;\n}\n"]} 
-------------------------------------------------------------------------------- /web/draw.html: -------------------------------------------------------------------------------- 1 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | the color move 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 |
[draw.html: the HTML markup was stripped in this dump; only the page's visible text survives. It is the interactive drawing page ("the color move"), whose UI walks through:]
Step 1: Create a line image
    Option 1: Upload an image to automatically detect edges (or try one of the examples)
    Option 2: Draw it
Step 2: Add color hints (messy is fine)
    Or don't put a hint, and try the auto-color
Step 3: Colorize!
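The three steps above are backed by the HTTP endpoints defined in server.py (/upload_toline, /upload_canvas, /upload_lineonly, and the /standard_* examples). As a rough illustration only, the Python 2 sketch below posts a line image and a color-hint image straight to /upload_canvas the same way the canvas page does; the filenames are placeholders, and it assumes server.py is running locally on port 8000.

```
# Sketch only: calling /upload_canvas without the browser. server.py expects
# 512x512 images as base64 data URLs in the "lines" and "colors" form fields
# and replies with a base64-encoded PNG.
import base64
import urllib
import urllib2

def to_data_url(path):
    with open(path, "rb") as f:
        return "data:image/png;base64," + base64.b64encode(f.read())

payload = urllib.urlencode({"lines": to_data_url("lineart.png"),   # placeholder filenames
                            "colors": to_data_url("hints.png")})
resp = urllib2.urlopen("http://localhost:8000/upload_canvas", payload).read()

with open("colored.png", "wb") as f:
    f.write(base64.b64decode(resp))
```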
162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | -------------------------------------------------------------------------------- /web/sketch.js: -------------------------------------------------------------------------------- 1 | 2 | // DISCLAIMER: yes this is *very* badly written. but it gets the job done! Make sure to check server.py to understand endpoints. 3 | 4 | var linecanvas = document.getElementById("line"); 5 | var linectx = linecanvas.getContext("2d"); 6 | linectx.lineCap = "round"; 7 | linectx.lineJoin = "round"; 8 | linectx.lineWidth = 3; 9 | 10 | var colorcanvas = document.getElementById("color"); 11 | var colorctx = colorcanvas.getContext("2d"); 12 | colorctx.lineCap = "round"; 13 | colorctx.lineJoin = "round"; 14 | colorctx.lineWidth = 15; 15 | 16 | 17 | colorctx.beginPath(); 18 | colorctx.rect(0, 0, 512, 512); 19 | colorctx.fillStyle = "white"; 20 | colorctx.fill(); 21 | 22 | var lastX; 23 | var lastY; 24 | 25 | var mouseX; 26 | var mouseY; 27 | var canvasOffset = $("#color").offset(); 28 | var offsetX = canvasOffset.left; 29 | var offsetY = canvasOffset.top; 30 | var isMouseDown = false; 31 | 32 | 33 | function handleMouseDown(e) { 34 | canvasOffset = $("#color").offset(); 35 | offsetX = canvasOffset.left; 36 | offsetY = canvasOffset.top; 37 | 38 | mouseX = parseInt(e.pageX - offsetX); 39 | mouseY = parseInt(e.pageY - offsetY); 40 | 41 | // Put your mousedown stuff here 42 | lastX = mouseX; 43 | lastY = mouseY; 44 | isMouseDown = true; 45 | } 46 | 47 | function handleMouseUp(e) { 48 | mouseX = parseInt(e.pageX - offsetX); 49 | mouseY = parseInt(e.pageY - offsetY); 50 | 51 | // Put your mouseup stuff here 52 | isMouseDown = false; 53 | } 54 | function handleMouseOut(e) { 55 | mouseX = parseInt(e.pageX - offsetX); 56 | mouseY = parseInt(e.pageY - offsetY); 57 | 58 | // Put your mouseOut stuff here 59 | isMouseDown = false; 60 | } 61 | 62 | function handleMouseMove(e) 63 | { 64 | canvasOffset = $("#color").offset(); 65 | offsetX = canvasOffset.left; 66 | offsetY = canvasOffset.top; 67 | 68 | // var x = e.pageX - offsetX; 69 | // var y = e.pageY - offsetY; 70 | 71 | mouseX = parseInt(e.pageX - offsetX); 72 | mouseY = parseInt(e.pageY - offsetY); 73 | // Put your mousemove stuff here 74 | if(isMouseDown) 75 | { 76 | if(mode == "pen") 77 | { 78 | linectx.beginPath(); 79 | linectx.globalCompositeOperation = "source-over"; 80 | linectx.moveTo(lastX, lastY); 81 | linectx.lineTo(mouseX, mouseY); 82 | linectx.stroke(); 83 | } 84 | else if(mode == "eraser") 85 | { 86 | linectx.beginPath(); 87 | linectx.globalCompositeOperation = "destination-out"; 88 | linectx.arc(lastX, lastY, 10, 0, Math.PI * 2, false); 89 | linectx.fill(); 90 | } 91 | else 92 | { 93 | colorctx.beginPath(); 94 | colorctx.strokeStyle = mode; 95 | colorctx.globalCompositeOperation = "source-over"; 96 | colorctx.moveTo(lastX, lastY); 97 | colorctx.lineTo(mouseX, mouseY); 98 | colorctx.stroke(); 99 | } 100 | lastX = mouseX; 101 | lastY = mouseY; 102 | } 103 | } 104 | 105 | $("#line").mousedown(function (e) { 106 | handleMouseDown(e); 107 | }); 108 | $("#line").mousemove(function (e) { 109 | handleMouseMove(e); 110 | }); 111 | $("#line").mouseup(function (e) { 112 | handleMouseUp(e); 113 | }); 114 | $("#line").mouseout(function (e) { 115 | handleMouseOut(e); 116 | }); 117 | 118 | var mode = "pen"; 119 | $("#pen").click(function () { 120 | mode = "pen"; 121 | }); 122 | $("#eraser").click(function () { 123 | mode = "eraser"; 124 | }); 125 | 126 | $(document).keypress(function(e) { 127 | console.log(e.which) 128 | 
if(e.which == 100) { 129 | mode = "pen"; 130 | } 131 | if(e.which == 101) 132 | { 133 | mode = "eraser"; 134 | } 135 | }); 136 | 137 | $("#uploadform").bind('submit', function (e) { 138 | e.preventDefault(); 139 | 140 | console.log("Uploadin"); 141 | var files = document.getElementById('fileselect').files; 142 | var formData = new FormData(); 143 | // Loop through each of the selected files. 144 | for (var i = 0; i < files.length; i++) 145 | { 146 | var file = files[i]; 147 | formData.append('img', file, file.name); 148 | } 149 | $.ajax({ 150 | url: '/upload_toline', 151 | data: formData, 152 | processData: false, 153 | contentType: false, 154 | type: 'POST', 155 | success: function(result){ 156 | var image = new Image(); 157 | image.onload = function() { 158 | colorctx.beginPath(); 159 | colorctx.rect(0, 0, 512, 512); 160 | colorctx.fillStyle = "white"; 161 | colorctx.fill(); 162 | linectx.clearRect(0, 0, 512, 512); 163 | linectx.drawImage(image, 0, 0); 164 | }; 165 | image.src = 'data:image/png;base64,' + result; 166 | } 167 | }); 168 | 169 | return false; 170 | }); 171 | 172 | function submit(url) 173 | { 174 | $("#submit").prop("disabled",true); 175 | $("#submit_autocolor").prop("disabled",true); 176 | $("#submit").html('Processing...'); 177 | $("#submit_autocolor").html('Processing...'); 178 | // change non-opaque pixels to white 179 | var imgData = linectx.getImageData(0,0,512,512); 180 | var data = imgData.data; 181 | var databackup = data.slice(0); 182 | for(var i = 0; i < data.length; i+=4) 183 | { 184 | if(data[i+3]<255) 185 | { 186 | data[i]=255; 187 | data[i+1]=255; 188 | data[i+2]=255; 189 | data[i+3]=255; 190 | } 191 | } 192 | 193 | linectx.putImageData(imgData,0,0); 194 | 195 | var dataURL = linecanvas.toDataURL("image/jpg"); 196 | var dataURLc = colorcanvas.toDataURL("image/jpg"); 197 | 198 | imgData = linectx.getImageData(0,0,512,512); 199 | data = imgData.data; 200 | for(var i = 0; i < data.length; i++) 201 | { 202 | data[i] = databackup[i]; 203 | } 204 | linectx.putImageData(imgData,0,0); 205 | // console.log(dataURL) 206 | 207 | $.ajax({ 208 | url: url, 209 | type: "POST", 210 | data: {colors: dataURLc, lines: dataURL}, 211 | // data: {lines: "meme"}, 212 | success: function (result) { 213 | // console.log("Upload complete!!"); 214 | // console.log(result.length); 215 | // console.log(result); 216 | $('#result').html(''); 217 | $("#submit").prop("disabled",false); 218 | $("#submit_autocolor").prop("disabled",false); 219 | 220 | $("#submit").html('Submit (using color hint)'); 221 | $("#submit_autocolor").html('Submit (auto color)'); 222 | }, 223 | error: function (error) { 224 | console.log("Something went wrong!"); 225 | } 226 | }); 227 | } 228 | 229 | $("#submit").click(function () { 230 | 231 | submit("/upload_canvas") 232 | }); 233 | 234 | $("#submit_autocolor").click(function () { 235 | 236 | submit("/upload_lineonly") 237 | }); 238 | 239 | 240 | 241 | 242 | 243 | 244 | 245 | 246 | 247 | 248 | 249 | 250 | 251 | $('#fileselect').change(function() { 252 | $('#uploadform').submit(); 253 | }); 254 | 255 | $(document).ready(function(){ 256 | $("#sanaebutton").click(function(){ 257 | $.ajax({ 258 | url: '/standard_sanae', 259 | data: "nothing", 260 | processData: false, 261 | contentType: false, 262 | type: 'POST', 263 | success: function(result){ 264 | var image = new Image(); 265 | image.onload = function() { 266 | colorctx.beginPath(); 267 | colorctx.rect(0, 0, 512, 512); 268 | colorctx.fillStyle = "white"; 269 | colorctx.fill(); 270 | linectx.clearRect(0, 0, 512, 
512); 271 | linectx.drawImage(image, 0, 0); 272 | }; 273 | image.src = 'data:image/png;base64,' + result; 274 | } 275 | }); 276 | }); 277 | $("#picassobutton").click(function(){ 278 | $.ajax({ 279 | url: '/standard_picasso', 280 | data: "nothing", 281 | processData: false, 282 | contentType: false, 283 | type: 'POST', 284 | success: function(result){ 285 | var image = new Image(); 286 | image.onload = function() { 287 | colorctx.beginPath(); 288 | colorctx.rect(0, 0, 512, 512); 289 | colorctx.fillStyle = "white"; 290 | colorctx.fill(); 291 | linectx.clearRect(0, 0, 512, 512); 292 | linectx.drawImage(image, 0, 0); 293 | }; 294 | image.src = 'data:image/png;base64,' + result; 295 | } 296 | }); 297 | }); 298 | $("#armsbutton").click(function(){ 299 | console.log("clicked"); 300 | $.ajax({ 301 | url: '/standard_armscross', 302 | data: "nothing", 303 | processData: false, 304 | contentType: false, 305 | type: 'POST', 306 | success: function(result){ 307 | var image = new Image(); 308 | image.onload = function() { 309 | colorctx.beginPath(); 310 | colorctx.rect(0, 0, 512, 512); 311 | colorctx.fillStyle = "white"; 312 | colorctx.fill(); 313 | linectx.clearRect(0, 0, 512, 512); 314 | linectx.drawImage(image, 0, 0); 315 | }; 316 | image.src = 'data:image/png;base64,' + result; 317 | } 318 | }); 319 | }); 320 | }); 321 | 322 | 323 | $(function() { 324 | $('#cp7').colorpicker({ 325 | color: '#ffaa00', 326 | container: true, 327 | inline: true 328 | }); 329 | $('#cp7').colorpicker().on('changeColor', function(e) { 330 | mode = e.color.toHex(); 331 | }); 332 | 333 | 334 | }); 335 | -------------------------------------------------------------------------------- /guess_colors.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import os 4 | from glob import glob 5 | import sys 6 | import math 7 | from random import randint 8 | 9 | from utils import * 10 | import utils 11 | 12 | class Palette(): 13 | def __init__(self, imgsize=256, batchsize=4): 14 | 15 | print "Loading Palatte" 16 | 17 | self.batch_size = batchsize 18 | self.batch_size_sqrt = int(math.sqrt(self.batch_size)) 19 | self.image_size = imgsize 20 | self.output_size = imgsize 21 | 22 | self.gf_dim = 64 23 | self.df_dim = 64 24 | self.z_dim = 64 25 | 26 | self.input_colors = 1 27 | self.input_colors2 = 3 28 | self.output_colors = 3 29 | 30 | bnreset() 31 | 32 | self.line_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.input_colors]) 33 | self.real_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size/16, self.image_size/16, self.output_colors]) 34 | 35 | with tf.variable_scope("col"): 36 | z_mean, z_stddev = self.encoder(self.real_images) 37 | samples = tf.random_normal([self.batch_size, self.z_dim], 0, 1, dtype=tf.float32) 38 | self.guessed_z = z_mean + (z_stddev * samples) 39 | 40 | # references: line_images, 41 | self.generated_images = self.generator(self.line_images, self.guessed_z) 42 | 43 | self.g_loss = tf.reduce_mean(tf.abs(self.real_images - self.generated_images)) * 100 44 | self.l_loss = tf.reduce_mean(0.5 * tf.reduce_sum(tf.square(z_mean) + tf.square(z_stddev) - tf.log(tf.square(z_stddev)) - 1, axis=1)) 45 | self.cost = tf.reduce_mean(self.g_loss + self.l_loss) 46 | 47 | t_vars = tf.trainable_variables() 48 | self.g_vars = [var for var in t_vars if ('col' in var.name)] 49 | self.g_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(self.cost, var_list=self.g_vars) 50 | 51 | def 
encoder(self, real_imgs): 52 | with tf.variable_scope(tf.get_variable_scope(), reuse=False): 53 | h0 = lrelu(conv2d(real_imgs, self.df_dim, name="e_h0_col")) #128 x 128 x 64 54 | h1 = lrelu(bn(conv2d(h0, self.df_dim, name="e_h1_col"))) #64 x 64 x 64 55 | h2 = lrelu(bn(conv2d(h1, self.df_dim, name="e_h2_col"))) #32 56 | h3 = lrelu(bn(conv2d(h2, self.df_dim, name="e_h3_col"))) #16 57 | h4 = lrelu(bn(conv2d(h3, self.df_dim, name="e_h4_col"))) #8 58 | h5 = lrelu(bn(conv2d(h4, self.df_dim, name="e_h5_col"))) #4 59 | mean = linear(tf.reshape(h5, [self.batch_size, -1]), self.z_dim, "e_mean_col") #(4*4*64) -> 64 60 | stddev = linear(tf.reshape(h5, [self.batch_size, -1]), self.z_dim, "e_stddev_col") #(4*4*64) -> 64 61 | return mean, stddev 62 | 63 | 64 | def generator(self, img_in, z): 65 | with tf.variable_scope(tf.get_variable_scope(), reuse=False): 66 | s = self.output_size 67 | s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128) 68 | 69 | z0 = linear(z, (self.image_size/64)*(self.image_size/64)*self.df_dim, "g_z0_col") # 4 x 4 x 64 70 | z1 = tf.reshape(z0, [self.batch_size, self.image_size/64, self.image_size/64, self.df_dim]) 71 | 72 | # image is (256 x 256 x input_c_dim) 73 | e1 = conv2d(img_in, self.gf_dim, name='g_e1_conv_col') # e1 is (128 x 128 x self.gf_dim) 74 | e2 = bn(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv_col')) # e2 is (64 x 64 x self.gf_dim*2) 75 | e3 = bn(conv2d(lrelu(e2), self.gf_dim*2, name='g_e3_conv_col')) # e3 is (32 x 32 x self.gf_dim*2) 76 | e4 = bn(conv2d(lrelu(e3), self.gf_dim*2, name='g_e4_conv_col')) # e4 is (16 x 16 x self.gf_dim*2) 77 | e5 = bn(conv2d(lrelu(e4), self.gf_dim*2, name='g_e5_conv_col')) # e4 is (8 x 8 x self.gf_dim*2) 78 | e6 = bn(conv2d(lrelu(e5), self.gf_dim*4, name='g_e6_conv_col')) # e4 is (4 x 4 x self.gf_dim*2) 79 | combined = tf.concat(3, [z1, e6]) 80 | e7 = bn(deconv2d(combined, [self.batch_size, self.image_size/32, self.image_size/32, self.gf_dim*4], name='g_e7_conv_col')) # e4 is (8 x 8 x self.gf_dim*2) 81 | e8 = deconv2d(lrelu(e7), [self.batch_size, self.image_size/16, self.image_size/16, 3], name='g_e8_conv_col') # e5 is (16 x 16 x 3) 82 | 83 | return tf.nn.tanh(e8) 84 | 85 | 86 | def imgprocess(self, cimg, sampling=False): 87 | num_segs = 16 88 | seg_len = 256/num_segs 89 | 90 | seg = np.ones((num_segs, num_segs, 3)) 91 | for x in xrange(num_segs): 92 | for y in xrange(num_segs): 93 | seg[x:(x+1), y:(y+1), 0] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 0]) 94 | seg[x:(x+1), y:(y+1), 1] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 1]) 95 | seg[x:(x+1), y:(y+1), 2] = np.average(cimg[x*seg_len:(x+1)*seg_len, y*seg_len:(y+1)*seg_len, 2]) 96 | return seg 97 | 98 | def train(self): 99 | s = tf.Session() 100 | s.run(tf.initialize_all_variables()) 101 | self.loadmodel(s) 102 | 103 | data = glob(os.path.join("imgs", "*.jpg")) 104 | print data[0] 105 | base = np.array([get_image(sample_file) for sample_file in data[0:self.batch_size]]) 106 | 107 | base_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in base]) / 255.0 108 | base_edge = np.expand_dims(base_edge, 3) 109 | 110 | base_colors = np.array([self.imgprocess(ba) for ba in base]) / 255.0 111 | 112 | ims("results/base_line.jpg",merge(base_edge, [self.batch_size_sqrt, self.batch_size_sqrt])) 113 | ims("results/base_colors.jpg",merge_color(np.array([cv2.resize(x, (256,256), 
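        # editor's note (descriptive only): base_colors holds the 16x16 per-cell colour averages
        # produced by imgprocess(); the cv2.resize(..., interpolation=cv2.INTER_NEAREST) on this line
        # upscales that grid back to 256x256 purely for visualisation, so each palette cell shows up
        # as a flat 16-pixel block alongside the extracted line art.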
interpolation=cv2.INTER_NEAREST) for x in base_colors]), [self.batch_size_sqrt, self.batch_size_sqrt])) 114 | 115 | datalen = len(data) 116 | 117 | for e in xrange(20000): 118 | for i in range(datalen / self.batch_size): 119 | batch_files = data[i*self.batch_size:(i+1)*self.batch_size] 120 | batch = np.array([get_image(batch_file) for batch_file in batch_files]) 121 | 122 | batch_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in batch]) / 255.0 123 | batch_edge = np.expand_dims(batch_edge, 3) 124 | 125 | batch_colors = np.array([self.imgprocess(ba) for ba in batch]) / 255.0 126 | 127 | g_loss, l_loss, _ = self.sess.run([self.g_loss, self.l_loss, self.g_optim], feed_dict={self.real_images: batch_colors, self.line_images: batch_edge}) 128 | 129 | print "%d: [%d / %d] l_loss %f, g_loss %f" % (e, i, (datalen/self.batch_size), l_loss, g_loss) 130 | 131 | if i % 100 == 0: 132 | recreation = self.sess.run(self.generated_images, feed_dict={self.real_images: base_colors, self.line_images: base_edge}) 133 | print recreation.shape 134 | ims("results/"+str(e*100000 + i)+"_base.jpg",merge_color(np.array([cv2.resize(x, (256,256), interpolation=cv2.INTER_NEAREST) for x in recreation]), [self.batch_size_sqrt, self.batch_size_sqrt])) 135 | 136 | recreation = self.sess.run(self.generated_images, feed_dict={self.real_images: batch_colors, self.line_images: batch_edge}) 137 | ims("results/"+str(e*100000 + i)+".jpg",merge_color(np.array([cv2.resize(x, (256,256), interpolation=cv2.INTER_NEAREST) for x in recreation]), [self.batch_size_sqrt, self.batch_size_sqrt])) 138 | ims("results/"+str(e*100000 + i)+"_line.jpg",merge(batch_edge, [self.batch_size_sqrt, self.batch_size_sqrt])) 139 | ims("results/"+str(e*100000 + i)+"_original.jpg",merge_color(np.array([cv2.resize(x, (256,256), interpolation=cv2.INTER_NEAREST) for x in batch_colors]), [self.batch_size_sqrt, self.batch_size_sqrt])) 140 | 141 | if i % 1000 == 0: 142 | self.save("./checkpoint", e*100000 + i) 143 | 144 | def loadmodel(self, sess, load_discrim=True): 145 | self.sess = sess 146 | # self.sess.run(tf.initialize_all_variables()) 147 | 148 | if load_discrim: 149 | self.saver = tf.train.Saver() 150 | else: 151 | self.saver = tf.train.Saver(self.g_vars) 152 | print [v.name for v in self.g_vars] 153 | 154 | if self.load("./checkpoint"): 155 | print "Loaded" 156 | else: 157 | print "Load failed" 158 | 159 | def sample(self): 160 | s = tf.Session() 161 | s.run(tf.initialize_all_variables()) 162 | self.loadmodel(s, False) 163 | 164 | data = glob(os.path.join("imgs", "*.jpg")) 165 | 166 | datalen = len(data) 167 | 168 | for i in range(min(100,datalen / self.batch_size)): 169 | batch_files = data[i*self.batch_size:(i+1)*self.batch_size] 170 | batch = np.array([cv2.resize(imread(batch_file), (256,256)) for batch_file in batch_files]) 171 | batch_normalized = batch/255.0 172 | 173 | random_z = np.random.normal(0, 1, [self.batch_size, self.z_dim]) 174 | 175 | batch_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in batch]) / 255.0 176 | batch_edge = np.expand_dims(batch_edge, 3) 177 | 178 | recreation = self.sess.run(self.generated_images, feed_dict={self.line_images: batch_edge, self.guessed_z: random_z}) 179 | ims("results/sample_"+str(i)+".jpg",merge_color(np.array([cv2.resize(x, (256,256), interpolation=cv2.INTER_NEAREST) for x in recreation]), 
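        # editor's note (descriptive only): in sample() the encoder is bypassed -- guessed_z is fed
        # directly with random_z drawn from N(0, 1) above -- so the 16x16 palette is sampled from the
        # prior rather than inferred from a ground-truth colour image; the nearest-neighbour resize on
        # this line only blows that 16x16 prediction up to 256x256 for inspection.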
[self.batch_size_sqrt, self.batch_size_sqrt])) 180 | ims("results/sample_"+str(i)+"_origin.jpg",merge_color(batch_normalized, [self.batch_size_sqrt, self.batch_size_sqrt])) 181 | ims("results/sample_"+str(i)+"_line.jpg",merge_color(batch_edge, [self.batch_size_sqrt, self.batch_size_sqrt])) 182 | 183 | 184 | def save(self, checkpoint_dir, step): 185 | model_name = "model" 186 | model_dir = "tr_colors" 187 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 188 | 189 | if not os.path.exists(checkpoint_dir): 190 | os.makedirs(checkpoint_dir) 191 | 192 | self.saver.save(self.sess, 193 | os.path.join(checkpoint_dir, model_name), 194 | global_step=step) 195 | 196 | def load(self, checkpoint_dir): 197 | print(" [*] Reading checkpoint...") 198 | 199 | model_dir = "tr_colors" 200 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 201 | 202 | ckpt = tf.train.get_checkpoint_state(checkpoint_dir) 203 | if ckpt and ckpt.model_checkpoint_path: 204 | ckpt_name = os.path.basename(ckpt.model_checkpoint_path) 205 | self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) 206 | return True 207 | else: 208 | return False 209 | 210 | 211 | if __name__ == '__main__': 212 | if len(sys.argv) < 2: 213 | print "Usage: python main.py [train, sample]" 214 | else: 215 | cmd = sys.argv[1] 216 | if cmd == "train": 217 | c = Palette() 218 | c.train() 219 | elif cmd == "sample": 220 | c = Palette(256,1) 221 | c.sample() 222 | else: 223 | print "Usage: python main.py [train, sample]" 224 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import os 4 | from glob import glob 5 | import sys 6 | import math 7 | from random import randint 8 | 9 | from utils import * 10 | 11 | 12 | 13 | class Color(): 14 | def __init__(self, imgsize=256, batchsize=4): 15 | self.batch_size = batchsize 16 | self.batch_size_sqrt = int(math.sqrt(self.batch_size)) 17 | self.image_size = imgsize 18 | self.output_size = imgsize 19 | 20 | self.gf_dim = 64 21 | self.df_dim = 64 22 | 23 | self.input_colors = 1 24 | self.input_colors2 = 3 25 | self.output_colors = 3 26 | 27 | self.l1_scaling = 100 28 | 29 | self.d_bn1 = batch_norm(name='d_bn1') 30 | self.d_bn2 = batch_norm(name='d_bn2') 31 | self.d_bn3 = batch_norm(name='d_bn3') 32 | 33 | self.line_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.input_colors]) 34 | self.color_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.input_colors2]) 35 | self.real_images = tf.placeholder(tf.float32, [self.batch_size, self.image_size, self.image_size, self.output_colors]) 36 | 37 | combined_preimage = tf.concat(3, [self.line_images, self.color_images]) 38 | # combined_preimage = self.line_images 39 | 40 | self.generated_images = self.generator(combined_preimage) 41 | 42 | self.real_AB = tf.concat(3, [combined_preimage, self.real_images]) 43 | self.fake_AB = tf.concat(3, [combined_preimage, self.generated_images]) 44 | 45 | self.disc_true, disc_true_logits = self.discriminator(self.real_AB, reuse=False) 46 | self.disc_fake, disc_fake_logits = self.discriminator(self.fake_AB, reuse=True) 47 | 48 | self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_true_logits, tf.ones_like(disc_true_logits))) 49 | self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake_logits, 
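        # editor's note (descriptive only): conditional-GAN (pix2pix-style) discriminator targets --
        # real (line art, colour hint, colour image) stacks are pushed toward 1 above and generated
        # ones toward 0 here. Note the positional (logits, targets) call signature, which is the
        # pre-1.0 TensorFlow API; TF >= 1.0 renamed the arguments and made them keyword-only.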
tf.zeros_like(disc_fake_logits))) 50 | self.d_loss = self.d_loss_real + self.d_loss_fake 51 | 52 | self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(disc_fake_logits, tf.ones_like(disc_fake_logits))) \ 53 | + self.l1_scaling * tf.reduce_mean(tf.abs(self.real_images - self.generated_images)) 54 | 55 | t_vars = tf.trainable_variables() 56 | self.d_vars = [var for var in t_vars if 'd_' in var.name] 57 | self.g_vars = [var for var in t_vars if 'g_' in var.name] 58 | 59 | self.d_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(self.d_loss, var_list=self.d_vars) 60 | self.g_optim = tf.train.AdamOptimizer(0.0002, beta1=0.5).minimize(self.g_loss, var_list=self.g_vars) 61 | 62 | 63 | def discriminator(self, image, y=None, reuse=False): 64 | # image is 256 x 256 x (input_c_dim + output_c_dim) 65 | if reuse: 66 | tf.get_variable_scope().reuse_variables() 67 | else: 68 | assert tf.get_variable_scope().reuse == False 69 | 70 | h0 = lrelu(conv2d(image, self.df_dim, name='d_h0_conv')) # h0 is (128 x 128 x self.df_dim) 71 | h1 = lrelu(self.d_bn1(conv2d(h0, self.df_dim*2, name='d_h1_conv'))) # h1 is (64 x 64 x self.df_dim*2) 72 | h2 = lrelu(self.d_bn2(conv2d(h1, self.df_dim*4, name='d_h2_conv'))) # h2 is (32 x 32 x self.df_dim*4) 73 | h3 = lrelu(self.d_bn3(conv2d(h2, self.df_dim*8, d_h=1, d_w=1, name='d_h3_conv'))) # h3 is (16 x 16 x self.df_dim*8) 74 | h4 = linear(tf.reshape(h3, [self.batch_size, -1]), 1, 'd_h3_lin') 75 | return tf.nn.sigmoid(h4), h4 76 | 77 | def generator(self, img_in): 78 | s = self.output_size 79 | s2, s4, s8, s16, s32, s64, s128 = int(s/2), int(s/4), int(s/8), int(s/16), int(s/32), int(s/64), int(s/128) 80 | # image is (256 x 256 x input_c_dim) 81 | e1 = conv2d(img_in, self.gf_dim, name='g_e1_conv') # e1 is (128 x 128 x self.gf_dim) 82 | e2 = bn(conv2d(lrelu(e1), self.gf_dim*2, name='g_e2_conv')) # e2 is (64 x 64 x self.gf_dim*2) 83 | e3 = bn(conv2d(lrelu(e2), self.gf_dim*4, name='g_e3_conv')) # e3 is (32 x 32 x self.gf_dim*4) 84 | e4 = bn(conv2d(lrelu(e3), self.gf_dim*8, name='g_e4_conv')) # e4 is (16 x 16 x self.gf_dim*8) 85 | e5 = bn(conv2d(lrelu(e4), self.gf_dim*8, name='g_e5_conv')) # e5 is (8 x 8 x self.gf_dim*8) 86 | 87 | 88 | self.d4, self.d4_w, self.d4_b = deconv2d(tf.nn.relu(e5), [self.batch_size, s16, s16, self.gf_dim*8], name='g_d4', with_w=True) 89 | d4 = bn(self.d4) 90 | d4 = tf.concat(3, [d4, e4]) 91 | # d4 is (16 x 16 x self.gf_dim*8*2) 92 | 93 | self.d5, self.d5_w, self.d5_b = deconv2d(tf.nn.relu(d4), [self.batch_size, s8, s8, self.gf_dim*4], name='g_d5', with_w=True) 94 | d5 = bn(self.d5) 95 | d5 = tf.concat(3, [d5, e3]) 96 | # d5 is (32 x 32 x self.gf_dim*4*2) 97 | 98 | self.d6, self.d6_w, self.d6_b = deconv2d(tf.nn.relu(d5), [self.batch_size, s4, s4, self.gf_dim*2], name='g_d6', with_w=True) 99 | d6 = bn(self.d6) 100 | d6 = tf.concat(3, [d6, e2]) 101 | # d6 is (64 x 64 x self.gf_dim*2*2) 102 | 103 | self.d7, self.d7_w, self.d7_b = deconv2d(tf.nn.relu(d6), [self.batch_size, s2, s2, self.gf_dim], name='g_d7', with_w=True) 104 | d7 = bn(self.d7) 105 | d7 = tf.concat(3, [d7, e1]) 106 | # d7 is (128 x 128 x self.gf_dim*1*2) 107 | 108 | self.d8, self.d8_w, self.d8_b = deconv2d(tf.nn.relu(d7), [self.batch_size, s, s, self.output_colors], name='g_d8', with_w=True) 109 | # d8 is (256 x 256 x output_c_dim) 110 | 111 | return tf.nn.tanh(self.d8) 112 | 113 | 114 | def imageblur(self, cimg, sampling=False): 115 | if sampling: 116 | cimg = cimg * 0.3 + np.ones_like(cimg) * 0.7 * 255 117 | else: 118 | for i in xrange(30): 119 | randx = randint(0,205) 
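                # editor's note (descriptive only): each of the 30 iterations picks a random top-left
                # corner (0..205 keeps a 50x50 patch inside the 256x256 training image) and whites
                # that patch out; the 100x100 box blur applied on return then smears what is left
                # into the coarse colour-hint image that train() feeds in as color_images.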
120 | randy = randint(0,205) 121 | cimg[randx:randx+50, randy:randy+50] = 255 122 | return cv2.blur(cimg,(100,100)) 123 | 124 | def train(self): 125 | self.loadmodel() 126 | 127 | data = glob(os.path.join("imgs", "*.jpg")) 128 | print data[0] 129 | base = np.array([get_image(sample_file) for sample_file in data[0:self.batch_size]]) 130 | base_normalized = base/255.0 131 | 132 | base_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in base]) / 255.0 133 | base_edge = np.expand_dims(base_edge, 3) 134 | 135 | base_colors = np.array([self.imageblur(ba) for ba in base]) / 255.0 136 | 137 | ims("results/base.png",merge_color(base_normalized, [self.batch_size_sqrt, self.batch_size_sqrt])) 138 | ims("results/base_line.jpg",merge(base_edge, [self.batch_size_sqrt, self.batch_size_sqrt])) 139 | ims("results/base_colors.jpg",merge_color(base_colors, [self.batch_size_sqrt, self.batch_size_sqrt])) 140 | 141 | datalen = len(data) 142 | 143 | for e in xrange(20000): 144 | for i in range(datalen / self.batch_size): 145 | batch_files = data[i*self.batch_size:(i+1)*self.batch_size] 146 | batch = np.array([get_image(batch_file) for batch_file in batch_files]) 147 | batch_normalized = batch/255.0 148 | 149 | batch_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in batch]) / 255.0 150 | batch_edge = np.expand_dims(batch_edge, 3) 151 | 152 | batch_colors = np.array([self.imageblur(ba) for ba in batch]) / 255.0 153 | 154 | d_loss, _ = self.sess.run([self.d_loss, self.d_optim], feed_dict={self.real_images: batch_normalized, self.line_images: batch_edge, self.color_images: batch_colors}) 155 | g_loss, _ = self.sess.run([self.g_loss, self.g_optim], feed_dict={self.real_images: batch_normalized, self.line_images: batch_edge, self.color_images: batch_colors}) 156 | 157 | print "%d: [%d / %d] d_loss %f, g_loss %f" % (e, i, (datalen/self.batch_size), d_loss, g_loss) 158 | 159 | if i % 100 == 0: 160 | recreation = self.sess.run(self.generated_images, feed_dict={self.real_images: base_normalized, self.line_images: base_edge, self.color_images: base_colors}) 161 | ims("results/"+str(e*100000 + i)+".jpg",merge_color(recreation, [self.batch_size_sqrt, self.batch_size_sqrt])) 162 | 163 | if i % 500 == 0: 164 | self.save("./checkpoint", e*100000 + i) 165 | 166 | def loadmodel(self, load_discrim=True): 167 | self.sess = tf.Session() 168 | self.sess.run(tf.initialize_all_variables()) 169 | 170 | if load_discrim: 171 | self.saver = tf.train.Saver() 172 | else: 173 | self.saver = tf.train.Saver(self.g_vars) 174 | 175 | if self.load("./checkpoint"): 176 | print "Loaded" 177 | else: 178 | print "Load failed" 179 | 180 | def sample(self): 181 | self.loadmodel(False) 182 | 183 | data = glob(os.path.join("imgs", "*.jpg")) 184 | 185 | datalen = len(data) 186 | 187 | for i in range(min(100,datalen / self.batch_size)): 188 | batch_files = data[i*self.batch_size:(i+1)*self.batch_size] 189 | batch = np.array([cv2.resize(imread(batch_file), (512,512)) for batch_file in batch_files]) 190 | batch_normalized = batch/255.0 191 | 192 | batch_edge = np.array([cv2.adaptiveThreshold(cv2.cvtColor(ba, cv2.COLOR_BGR2GRAY), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, blockSize=9, C=2) for ba in batch]) / 255.0 193 | batch_edge = np.expand_dims(batch_edge, 3) 194 | 195 | batch_colors = np.array([self.imageblur(ba,True) for ba in batch]) / 
255.0 196 | 197 | recreation = self.sess.run(self.generated_images, feed_dict={self.real_images: batch_normalized, self.line_images: batch_edge, self.color_images: batch_colors}) 198 | ims("results/sample_"+str(i)+".jpg",merge_color(recreation, [self.batch_size_sqrt, self.batch_size_sqrt])) 199 | ims("results/sample_"+str(i)+"_origin.jpg",merge_color(batch_normalized, [self.batch_size_sqrt, self.batch_size_sqrt])) 200 | ims("results/sample_"+str(i)+"_line.jpg",merge_color(batch_edge, [self.batch_size_sqrt, self.batch_size_sqrt])) 201 | ims("results/sample_"+str(i)+"_color.jpg",merge_color(batch_colors, [self.batch_size_sqrt, self.batch_size_sqrt])) 202 | 203 | def save(self, checkpoint_dir, step): 204 | model_name = "model" 205 | model_dir = "tr" 206 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 207 | 208 | if not os.path.exists(checkpoint_dir): 209 | os.makedirs(checkpoint_dir) 210 | 211 | self.saver.save(self.sess, 212 | os.path.join(checkpoint_dir, model_name), 213 | global_step=step) 214 | 215 | def load(self, checkpoint_dir): 216 | print(" [*] Reading checkpoint...") 217 | 218 | model_dir = "tr" 219 | checkpoint_dir = os.path.join(checkpoint_dir, model_dir) 220 | 221 | ckpt = tf.train.get_checkpoint_state(checkpoint_dir) 222 | if ckpt and ckpt.model_checkpoint_path: 223 | ckpt_name = os.path.basename(ckpt.model_checkpoint_path) 224 | self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) 225 | return True 226 | else: 227 | return False 228 | 229 | 230 | if __name__ == '__main__': 231 | if len(sys.argv) < 2: 232 | print "Usage: python main.py [train, sample]" 233 | else: 234 | cmd = sys.argv[1] 235 | if cmd == "train": 236 | c = Color() 237 | c.train() 238 | elif cmd == "sample": 239 | c = Color(512,1) 240 | c.sample() 241 | else: 242 | print "Usage: python main.py [train, sample]" 243 | -------------------------------------------------------------------------------- /web/colorpicker/js/bootstrap-colorpicker.min.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | * Bootstrap Colorpicker v2.5.1 3 | * https://itsjavi.com/bootstrap-colorpicker/ 4 | */ 5 | !function(a,b){"function"==typeof define&&define.amd?define(["jquery"],function(a){return b(a)}):"object"==typeof exports?module.exports=b(require("jquery")):jQuery&&!jQuery.fn.colorpicker&&b(jQuery)}(this,function(a){"use strict";var b=function(c,d,e,f,g){this.fallbackValue=e?e&&"undefined"!=typeof e.h?e:this.value={h:0,s:0,b:0,a:1}:null,this.fallbackFormat=f?f:"rgba",this.hexNumberSignPrefix=g===!0,this.value=this.fallbackValue,this.origFormat=null,this.predefinedColors=d?d:{},this.colors=a.extend({},b.webColors,this.predefinedColors),c&&("undefined"!=typeof c.h?this.value=c:this.setColor(String(c))),this.value||(this.value={h:0,s:0,b:0,a:1})};b.webColors={aliceblue:"f0f8ff",antiquewhite:"faebd7",aqua:"00ffff",aquamarine:"7fffd4",azure:"f0ffff",beige:"f5f5dc",bisque:"ffe4c4",black:"000000",blanchedalmond:"ffebcd",blue:"0000ff",blueviolet:"8a2be2",brown:"a52a2a",burlywood:"deb887",cadetblue:"5f9ea0",chartreuse:"7fff00",chocolate:"d2691e",coral:"ff7f50",cornflowerblue:"6495ed",cornsilk:"fff8dc",crimson:"dc143c",cyan:"00ffff",darkblue:"00008b",darkcyan:"008b8b",darkgoldenrod:"b8860b",darkgray:"a9a9a9",darkgreen:"006400",darkkhaki:"bdb76b",darkmagenta:"8b008b",darkolivegreen:"556b2f",darkorange:"ff8c00",darkorchid:"9932cc",darkred:"8b0000",darksalmon:"e9967a",darkseagreen:"8fbc8f",darkslateblue:"483d8b",darkslategray:"2f4f4f",darkturquoise:"00ced1",darkviolet:"9400d3",deeppink:"ff1493",deepskyblue:"00bfff",dimgray:"696969",dodgerblue:"1e90ff",firebrick:"b22222",floralwhite:"fffaf0",forestgreen:"228b22",fuchsia:"ff00ff",gainsboro:"dcdcdc",ghostwhite:"f8f8ff",gold:"ffd700",goldenrod:"daa520",gray:"808080",green:"008000",greenyellow:"adff2f",honeydew:"f0fff0",hotpink:"ff69b4",indianred:"cd5c5c",indigo:"4b0082",ivory:"fffff0",khaki:"f0e68c",lavender:"e6e6fa",lavenderblush:"fff0f5",lawngreen:"7cfc00",lemonchiffon:"fffacd",lightblue:"add8e6",lightcoral:"f08080",lightcyan:"e0ffff",lightgoldenrodyellow:"fafad2",lightgrey:"d3d3d3",lightgreen:"90ee90",lightpink:"ffb6c1",lightsalmon:"ffa07a",lightseagreen:"20b2aa",lightskyblue:"87cefa",lightslategray:"778899",lightsteelblue:"b0c4de",lightyellow:"ffffe0",lime:"00ff00",limegreen:"32cd32",linen:"faf0e6",magenta:"ff00ff",maroon:"800000",mediumaquamarine:"66cdaa",mediumblue:"0000cd",mediumorchid:"ba55d3",mediumpurple:"9370d8",mediumseagreen:"3cb371",mediumslateblue:"7b68ee",mediumspringgreen:"00fa9a",mediumturquoise:"48d1cc",mediumvioletred:"c71585",midnightblue:"191970",mintcream:"f5fffa",mistyrose:"ffe4e1",moccasin:"ffe4b5",navajowhite:"ffdead",navy:"000080",oldlace:"fdf5e6",olive:"808000",olivedrab:"6b8e23",orange:"ffa500",orangered:"ff4500",orchid:"da70d6",palegoldenrod:"eee8aa",palegreen:"98fb98",paleturquoise:"afeeee",palevioletred:"d87093",papayawhip:"ffefd5",peachpuff:"ffdab9",peru:"cd853f",pink:"ffc0cb",plum:"dda0dd",powderblue:"b0e0e6",purple:"800080",red:"ff0000",rosybrown:"bc8f8f",royalblue:"4169e1",saddlebrown:"8b4513",salmon:"fa8072",sandybrown:"f4a460",seagreen:"2e8b57",seashell:"fff5ee",sienna:"a0522d",silver:"c0c0c0",skyblue:"87ceeb",slateblue:"6a5acd",slategray:"708090",snow:"fffafa",springgreen:"00ff7f",steelblue:"4682b4",tan:"d2b48c",teal:"008080",thistle:"d8bfd8",tomato:"ff6347",turquoise:"40e0d0",violet:"ee82ee",wheat:"f5deb3",white:"ffffff",whitesmoke:"f5f5f5",yellow:"ffff00",yellowgreen:"9acd32",transparent:"transparent"},b.prototype={constructor:b,colors:{},predefinedColors:{},getValue:function(){return 
this.value},setValue:function(a){this.value=a},_sanitizeNumber:function(a){return"number"==typeof a?a:isNaN(a)||null===a||""===a||void 0===a?1:""===a?0:"undefined"!=typeof a.toLowerCase?(a.match(/^\./)&&(a="0"+a),Math.ceil(100*parseFloat(a))/100):1},isTransparent:function(a){return!(!a||!("string"==typeof a||a instanceof String))&&(a=a.toLowerCase().trim(),"transparent"===a||a.match(/#?00000000/)||a.match(/(rgba|hsla)\(0,0,0,0?\.?0\)/))},rgbaIsTransparent:function(a){return 0===a.r&&0===a.g&&0===a.b&&0===a.a},setColor:function(a){if(a=a.toLowerCase().trim()){if(this.isTransparent(a))return this.value={h:0,s:0,b:0,a:0},!0;var b=this.parse(a);b?(this.value=this.value={h:b.h,s:b.s,b:b.b,a:b.a},this.origFormat||(this.origFormat=b.format)):this.fallbackValue&&(this.value=this.fallbackValue)}return!1},setHue:function(a){this.value.h=1-a},setSaturation:function(a){this.value.s=a},setBrightness:function(a){this.value.b=1-a},setAlpha:function(a){this.value.a=Math.round(parseInt(100*(1-a),10)/100*100)/100},toRGB:function(a,b,c,d){0===arguments.length&&(a=this.value.h,b=this.value.s,c=this.value.b,d=this.value.a),a*=360;var e,f,g,h,i;return a=a%360/60,i=c*b,h=i*(1-Math.abs(a%2-1)),e=f=g=c-i,a=~~a,e+=[i,h,0,0,h,i][a],f+=[h,i,i,h,0,0][a],g+=[0,0,h,i,i,h][a],{r:Math.round(255*e),g:Math.round(255*f),b:Math.round(255*g),a:d}},toHex:function(a,b,c,d){0===arguments.length&&(a=this.value.h,b=this.value.s,c=this.value.b,d=this.value.a);var e=this.toRGB(a,b,c,d);if(this.rgbaIsTransparent(e))return"transparent";var f=(this.hexNumberSignPrefix?"#":"")+((1<<24)+(parseInt(e.r)<<16)+(parseInt(e.g)<<8)+parseInt(e.b)).toString(16).slice(1);return f},toHSL:function(a,b,c,d){0===arguments.length&&(a=this.value.h,b=this.value.s,c=this.value.b,d=this.value.a);var e=a,f=(2-b)*c,g=b*c;return g/=f>0&&f<=1?f:2-f,f/=2,g>1&&(g=1),{h:isNaN(e)?0:e,s:isNaN(g)?0:g,l:isNaN(f)?0:f,a:isNaN(d)?0:d}},toAlias:function(a,b,c,d){var e,f=0===arguments.length?this.toHex():this.toHex(a,b,c,d),g="alias"===this.origFormat?f:this.toString(this.origFormat,!1);for(var h in this.colors)if(e=this.colors[h].toLowerCase().trim(),e===f||e===g)return h;return!1},RGBtoHSB:function(a,b,c,d){a/=255,b/=255,c/=255;var e,f,g,h;return g=Math.max(a,b,c),h=g-Math.min(a,b,c),e=0===h?null:g===a?(b-c)/h:g===b?(c-a)/h+2:(a-b)/h+4,e=(e+360)%6*60/360,f=0===h?0:h/g,{h:this._sanitizeNumber(e),s:f,b:g,a:this._sanitizeNumber(d)}},HueToRGB:function(a,b,c){return c<0?c+=1:c>1&&(c-=1),6*c<1?a+(b-a)*c*6:2*c<1?b:3*c<2?a+(b-a)*(2/3-c)*6:a},HSLtoRGB:function(a,b,c,d){b<0&&(b=0);var e;e=c<=.5?c*(1+b):c+b-c*b;var f=2*c-e,g=a+1/3,h=a,i=a-1/3,j=Math.round(255*this.HueToRGB(f,e,g)),k=Math.round(255*this.HueToRGB(f,e,h)),l=Math.round(255*this.HueToRGB(f,e,i));return[j,k,l,this._sanitizeNumber(d)]},parse:function(b){if(0===arguments.length)return!1;var c,d,e=this,f=!1,g="undefined"!=typeof this.colors[b];return g&&(b=this.colors[b].toLowerCase().trim()),a.each(this.stringParsers,function(a,h){var i=h.re.exec(b);return c=i&&h.parse.apply(e,[i]),!c||(f={},d=g?"alias":h.format?h.format:e.getValidFallbackFormat(),f=d.match(/hsla?/)?e.RGBtoHSB.apply(e,e.HSLtoRGB.apply(e,c)):e.RGBtoHSB.apply(e,c),f instanceof Object&&(f.format=d),!1)}),f},getValidFallbackFormat:function(){var a=["rgba","rgb","hex","hsla","hsl"];return this.origFormat&&a.indexOf(this.origFormat)!==-1?this.origFormat:this.fallbackFormat&&a.indexOf(this.fallbackFormat)!==-1?this.fallbackFormat:"rgba"},toString:function(a,c){a=a||this.origFormat||this.fallbackFormat,c=c||!1;var d=!1;switch(a){case"rgb":return 
d=this.toRGB(),this.rgbaIsTransparent(d)?"transparent":"rgb("+d.r+","+d.g+","+d.b+")";case"rgba":return d=this.toRGB(),"rgba("+d.r+","+d.g+","+d.b+","+d.a+")";case"hsl":return d=this.toHSL(),"hsl("+Math.round(360*d.h)+","+Math.round(100*d.s)+"%,"+Math.round(100*d.l)+"%)";case"hsla":return d=this.toHSL(),"hsla("+Math.round(360*d.h)+","+Math.round(100*d.s)+"%,"+Math.round(100*d.l)+"%,"+d.a+")";case"hex":return this.toHex();case"alias":return d=this.toAlias(),d===!1?this.toString(this.getValidFallbackFormat()):c&&!(d in b.webColors)&&d in this.predefinedColors?this.predefinedColors[d]:d;default:return d}},stringParsers:[{re:/rgb\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*?\)/,format:"rgb",parse:function(a){return[a[1],a[2],a[3],1]}},{re:/rgb\(\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*?\)/,format:"rgb",parse:function(a){return[2.55*a[1],2.55*a[2],2.55*a[3],1]}},{re:/rgba\(\s*(\d{1,3})\s*,\s*(\d{1,3})\s*,\s*(\d{1,3})\s*(?:,\s*(\d*(?:\.\d+)?)\s*)?\)/,format:"rgba",parse:function(a){return[a[1],a[2],a[3],a[4]]}},{re:/rgba\(\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*(?:,\s*(\d*(?:\.\d+)?)\s*)?\)/,format:"rgba",parse:function(a){return[2.55*a[1],2.55*a[2],2.55*a[3],a[4]]}},{re:/hsl\(\s*(\d*(?:\.\d+)?)\s*,\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*?\)/,format:"hsl",parse:function(a){return[a[1]/360,a[2]/100,a[3]/100,a[4]]}},{re:/hsla\(\s*(\d*(?:\.\d+)?)\s*,\s*(\d*(?:\.\d+)?)\%\s*,\s*(\d*(?:\.\d+)?)\%\s*(?:,\s*(\d*(?:\.\d+)?)\s*)?\)/,format:"hsla",parse:function(a){return[a[1]/360,a[2]/100,a[3]/100,a[4]]}},{re:/#?([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/,format:"hex",parse:function(a){return[parseInt(a[1],16),parseInt(a[2],16),parseInt(a[3],16),1]}},{re:/#?([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/,format:"hex",parse:function(a){return[parseInt(a[1]+a[1],16),parseInt(a[2]+a[2],16),parseInt(a[3]+a[3],16),1]}}],colorNameToHex:function(a){return"undefined"!=typeof this.colors[a.toLowerCase()]&&this.colors[a.toLowerCase()]}};var c={horizontal:!1,inline:!1,color:!1,format:!1,input:"input",container:!1,component:".add-on, .input-group-addon",fallbackColor:!1,fallbackFormat:"hex",hexNumberSignPrefix:!0,sliders:{saturation:{maxLeft:100,maxTop:100,callLeft:"setSaturation",callTop:"setBrightness"},hue:{maxLeft:0,maxTop:100,callLeft:!1,callTop:"setHue"},alpha:{maxLeft:0,maxTop:100,callLeft:!1,callTop:"setAlpha"}},slidersHorz:{saturation:{maxLeft:100,maxTop:100,callLeft:"setSaturation",callTop:"setBrightness"},hue:{maxLeft:100,maxTop:0,callLeft:"setHue",callTop:!1},alpha:{maxLeft:100,maxTop:0,callLeft:"setAlpha",callTop:!1}},template:'