├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── assets ├── experts_work.png ├── feature_extraction.png ├── infocolorizer_appendix.pdf ├── more_examples.png ├── system_architect.png └── teaser.png ├── backend ├── ProcessImage.py ├── README.md ├── backbonedetector │ ├── __init__.py │ ├── model.h5 │ └── predict.py ├── flowextractor │ ├── FlowExtraction.py │ └── __init__.py ├── infographicsLoader │ ├── DataComponent.py │ ├── __init__.py │ ├── color_helper.py │ └── infographicsLoader.py ├── libs │ ├── __init__.py │ ├── color_dist_cie2000.py │ └── jump_k_means.py ├── name_color_dict.json ├── requirements.txt ├── run-data-backend.sh ├── serve.py ├── shapedetector │ ├── __init__.py │ └── shapedetector.py ├── treeconstructor │ ├── __init__.py │ ├── helper.py │ ├── stageGetConstructedShapeTree.py │ ├── stageGetInitialTreeNodeMap.py │ ├── stageMergeSimilarColors.py │ ├── stageRemoveTextIconIndex.py │ └── treeConstructor.py ├── utils │ ├── __init__.py │ ├── getinterval.py │ ├── plotImageWithLegend.py │ └── preordertraversal.py ├── vaeacimputation │ ├── VAEAC.py │ ├── __init__.py │ ├── datasets.py │ ├── imputation_networks.py │ ├── mask_generators.py │ ├── nn_utils.py │ ├── prob_utils.py │ ├── train_utils.py │ ├── trained_vaeac_model │ ├── training_groundtruth.tsv │ └── vaeac_impute.py └── variables.py └── frontend ├── babel.config.js ├── package-lock.json ├── package.json ├── public ├── favicon.ico ├── index.html ├── logo.ico └── logo_0.ico ├── src ├── AppSVG.vue ├── assets │ ├── font │ │ ├── Roboto-Black.ttf │ │ └── Roboto-Regular.ttf │ ├── img │ │ ├── 1.jpg │ │ ├── 101.jpg │ │ ├── 129.jpg │ │ ├── 135.jpg │ │ ├── 19.jpg │ │ ├── 2286.jpg │ │ ├── 2411.jpg │ │ ├── 258.jpg │ │ ├── 3032.jpg │ │ ├── 3046.jpg │ │ ├── 3134.jpg │ │ ├── 3135.jpg │ │ ├── 3142.jpg │ │ ├── 3143.jpg │ │ ├── 3227.jpg │ │ ├── 3233.jpg │ │ ├── 3257.jpg │ │ ├── 337.jpg │ │ ├── 3464.jpg │ │ ├── 3465.jpg │ │ ├── 3466.jpg │ │ ├── 3475.jpg │ │ ├── 3495.jpg │ │ ├── 442.jpg │ │ ├── 459.jpg │ │ ├── 478.jpg │ │ ├── 521.jpg │ │ ├── 525.jpg │ │ ├── 621.jpg │ │ ├── 65.jpg │ │ ├── 693.jpg │ │ ├── 70.jpg │ │ ├── 703.jpg │ │ ├── 71.jpg │ │ ├── 718.jpg │ │ ├── 72.jpg │ │ ├── 735.jpg │ │ ├── 757.jpg │ │ ├── 767.jpg │ │ ├── 776.jpg │ │ ├── 777.jpg │ │ ├── 817.jpg │ │ ├── 82.jpg │ │ └── 86.jpg │ ├── logo.png │ ├── shapes │ │ ├── 1.svg │ │ ├── 10.svg │ │ ├── 11.svg │ │ ├── 12.svg │ │ ├── 13.svg │ │ ├── 14.svg │ │ ├── 15.svg │ │ ├── 16.svg │ │ ├── 17.svg │ │ ├── 18.svg │ │ ├── 19.svg │ │ ├── 2.svg │ │ ├── 20.svg │ │ ├── 21.svg │ │ ├── 3.svg │ │ ├── 4.svg │ │ ├── 5.svg │ │ ├── 6.svg │ │ ├── 7.svg │ │ ├── 8.svg │ │ └── 9.svg │ ├── svgs │ │ ├── balance.svg │ │ ├── bank.svg │ │ ├── bills.svg │ │ ├── book.svg │ │ ├── book_2.svg │ │ ├── castle.svg │ │ ├── employees.svg │ │ ├── exchange.svg │ │ ├── film.svg │ │ ├── footprint.svg │ │ ├── loss.svg │ │ ├── meeting.svg │ │ ├── pencil.svg │ │ ├── safebox.svg │ │ ├── skills.svg │ │ ├── strategy.svg │ │ ├── svg1.svg │ │ ├── svg3.svg │ │ ├── svg4.svg │ │ ├── svg5.svg │ │ ├── time.svg │ │ ├── tools.svg │ │ └── wallet.svg │ └── templates │ │ ├── animals.svg │ │ ├── demo.svg │ │ ├── environment.svg │ │ ├── head.svg │ │ └── timeline.svg ├── components │ ├── ContentPanelView │ │ └── ContentPanelView.vue │ ├── InteractiveView │ │ ├── InteractiveView.js │ │ └── InteractiveView.vue │ ├── PaintingView │ │ ├── PaintingView.js │ │ ├── PaintingView.vue │ │ ├── fabricImageFilter.js │ │ └── helper.js │ ├── PaletteView │ │ ├── PaletteView.js │ │ ├── PaletteView.vue │ │ ├── drawPalette.js │ │ └── helper.js │ └── 
SettingToolBar │ │ └── SettingToolBar.vue ├── data │ ├── contentIcon.js │ ├── contentImage.js │ ├── contentShape.js │ └── contentTemplate.js ├── main.js ├── plugins │ └── vuetify.js ├── service │ ├── config.js │ ├── dataService.js │ ├── store.js │ └── variables.js └── utils │ └── initialize.js └── vue.config.js /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | dist/ 3 | 4 | # frontend 5 | frontend/node_modules 6 | 7 | # backend 8 | backend/app/data 9 | backend/datasets 10 | **/__pycache__/ 11 | 12 | # local env files 13 | .env.local 14 | .env.*.local 15 | 16 | # Log files 17 | npm-debug.log* 18 | yarn-debug.log* 19 | yarn-error.log* 20 | 21 | # Editor directories and files 22 | .idea 23 | .vscode 24 | *.suo 25 | *.ntvs* 26 | *.njsproj 27 | *.sln 28 | *.sw? 29 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/.gitmodules -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # InfoColorizer: Interactive Recommendation of Color Palettes for Infographics 2 | 3 | ![](assets/teaser.png) 4 | 5 | This repository contains code and study materials for the paper: 6 | >Linping Yuan, Ziqi Zhou, Jian Zhao, Yiqiu Guo, Fan Du, Huamin Qu. [InfoColorizer: Interactive Recommendation of Color Palettes for Infographics](https://ieeexplore.ieee.org/document/9444798). IEEE Transactions on Visualization and Computer Graphics, 2021. 7 | 8 | An arXiv copy can also be downloaded [here](https://arxiv.org/pdf/2102.02041.pdf). 9 | 10 | ---- 11 | ## Introduction 12 | InfoColorizer is a tool that allows users to effectively obtain high-quality color palettes during infographic creation. 13 | It consists of a recommendation engine and a visual interface. 14 | From a large infographic dataset, we extract a set of features that embeds both the structure and color information of infographics (the red arrow). 15 | We then train a deep learning model, VAEAC, that characterizes good color design practices in the data, to construct our recommendation engine. 16 | With the visual interface, users can obtain recommended color palettes, specify various color preferences and constraints, preview and edit infographics, 17 | and retrieve new recommendations in an iterative manner (the blue arrows). 18 | 19 | ![The workflow of InfoColorizer.](assets/system_architect.png) 20 | 21 | ## Supplementary Materials 22 | - [Demo Video](https://youtu.be/FZvLt0AAIAI) 23 | - [Appendices](assets/infocolorizer_appendix.pdf) 24 | - [Code](https://github.com/yuanlinping/InfoColorizer) 25 | - [Results generated by participants in the controlled user study and expert interview](https://bit.ly/38zinpV) 26 | 27 | ## Feature Extraction Algorithm 28 | As described in [Section 5.1.1](https://arxiv.org/pdf/2102.02041.pdf), we extract features at different levels and construct a tree that captures the spatial relationships of infographic elements. The corresponding code is mainly at [./backend/treeconstructor](backend/treeconstructor).
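Once the backend is running (see Deployment below), the extracted feature tree can also be fetched over HTTP. The following is a minimal sketch rather than part of the shipped code; it assumes the Flask server started by `run-data-backend.sh` is listening on `127.0.0.1:5000` and that `65.jpg` with its bounding-box file `65.txt` exists under the data folders configured in `backend/variables.py`.

```python
# Minimal sketch: request the feature tree for infographic 65 from the
# /getFeaturesAndTreeStructure endpoint defined in backend/ProcessImage.py.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/getFeaturesAndTreeStructure",
    json={"numInImg": 65},  # id of an infographic in the dataset folder
)
resp.raise_for_status()
tree = resp.json()["constructed_tree"]  # per-node structure and color features
```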
29 | ![](assets/feature_extraction.png) 30 | 31 | ## Deployment 32 | ### Prerequisites 33 | [Node](https://nodejs.org/) and [Python](https://www.python.org/) 34 | ### Environment 35 | - Vue 3.6.3 36 | - Python 3.6 37 | ### Set up the interface 38 | 39 | ``` 40 | - cd frontend 41 | - npm install 42 | - npm run serve 43 | ``` 44 | 45 | ### Start the server 46 | ``` 47 | - cd backend 48 | - pip install -r requirements.txt (using a virtual environment is recommended: https://docs.python.org/3/tutorial/venv.html) 49 | - bash run-data-backend.sh 50 | ``` 51 | ## Cite this work 52 | ``` 53 | @article{yuan2021infocolorizer, 54 | title={InfoColorizer: Interactive Recommendation of Color Palettes for Infographics}, 55 | author={Yuan, Lin-Ping and Zhou, Ziqi and Zhao, Jian and Guo, Yiqiu and Du, Fan and Qu, Huamin}, 56 | journal={IEEE Transactions on Visualization and Computer Graphics}, 57 | year={2021} 58 | } 59 | ``` 60 | -------------------------------------------------------------------------------- /assets/experts_work.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/experts_work.png -------------------------------------------------------------------------------- /assets/feature_extraction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/feature_extraction.png -------------------------------------------------------------------------------- /assets/infocolorizer_appendix.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/infocolorizer_appendix.pdf -------------------------------------------------------------------------------- /assets/more_examples.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/more_examples.png -------------------------------------------------------------------------------- /assets/system_architect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/system_architect.png -------------------------------------------------------------------------------- /assets/teaser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/assets/teaser.png -------------------------------------------------------------------------------- /backend/ProcessImage.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import os 4 | from flask import Blueprint, jsonify, request, json 5 | 6 | from infographicsLoader import color_helper 7 | from treeconstructor.treeConstructor import featureExtractionPipeline 8 | from utils.preordertraversal import getLeftRightNumber, flattenlightconstructedtree, flattenheavyconstructedtree, addFieldsBackToLightTree 9 | from vaeacimputation import impute 10 | from variables import * 11 | 12 | process_image_api = Blueprint('process_image', __name__) 13 | 14 | SUPPORT_NODE_UPPER
= 18 15 | SUPPORT_NODE_LOWER = 3 16 | RETURNED_PALETTE_NUM = 20 17 | 18 | @process_image_api.route('/getFeaturesAndTreeStructure', methods=['POST']) 19 | def getFeaturesAndTreeStructure(): 20 | params = request.get_json() 21 | num_in_img = params.get('numInImg') 22 | img_file_name = os.path.join(DATA_ROOT, INFOGRAPHICS_FOLDER, "{}.jpg".format(num_in_img)) 23 | bb_file_name = os.path.join(DATA_ROOT, BOUNDING_BOX_FOLDER, "{}.txt".format(num_in_img)) 24 | 25 | mode = "4300_infographics" 26 | constructed_tree = featureExtractionPipeline(img_file_name, bb_file_name, mode, num_in_img) 27 | light_constructed_tree = getLeftRightNumber(constructed_tree) 28 | final_constructed_tree, pixel_array_of_node = addFieldsBackToLightTree(light_constructed_tree, constructed_tree, True) 29 | result = {"constructed_tree": final_constructed_tree, "pixel_array_of_node": pixel_array_of_node} 30 | return jsonify(result) 31 | 32 | @process_image_api.route('/getImputationResults', methods=['POST']) 33 | def getImputationResults(): 34 | params = request.get_json() 35 | tree_source = params.get('treeSource') 36 | num_in_img = params.get('numInImg') 37 | modified_tree = params.get('modifiedTree') 38 | bind_array = params.get('bindArray') 39 | 40 | REPEAT_TIME = 5 41 | VECTOR_LEN = 169 # 1 image id + 6 global values + 18 node slots x 9 features each 42 | # load the mapping from color names to candidate RGB colors 43 | dirname = os.path.dirname(__file__) 44 | with open(os.path.join(dirname, "name_color_dict.json"), "r") as json_data: 45 | name_color_dict = json.load(json_data) 46 | 47 | res = np.zeros((REPEAT_TIME, VECTOR_LEN)) 48 | 49 | res[:,0] = np.full((REPEAT_TIME),num_in_img) 50 | 51 | if tree_source == "img": 52 | img_file_name = os.path.join(DATA_ROOT, INFOGRAPHICS_FOLDER, "{}.jpg".format(num_in_img)) 53 | bb_file_name = os.path.join(DATA_ROOT, BOUNDING_BOX_FOLDER, "{}.txt".format(num_in_img)) 54 | img = cv2.imread(img_file_name) 55 | img_bg_color = color_helper.find_background_color(img) 56 | img_bg_color = img_bg_color / 255.
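# no source infographic on disk (tree_source != "img", e.g. a tree built from a template): fall back to a black background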
57 | else: 58 | img_bg_color = np.array([0.0,0.0,0.0]) 59 | 60 | res[:, [1, 2, 3]] = np.repeat(img_bg_color.reshape((1,-1)), REPEAT_TIME, axis=0) 61 | res[:, [4, 5, 6]] = np.repeat(np.array([0, 0.0, 0.0]).reshape((1, -1)), REPEAT_TIME, axis=0) 62 | 63 | flatten_tree = flattenheavyconstructedtree(modified_tree) 64 | ind = 0 65 | node_id = np.array([]) 66 | element_area = np.array([]) 67 | for key, node in flatten_tree.items(): 68 | if ind >= SUPPORT_NODE_UPPER: 69 | break 70 | node_id = np.append(node_id, int(key)) 71 | left_number = node['left_number'] 72 | right_number = node['right_number'] 73 | ty = 1 74 | rh = node['relative_height'] 75 | rw = node['relative_width'] 76 | ra = node['relative_pixel_area'] 77 | element_area = np.append(element_area, 0 if np.isnan(ra) else ra) 78 | res[:, [7 + ind * 9, 8 + ind * 9, 9 + ind * 9, 10 + ind * 9, 11 + ind * 9, 12 + ind * 9]] = np.repeat(np.array([left_number, right_number, ty, rh, rw, ra]).reshape((1,-1)), REPEAT_TIME, axis=0) 79 | rgb_color = node['rgb_user_specific_color'] 80 | if rgb_color is None: 81 | color_name = node['color_name'] 82 | if color_name is not None: 83 | available_colors = np.array(name_color_dict[color_name]) 84 | colors = available_colors[np.random.choice(available_colors.shape[0], size=REPEAT_TIME, replace=False)] 85 | else: 86 | colors = np.full((REPEAT_TIME, 3),np.nan) 87 | else: 88 | colors = np.repeat(np.array(rgb_color).reshape(1,-1), REPEAT_TIME, axis=0) 89 | if not np.isnan(np.sum(colors)): 90 | colors = cv2.cvtColor(np.reshape(colors.astype('uint8'), (1, -1, 3)), cv2.COLOR_RGB2LAB) 91 | colors = np.reshape(colors, (-1,3)) 92 | colors = colors / 255. 93 | res[:,[13 + ind * 9, 14 + ind * 9, 15 + ind * 9]] = colors 94 | ind = ind + 1 95 | 96 | 97 | while ind < SUPPORT_NODE_UPPER: 98 | empty = [1, 1, 1, 1, 1, 1, 1, 1, 1] # constant placeholders for unused node slots 99 | res[:,[7 + ind * 9, 8 + ind * 9, 9 + ind * 9, 10 + ind * 9, 11 + ind * 9, 12 + ind * 9, 13 + ind * 9, 14 + ind * 9, 15 + ind * 9]] = np.repeat(np.array(empty).reshape((1,-1)), REPEAT_TIME, axis=0) 100 | ind = ind + 1 101 | # ------------get imputation results------------ # 102 | imputation_results = impute(res[:,1:]) 103 | node_num = len(node_id) 104 | color_imputation_results = np.zeros((imputation_results.shape[0],node_num * 3)) 105 | for i in range(0, node_num): 106 | color_imputation_results[:,i * 3] = imputation_results[:,12 + i * 9] * 255 107 | color_imputation_results[:,i * 3 + 1] = imputation_results[:, 13 + i * 9] * 255 108 | color_imputation_results[:,i * 3 + 2] = imputation_results[:, 14 + i * 9] * 255 109 | 110 | color_imputation_results = np.reshape(color_imputation_results, (color_imputation_results.shape[0],-1, 3)) 111 | color_imputation_results = cv2.cvtColor(color_imputation_results.astype('uint8'), cv2.COLOR_LAB2RGB) 112 | 113 | # make the bound elements share the same color 114 | uni_bind_flag = np.unique(bind_array) 115 | for flag in uni_bind_flag: 116 | if flag == -1: 117 | continue 118 | element_index_with_flag = np.where(bind_array == flag)[0] 119 | probability = element_area[element_index_with_flag] 120 | probability = probability / sum(probability) 121 | selected_index = np.random.choice(element_index_with_flag, 1, p=probability)[0] 122 | selected_color = color_imputation_results[:,selected_index] 123 | replace_color_array = np.repeat(selected_color,len(element_index_with_flag),axis=0) 124 | replace_color_array = np.reshape(replace_color_array, (color_imputation_results.shape[0], len(element_index_with_flag), 3)) 125 | color_imputation_results[:,element_index_with_flag] =
replace_color_array 126 | 127 | np.random.shuffle(color_imputation_results) 128 | result = {'rgb_imputation_results' : color_imputation_results.tolist(), 'corresponding_ids': node_id.tolist()} 129 | 130 | return jsonify(result) -------------------------------------------------------------------------------- /backend/README.md: -------------------------------------------------------------------------------- 1 | Coming soon. 2 | 12 | -------------------------------------------------------------------------------- /backend/backbonedetector/__init__.py: -------------------------------------------------------------------------------- 1 | from .predict import predict -------------------------------------------------------------------------------- /backend/backbonedetector/model.h5: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/backend/backbonedetector/model.h5 -------------------------------------------------------------------------------- /backend/backbonedetector/predict.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import cv2 4 | from tensorflow import keras 5 | from variables import * 6 | 7 | 8 | def predict(img): 9 | dirname = os.path.dirname(__file__) 10 | model_path = os.path.join(dirname, 'model.h5') 11 | model = keras.models.load_model(model_path) 12 | img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) 13 | img = cv2.resize(img, (300, 300)) 14 | X = [] 15 | X.append(img) 16 | X = np.array(X, dtype="uint8") 17 | X = X.reshape(1, 300, 300, 1) 18 | predicted_img = model.predict(X) 19 | index = np.argmax(predicted_img) 20 | return index -------------------------------------------------------------------------------- /backend/flowextractor/__init__.py: -------------------------------------------------------------------------------- 1 | from .FlowExtraction import VIFflow -------------------------------------------------------------------------------- /backend/infographicsLoader/DataComponent.py: -------------------------------------------------------------------------------- 1 | from infographicsLoader.color_helper import * 2 | 3 | class DataComponent(): 4 | def __init__(self, params, img): 5 | """ 6 | :param params: the 5 fields of one line read from the bounding box txt 7 | :param img: in bgr; rows x columns = height x width 8 | """ 9 | self._img_height = img.shape[0] 10 | self._img_width = img.shape[1] 11 | 12 | self.type = int(params[0]) 13 | 14 | self.location = { 15 | "x": float(params[1]), # x: left -> right 16 | "y": float(params[2]), # y: top -> bottom 17 | "w": float(params[3]), # w: left -> right 18 | "h": float(params[4]) # h: top -> bottom 19 | } 20 | 21 | [[tl_i, tl_j], [br_i, br_j]] = self.getTopLeftAndBottomRightConnerIJ() 22 | 23 | self.conner = { ## top-left and bottom-right corners used in img[i,j] 24 | "tl_i": tl_i, 25 | "tl_j": tl_j, 26 | "br_i": br_i, 27 | "br_j": br_j 28 | } 29 | 30 | bb_content = img[tl_i:br_i, tl_j:br_j] 31 | 32 | self.colors = kmeans_main_colors_in_bounding_boxes(bb_content) 33 | 34 | def getTopLeftAndBottomRightConnerIJ(self): 35 | """ 36 | x, y are in OpenCV image coordinates: x runs left to right (width), y runs top to bottom (height) 37 | (x,y) is the center point of the box 38 | we first need to calculate the two corners in (x,y) 39 | then turn (x,y) to (i,j).
basically, (i,j) = (y,x) 40 | :return: 41 | """ 42 | tl_x = int((self.location["x"] - self.location["w"]/2) * self._img_width) 43 | tl_y = int((self.location["y"] - self.location["h"]/2) * self._img_height) 44 | 45 | br_x = int((self.location["x"] + self.location["w"]/2) * self._img_width) 46 | br_y = int((self.location["y"] + self.location["h"]/2) * self._img_height) 47 | 48 | tl_x = max(tl_x, 0) 49 | tl_y = max(tl_y, 0) 50 | br_x = min(self._img_width - 1, br_x) 51 | br_y = min(self._img_height - 1, br_y) 52 | 53 | return [[tl_y, tl_x],[br_y, br_x]] # [[tl_i, tl_j],[br_i, br_j]] 54 | 55 | class DataComponentForTimelineFormat(): 56 | def __init__(self, params, img): 57 | """ 58 | :param params: the 6 fields of one line read from the bounding box txt 59 | :param img: in bgr; rows x columns = height x width 60 | """ 61 | self._img_height = img.shape[0] 62 | self._img_width = img.shape[1] 63 | 64 | self.conner = { ## top-left and bottom-right corners used in img[i,j] 65 | "tl_i": int(params[1]), 66 | "tl_j": int(params[0]), 67 | "br_i": int(params[3]), 68 | "br_j": int(params[2]), 69 | } 70 | 71 | tl_i, tl_j, br_i, br_j = self.conner["tl_i"], self.conner["tl_j"],self.conner["br_i"],self.conner["br_j"] 72 | self.type = int(params[4]) 73 | 74 | self.confidence = float(params[5]) 75 | bb_content = img[tl_i:br_i, tl_j:br_j] 76 | 77 | self.colors = kmeans_main_colors_in_bounding_boxes(bb_content) -------------------------------------------------------------------------------- /backend/infographicsLoader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/backend/infographicsLoader/__init__.py -------------------------------------------------------------------------------- /backend/infographicsLoader/color_helper.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | from libs import dist_cie2000 4 | from sklearn.cluster import DBSCAN, KMeans 5 | from variables import * 6 | 7 | 8 | def find_background_color(img): 9 | """ 10 | :param img: in BGR 11 | :return: in BGR; the most frequent color along the image margins 12 | note: a white background may come back as a slightly different white 13 | """ 14 | MARGIN = 3 15 | height, width, channel = img.shape 16 | top = img[MARGIN,:,:] 17 | left = img[:,MARGIN,:] 18 | right = img[:, width - MARGIN, :] 19 | bottom = img[height - MARGIN, :, :] 20 | edge_colors = np.concatenate((top, left, right, bottom), axis=0) 21 | unique_colors, frequencies = np.unique(edge_colors, axis=0, return_counts=True) 22 | return unique_colors[np.argmax(frequencies)] 23 | 24 | def kmeans_main_colors_in_bounding_boxes(img): 25 | """ 26 | 27 | :param img: the output of cv2.imread(); see https://github.com/algolia/color-extractor 28 | :return: bgr colors 29 | currently simplified: the bounding box's background color is returned for all three entries 30 | """ 31 | bg_color = find_background_color(img) 32 | return { 33 | "bg_color": bg_color, 34 | "main_color": bg_color, 35 | "other_color": bg_color 36 | } 37 | 38 | 39 | def kmeans_main_colors_in_whole_images(img): 40 | """ 41 | :param img: the output of cv2.imread(); see https://github.com/algolia/color-extractor 42 | :return: bgr colors, excluding the background color (the most frequent cluster) 43 | clustering runs in LAB space with Euclidean distance 44 | """ 45 | reshape_img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).reshape((-1,3)) 46 | kmeans = KMeans(n_clusters=COLOR_CONSIDERED_NUM + 1, max_iter=100, tol = 1.0) 47 | kmeans.fit(reshape_img) 48 | square_dist_sum, labels, cluster_center = kmeans.inertia_, kmeans.labels_, kmeans.cluster_centers_ 49 |
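# convert the LAB cluster centers back to BGR before picking the most frequent ones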
50 | cluster_center = cluster_center.astype('uint8') 51 | cluster_center = cv2.cvtColor(cluster_center.reshape(1,-1,3), cv2.COLOR_LAB2BGR).reshape(-1,3) 52 | 53 | unique_labels, frequencies = np.unique(labels, return_counts=True) 54 | unique_labels = [l for l,f in sorted(zip(unique_labels, frequencies), key=lambda x:x[1], reverse=True)] 55 | 56 | k_main_colors = np.array([[0,0,0]]) 57 | for i in range(COLOR_CONSIDERED_NUM + 1): 58 | k_main_colors = np.append(k_main_colors, cluster_center[unique_labels[i]]) 59 | 60 | k_main_colors = k_main_colors.reshape((-1,3)) 61 | k_main_colors = np.delete(k_main_colors, 0, axis=0) 62 | k_main_colors = np.delete(k_main_colors, 0, axis=0) # delete background color 63 | return k_main_colors 64 | 65 | 66 | 67 | def getColorsWithinSameTypeElements(samples, feature): 68 | """ 69 | 70 | :param samples: ELements with same types; like all body_text 71 | :param feature: "bg_color", "main_color", "other_color" 72 | :return: 73 | """ 74 | original_colors = np.array([[0,0,0]]) 75 | for element in samples: 76 | original_colors = np.append(original_colors, element.colors[feature].reshape((1,3)), axis=0) 77 | original_colors =np.delete(original_colors, 0, axis=0) 78 | cluster = DBSCAN(eps=PERCEPTUAL_DIS_FOR_DISTINCT_THEME_COLORS, min_samples=1, metric=dist_cie2000).fit(original_colors) 79 | n_cluster = len(set(cluster.labels_)) - (1 if -1 in cluster.labels_ else 0) 80 | 81 | cluster_centers = np.array([[0,0,0]]) 82 | for cl in range(n_cluster): 83 | indices = np.where(cluster.labels_ == cl) 84 | cluster_centers = np.append(cluster_centers, np.array([original_colors[indices].mean(axis = 0)]), axis=0) 85 | cluster_centers = np.delete(cluster_centers, 0, axis=0) 86 | return n_cluster, cluster_centers.astype('uint8') 87 | -------------------------------------------------------------------------------- /backend/infographicsLoader/infographicsLoader.py: -------------------------------------------------------------------------------- 1 | from variables import * 2 | from infographicsLoader.DataComponent import DataComponent, DataComponentForTimelineFormat 3 | import csv 4 | import cv2 5 | 6 | def constructDataComponents(img_file_name, bb_file_name): 7 | img = cv2.imread(img_file_name) 8 | 9 | infographics_data_components = { 10 | "title": [], 11 | "index": [], 12 | "body_text": [], 13 | "icon": [], 14 | "arrow": [] 15 | } 16 | 17 | with open(bb_file_name) as f: 18 | elements = csv.reader(f, delimiter=" ") 19 | for params in elements: 20 | curElem = DataComponent(params, img) 21 | if curElem.type in TITLE: 22 | infographics_data_components["title"].append(curElem) 23 | elif curElem.type in BODY_TEXT: 24 | infographics_data_components["body_text"].append(curElem) 25 | elif curElem.type in INDEX: 26 | infographics_data_components["index"].append(curElem) 27 | elif curElem.type in ICON: 28 | infographics_data_components["icon"].append(curElem) 29 | elif curElem.type in ARROW: 30 | infographics_data_components["arrow"].append(curElem) 31 | else: 32 | print("The element doesn't belong to any classes.") 33 | 34 | f.close() 35 | 36 | return infographics_data_components 37 | 38 | def constructDataComponentsForTimelineFormat(img_file_name, bb_file_name): 39 | img = cv2.imread(img_file_name) 40 | 41 | infographics_data_components = { 42 | "event_mark": [], 43 | "event_text": [], 44 | "annotation_mark": [], 45 | "annotation_text": [], 46 | "icon": [], 47 | "index":[], 48 | "main_body":[] 49 | } 50 | 51 | with open(bb_file_name) as f: 52 | elements = csv.reader(f, delimiter=" ") 53 | for 
params in elements: 54 | curElem = DataComponentForTimelineFormat(params, img) 55 | if curElem.type in EVENT_MARK: 56 | infographics_data_components["event_mark"].append(curElem) 57 | elif curElem.type in EVENT_TEXT: 58 | infographics_data_components["event_text"].append(curElem) 59 | elif curElem.type in ANNOTATION_MARK: 60 | infographics_data_components["annotation_mark"].append(curElem) 61 | elif curElem.type in ANNOTATION_TEXT: 62 | infographics_data_components["annotation_text"].append(curElem) 63 | elif curElem.type in ICON_2: 64 | infographics_data_components["icon"].append(curElem) 65 | elif curElem.type in INDEX_2: 66 | infographics_data_components["index"].append(curElem) 67 | elif curElem.type in MAIN_BODY: 68 | infographics_data_components["main_body"].append(curElem) 69 | else: 70 | print("The element doesn't belong to any classes.") 71 | 72 | f.close() 73 | 74 | return infographics_data_components -------------------------------------------------------------------------------- /backend/libs/__init__.py: -------------------------------------------------------------------------------- 1 | from .color_dist_cie2000 import dist_cie2000 2 | from .jump_k_means import Cluster -------------------------------------------------------------------------------- /backend/libs/color_dist_cie2000.py: -------------------------------------------------------------------------------- 1 | from colormath.color_objects import sRGBColor, LabColor 2 | from colormath.color_conversions import convert_color 3 | from colormath.color_diff import delta_e_cie2000 4 | 5 | def dist_cie2000(bgr1, bgr2): 6 | """ 7 | 8 | :param bgr1: for example [227, 236, 250] 9 | :param bgr2: for example [67, 99, 161] 10 | :return: delta_e_cie2000 distance 11 | """ 12 | color1_rgb = sRGBColor(bgr1[2] / 255.0, bgr1[1] / 255.0, bgr1[0] / 255.0) 13 | 14 | color2_rgb = sRGBColor(bgr2[2] / 255.0, bgr2[1] / 255.0, bgr2[0] / 255.0) 15 | 16 | # Convert from RGB to Lab Color Space 17 | color1_lab = convert_color(color1_rgb, LabColor) 18 | 19 | # Convert from RGB to Lab Color Space 20 | color2_lab = convert_color(color2_rgb, LabColor) 21 | 22 | # Find the color difference 23 | delta_e = delta_e_cie2000(color1_lab, color2_lab) 24 | 25 | return delta_e -------------------------------------------------------------------------------- /backend/libs/jump_k_means.py: -------------------------------------------------------------------------------- 1 | from sklearn.cluster import KMeans 2 | 3 | class Cluster(): 4 | """ 5 | see: https://github.com/algolia/color-extractor 6 | Use the K-Means algorithm to group pixels by clusters. The algorithm tries 7 | to determine the optimal number of clusters for the given pixels. 
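The number of clusters is picked with a 'jump method' style heuristic (cf. Sugar & James): each candidate k's distortion (inertia / number of pixels) is raised to a negative power (-1.5 in _square_distorsion) and the k with the largest change between consecutive transformed values is kept; see _jump below.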
8 | """ 9 | def __init__(self, settings=None): 10 | if settings is None: 11 | self._settings = self._default_settings() 12 | else: 13 | self._settings = settings 14 | 15 | self._kmeans_args = { 16 | 'max_iter': 50, 17 | 'tol': 1.0, 18 | } 19 | 20 | def get(self, img): 21 | a = self._settings['algorithm'] 22 | if a == 'kmeans': 23 | return self._jump(img) 24 | else: 25 | raise ValueError('Unknown algorithm {}'.format(a)) 26 | 27 | def _kmeans(self, img, k): 28 | kmeans = KMeans(n_clusters=k, **self._kmeans_args) 29 | kmeans.fit(img) 30 | return kmeans.inertia_, kmeans.labels_, kmeans.cluster_centers_ 31 | 32 | def _jump(self, img): 33 | npixels = img.size 34 | 35 | best = None 36 | prev_distorsion = 0 37 | largest_diff = float('-inf') 38 | 39 | for k in range(self._settings['min_k'], self._settings['max_k']): 40 | compact, labels, centers = self._kmeans(img, k) 41 | distorsion = Cluster._square_distorsion(npixels, compact, 1.5) 42 | diff = prev_distorsion - distorsion 43 | prev_distorsion = distorsion 44 | 45 | if diff > largest_diff: 46 | largest_diff = diff 47 | best = k, labels, centers 48 | 49 | return best 50 | 51 | @staticmethod 52 | def _default_settings(): 53 | return { 54 | 'min_k': 2, 55 | 'max_k': 3, 56 | 'algorithm': 'kmeans', 57 | } 58 | 59 | @staticmethod 60 | def _square_distorsion(npixels, compact, y): 61 | return pow(compact / npixels, -y) 62 | -------------------------------------------------------------------------------- /backend/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask == 1.1.2 2 | flask-cors==3.0.8 3 | Pillow == 7.2.0 4 | colormath == 3.0.0 5 | colour-science==0.3.15 6 | imutils == 0.5.3 7 | numpy == 1.19.1 8 | opencv-python == 4.1.2.30 9 | pandas == 1.1.0 10 | scikit-learn == 0.22.2 11 | scipy == 1.5.2 12 | tensorflow == 1.15.0 13 | torch == 1.3.1 14 | torchvision == 0.4.2 15 | tqdm == 4.48.0 16 | pymongo==3.5.1 17 | -------------------------------------------------------------------------------- /backend/run-data-backend.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export FLASK_APP=serve.py 3 | export FLASK_DEBUG=0 4 | flask run --host=127.0.0.1 --port=5000 5 | 6 | -------------------------------------------------------------------------------- /backend/serve.py: -------------------------------------------------------------------------------- 1 | from flask_cors import CORS 2 | from flask import Flask, send_file, request, jsonify, render_template, send_from_directory, Response 3 | from ProcessImage import process_image_api 4 | 5 | import json 6 | 7 | app = Flask(__name__) 8 | cors = CORS(app) 9 | 10 | app.register_blueprint(process_image_api) 11 | 12 | # ################################################################################ route 13 | 14 | if __name__ == '__main__': 15 | app.run(debug=True) 16 | -------------------------------------------------------------------------------- /backend/shapedetector/__init__.py: -------------------------------------------------------------------------------- 1 | from .shapedetector import ShapeDetector -------------------------------------------------------------------------------- /backend/shapedetector/shapedetector.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | 3 | class ShapeDetector: 4 | def __init__(self): 5 | pass 6 | 7 | def detect(self, c): 8 | shape = "unidentified" 9 | shape_type_code = -1 10 | peri = cv2.arcLength(c, True) 
11 | approx = cv2.approxPolyDP(c, 0.04 * peri, True) 12 | if len(approx) == 3: 13 | shape = "triangle" 14 | shape_type_code = 3 15 | elif len(approx) == 4: 16 | (x, y, w, h) = cv2.boundingRect(approx) 17 | ar = w / float(h) 18 | shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle" 19 | # an aspect ratio near 1 encodes a square (1), otherwise a rectangle (2) 20 | shape_type_code = 1 if ar >= 0.95 and ar <= 1.05 else 2 21 | elif len(approx) == 5: 22 | shape = "pentagon" 23 | shape_type_code = 5 24 | else: 25 | shape = "circle" 26 | shape_type_code = 6 27 | return shape_type_code -------------------------------------------------------------------------------- /backend/treeconstructor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/backend/treeconstructor/__init__.py -------------------------------------------------------------------------------- /backend/treeconstructor/helper.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import copy 4 | 5 | def cvtBRG2LAB(arr, dim): 6 | """ 7 | :param arr: [b,g,r] 8 | :param dim: 1D: [b,g,r] 2D: [[b,g,r]] 3D:[[[b,g,r]]] 9 | :return: 10 | """ 11 | if dim == "1D": 12 | arr = np.array(arr).reshape((1,1,3)).astype('uint8') 13 | arr = cv2.cvtColor(arr, cv2.COLOR_BGR2LAB).reshape(3,) 14 | elif dim == "2D": 15 | arr = np.array(arr).reshape((1, -1, 3)).astype('uint8') 16 | arr = cv2.cvtColor(arr, cv2.COLOR_BGR2LAB).reshape(-1,3) 17 | else: 18 | arr = cv2.cvtColor(arr, cv2.COLOR_BGR2LAB) 19 | return arr 20 | 21 | 22 | def getStatistic(samples, feature): 23 | """ 24 | :param samples: Elements of the same type, e.g. all body_text 25 | :param feature: x, y, w, h ...
26 | :return: 27 | """ 28 | 29 | values = [] 30 | for element in samples: 31 | values.append(element.location[feature]) 32 | return { 33 | "max": np.amax(values), 34 | "avg": np.average(values), 35 | "min":np.amin(values), 36 | "std": np.std(values) 37 | } 38 | 39 | def mergeWhiteGrayMaskIfNecessary(white_mask, gray_mask, low_thres, high_thres): 40 | margin_mask = np.zeros_like(white_mask) 41 | height, width = margin_mask.shape 42 | MARGIN = 2 43 | margin_mask[MARGIN,:] = 1 44 | margin_mask[:,MARGIN] = 1 45 | margin_mask[:, width - MARGIN] = 1 46 | margin_mask[height - MARGIN, :] = 1 47 | 48 | white_in_edge = np.logical_and(margin_mask == 1, white_mask == 1) 49 | gray_in_edge = np.logical_and(margin_mask == 1, gray_mask == 1) 50 | thre = 0.25 * width 51 | if (np.count_nonzero(white_in_edge) > thre and np.count_nonzero(gray_in_edge) > thre) \ 52 | or (np.count_nonzero(white_mask) > high_thres and np.count_nonzero(gray_mask) < low_thres): 53 | white_mask[gray_mask == 1] = 1 54 | gray_mask[gray_mask == 1] = 0 55 | elif np.count_nonzero(white_mask) < low_thres and np.count_nonzero(gray_mask) > high_thres: 56 | gray_mask[white_mask == 1] = 1 57 | white_mask[white_mask == 1] = 0 58 | return white_mask.astype(int), gray_mask.astype(int) 59 | 60 | # -----------------helper------------------- 61 | def getOneAvgColorForOneMask(mask, key, image): 62 | """ 63 | :param mask: segmented img 64 | :param key: label of roi 65 | :return: the avg color of this roi 66 | """ 67 | label_colors = image[mask == key] 68 | if len(label_colors) == 0: 69 | color = None 70 | else: 71 | color = np.average(label_colors, axis=0) 72 | return color 73 | 74 | 75 | def getAvgColorsBasedOnTreeNodeMap(masks, image): 76 | """ 77 | :param masks: segmented img 78 | :return: the avg colors of all roi with label >= 0 79 | """ 80 | max_label = np.max(masks) 81 | uni_labels = range(0, max_label + 1) 82 | colors = np.array([]) 83 | for label in uni_labels: 84 | color = getOneAvgColorForOneMask(masks, label, image) 85 | if color is None: 86 | color = [255, 255, 255] 87 | colors = np.append(colors, color) 88 | return colors.reshape((-1, 3)) 89 | 90 | def getColoredTemplateForCompared(h, w, lab_color): 91 | L = np.full((h, w), lab_color[0]) 92 | a = np.full((h, w), lab_color[1]) 93 | b = np.full((h, w), lab_color[2]) 94 | return cv2.merge([L, a, b]) 95 | 96 | def getBoundaryOfCells(pts): 97 | """ 98 | :param pts: n * 2 numpy array; each 1 * 2 is a cell. 
(i,j) in array coordinate 99 | :return: x, y, w, h in img coordinate 100 | """ 101 | [y_min, x_min] = np.min(pts, axis=0) 102 | [y_max, x_max] = np.max(pts, axis=0) 103 | return x_min, y_min, x_max - x_min, y_max - y_min 104 | 105 | # -----------------show results to files------------------- 106 | def displayComponents_2(masks, file_path, image, resized=False): 107 | """ 108 | :param masks: segemented img; like tree_node_map in this code 109 | :param file_path: file name for saving pictures 110 | :param resized: 111 | :return: 112 | """ 113 | colors = getAvgColorsBasedOnTreeNodeMap(masks, image).reshape(-1) 114 | colors = np.append(colors, [0, 128, 128]) # not recognized is black; for pixel with labels = -1 115 | colors = np.reshape(cv2.cvtColor(np.reshape(colors.astype('uint8'), (1, -1, 3)), cv2.COLOR_LAB2BGR), (-1,3)) 116 | display = colors[masks] 117 | if resized: 118 | display = cv2.resize(display, (700,700)) 119 | 120 | cv2.imwrite(file_path, np.hstack((display, cv2.cvtColor(image.astype('uint8'), cv2.COLOR_LAB2BGR)))) 121 | 122 | def displayComponents_3(masks, file_path, image, image_2, image_3, resized=False): 123 | """ 124 | :param masks: segemented img; like tree_node_map in this code 125 | :param file_path: file name for saving pictures 126 | :param resized: 127 | :return: 128 | """ 129 | colors = getAvgColorsBasedOnTreeNodeMap(masks, image).reshape(-1) 130 | colors = np.append(colors, [0, 128, 128]) # not recognized is black; for pixel with labels = -1 131 | colors = np.reshape(cv2.cvtColor(np.reshape(colors.astype('uint8'), (1, -1, 3)), cv2.COLOR_LAB2BGR), (-1,3)) 132 | display = colors[masks] 133 | if resized: 134 | display = cv2.resize(display, (700,700)) 135 | 136 | max_label = np.max(masks) 137 | random_colors = np.array([]) 138 | for i in range(0, max_label + 1): 139 | color = np.random.randint(0, 255, (3)).tolist() 140 | random_colors = np.append(random_colors, color) 141 | random_colors = np.append(random_colors, [0,128,128]) 142 | random_colors = np.reshape(cv2.cvtColor(np.reshape(random_colors.astype('uint8'), (1, -1, 3)), cv2.COLOR_LAB2BGR), (-1,3)) 143 | random_display = random_colors[masks] 144 | 145 | cv2.imwrite(file_path, np.hstack((display, random_display, cv2.cvtColor(image.astype('uint8'), cv2.COLOR_LAB2BGR), cv2.cvtColor(image_2.astype('uint8'), cv2.COLOR_LAB2BGR), image_3))) 146 | 147 | def displayMask(mask, key, name): 148 | """ 149 | :param mask: segmented img 150 | :param key: label of roi 151 | :param name: file name for saving the picture 152 | :return: in the picture, the white part is roi 153 | """ 154 | bw = copy.deepcopy(mask) 155 | bw[mask == key] = 255 156 | cv2.imwrite("masks/black_{}.png".format(name), bw) 157 | 158 | def displayColorfulMask(mask, key, image, name): 159 | """ 160 | :param mask: segmented img 161 | :param key: label of roi 162 | :param name: file name for saving the picture 163 | :return: in the picture, the colored part is roi 164 | """ 165 | color = getOneAvgColorForOneMask(mask, key, image) 166 | color = np.reshape(cv2.cvtColor(np.reshape(color.astype('uint8'), (1, -1, 3)), cv2.COLOR_LAB2BGR), (-1)) 167 | colored_mask = np.full(image.shape, 255) 168 | colored_mask[mask == key] = color 169 | cv2.imwrite("masks/colorful_{}.png".format(name), colored_mask) 170 | 171 | -------------------------------------------------------------------------------- /backend/treeconstructor/stageGetInitialTreeNodeMap.py: -------------------------------------------------------------------------------- 1 | import os 2 | import colour 3 | from glob 
import glob 4 | import re 5 | from variables import * 6 | from treeconstructor.helper import * 7 | 8 | import logging 9 | logger = logging.getLogger('stageGetInitialTreeNodeMap') 10 | logger.setLevel(logging.DEBUG) 11 | 12 | 13 | def getInitialTreeNodeMap(image, hsv_image): 14 | def getCurrentColorForMask(ind, image): 15 | """ 16 | :param ind: the index of the seed color 17 | :return: avg color around the seed color 18 | """ 19 | mid_color = image[ind[0], ind[1]] 20 | kernel_size = 5 21 | margin = int(kernel_size / 2) 22 | u_i, b_i = max(ind[0] - margin, 0), min(ind[0] + margin + 1, height) 23 | l_j, r_j = max(ind[1] - margin, 0), min(ind[1] + margin + 1, width) 24 | neighbours = image[u_i:b_i, l_j:r_j] 25 | B = getColoredTemplateForCompared(neighbours.shape[0], neighbours.shape[1], mid_color) 26 | dists = colour.delta_E(neighbours, B) 27 | colors = neighbours[dists < PERCEPTUAL_DIS_FOR_DISTINCT_BG] 28 | if len(colors) < int(kernel_size * kernel_size / 2): 29 | return False, [] 30 | return True, np.average(colors, axis=0).astype('uint8') 31 | 32 | def getMaskOfAColor(median_color, image, visited_map): 33 | """ 34 | :param median_color: lab color 35 | :return: 36 | A mask; 0 areas refer to areas not this color; 1 areas refer to areas with this color 37 | """ 38 | 39 | compared_color_template = getColoredTemplateForCompared(height, width, median_color) 40 | distance = colour.delta_E(image, compared_color_template) 41 | 42 | mask = np.where(visited_map == 1, 0, np.where(distance < PERCEPTUAL_DIS_FOR_DISTINCT_BG, 1, 0)) 43 | 44 | return mask 45 | 46 | # --- start --- 47 | node_ind = 0 48 | height, width = image.shape[:2] 49 | visited_map = np.zeros((height, width)) 50 | tree_node_map = np.full((height, width), -1) 51 | 52 | h,s,v = cv2.split(hsv_image) 53 | ## white 54 | white_mask = np.logical_and(s <= WHITE_S_THREH, v >= WHITE_V_THREH) 55 | visited_map[white_mask] = 1 56 | white_mask = white_mask.astype(int) 57 | 58 | gray_mask = np.logical_and(s <= WHITE_S_THREH, np.logical_and(v > BLACK_V_THREH, v < WHITE_V_THREH)) 59 | visited_map[gray_mask] = 1 60 | gray_mask = gray_mask.astype(int) 61 | 62 | black_mask = (v <= BLACK_V_THREH) 63 | visited_map[black_mask] = 1 64 | black_mask = black_mask.astype(int) 65 | 66 | white_mask, gray_mask = mergeWhiteGrayMaskIfNecessary(white_mask, gray_mask, 0.1 * height * width, 0.3 * height * width) 67 | 68 | 69 | while (visited_map == 0).any(): 70 | # find cur_color directly by finding the first unvisited pixel 71 | flag = True 72 | cur_first_unvisited_index = np.argwhere(visited_map == 0)[0] 73 | cur_color = image[cur_first_unvisited_index[0], cur_first_unvisited_index[1]] 74 | 75 | ## find mask 76 | if flag: 77 | mask = getMaskOfAColor(cur_color, image, visited_map) 78 | visited_map[mask == 1] = 1 79 | tree_node_map[mask == 1] = node_ind ## node_ind is one greater than node_ind_of_interest 80 | node_ind = node_ind + 1 81 | else: 82 | break 83 | 84 | return tree_node_map, [white_mask, gray_mask, black_mask] -------------------------------------------------------------------------------- /backend/treeconstructor/stageMergeSimilarColors.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import os 3 | import colour 4 | from glob import glob 5 | import re 6 | from sklearn.neighbors.kde import KernelDensity 7 | from scipy.signal import argrelextrema 8 | import numpy as np 9 | 10 | from treeconstructor.helper import * 11 | from variables import * 12 | 13 | 14 | import logging 15 | logger = 
logging.getLogger('stageMergeSimilarColors') 16 | logger.setLevel(logging.DEBUG) 17 | 18 | 19 | # merge: combined colors with similar hues; combined all white/gray/black into one white/gray/black 20 | # merged_tree_node_map: -1 indicates the pixels are not with seed colors or similar to any other areas; white/gray/black are annotated by wgb_node_index 21 | # merged_img: white indicates merged_tree_node_map == -1 22 | 23 | def mergeSimilarColors(tree_node_map, image, name, white_gray_black_masks): 24 | def getMergedAverageColor(label_range, node_in_this_cluster): 25 | labels = np.arange(0, label_range) 26 | label_of_roi = labels[node_in_this_cluster] 27 | color = np.average(image[np.isin(tree_node_map, label_of_roi)], axis=0) 28 | return color 29 | 30 | def runKDE(fit_arr, compared_arr, start_label_ind, output_label_arr, output_cluster_centers, type, bandwidth=3): 31 | """ 32 | :param fit_arr: 1d array which kde is run on. Like the selected v arrays or h arrays 33 | :param compared_arr: orignal h or v array 34 | :param start_label_ind: 35 | :param output_label_arr: 36 | :param output_cluster_centers: 37 | :param bandwidth: 38 | :return: 39 | """ 40 | kde = KernelDensity(kernel='gaussian', bandwidth=bandwidth).fit(fit_arr.reshape(-1, 1)) 41 | xgrid = np.linspace(fit_arr.min(), fit_arr.max()) 42 | e = kde.score_samples(xgrid.reshape(-1, 1)) 43 | mi, ma = argrelextrema(e, np.less)[0], argrelextrema(e, np.greater)[0] 44 | mi = xgrid[mi] 45 | 46 | inner_label_ind = start_label_ind 47 | 48 | if len(mi) != 0: 49 | for i, low in enumerate(mi): 50 | if i == 0: 51 | if type == "v": 52 | in_this_cluster = (s <= WHITE_S_THREH) & (compared_arr < low) 53 | elif type == "h": 54 | in_this_cluster = (output_label_arr == -1) & (compared_arr < low) 55 | else: 56 | if type == "v": 57 | in_this_cluster = (s <= WHITE_S_THREH) & (compared_arr < low) & (compared_arr >= mi[i - 1]) 58 | elif type == "h": 59 | in_this_cluster = (output_label_arr == -1) & (compared_arr < low) & (compared_arr >= mi[i - 1]) 60 | output_label_arr[in_this_cluster] = inner_label_ind 61 | color = getMergedAverageColor(len(colors), in_this_cluster) 62 | output_cluster_centers = np.append(output_cluster_centers, color) 63 | inner_label_ind = inner_label_ind + 1 64 | 65 | if type == "v": 66 | in_this_cluster = (s <= WHITE_S_THREH) & (compared_arr >= mi[-1]) 67 | elif type == "h": 68 | in_this_cluster = (output_label_arr == -1) & (compared_arr >= mi[-1]) 69 | 70 | output_label_arr[in_this_cluster] = inner_label_ind 71 | color = getMergedAverageColor(len(colors), in_this_cluster) 72 | output_cluster_centers = np.append(output_cluster_centers, color) 73 | 74 | else: 75 | in_this_cluster = output_label_arr == -1 76 | output_label_arr[in_this_cluster] = inner_label_ind 77 | color = getMergedAverageColor(len(colors), in_this_cluster) 78 | output_cluster_centers = np.append(output_cluster_centers, color) 79 | 80 | 81 | return inner_label_ind + 1, output_label_arr, output_cluster_centers 82 | 83 | def splitGrayAndWhite(start_label_ind, output_label_arr, output_cluster_centers): 84 | inner_label_ind = start_label_ind 85 | wg_arr = np.array([]) 86 | inner_label_ind = inner_label_ind + 1 87 | # white 88 | in_this_cluster = (s <= WHITE_S_THREH) & (v >= WHITE_V_THREH) 89 | if np.count_nonzero(in_this_cluster.astype(int)) != 0: 90 | output_label_arr[in_this_cluster] = inner_label_ind 91 | color = getMergedAverageColor(len(colors), in_this_cluster) 92 | output_cluster_centers = np.append(output_cluster_centers, color) 93 | wg_arr = np.append(wg_arr, 
inner_label_ind) 94 | inner_label_ind = inner_label_ind + 1 95 | else: 96 | wg_arr = np.append(wg_arr, -2) 97 | 98 | # gray 99 | in_this_cluster = (s <= WHITE_S_THREH) & (v < WHITE_V_THREH) 100 | if np.count_nonzero(in_this_cluster.astype(int)) != 0: 101 | output_label_arr[in_this_cluster]= inner_label_ind 102 | color = getMergedAverageColor(len(colors), in_this_cluster) 103 | output_cluster_centers = np.append(output_cluster_centers, color) 104 | wg_arr = np.append(wg_arr, inner_label_ind) 105 | else: 106 | wg_arr = np.append(wg_arr, -2) 107 | 108 | return inner_label_ind, output_label_arr, output_cluster_centers, wg_arr 109 | 110 | 111 | colors = getAvgColorsBasedOnTreeNodeMap(tree_node_map, image) 112 | 113 | if (len(colors) >= 1): 114 | hsv_colors = cv2.cvtColor(cv2.cvtColor(np.reshape(colors.astype('uint8'), (1,-1,3)), cv2.COLOR_LAB2BGR), cv2.COLOR_BGR2HSV) 115 | hsv_colors = np.reshape(hsv_colors, (-1,3)) 116 | h = hsv_colors[:,0] 117 | s = hsv_colors[:,1] 118 | v = hsv_colors[:,2] 119 | 120 | labels = np.full(h.shape, -1) 121 | label_ind = 0 122 | cluster_centers = np.array([]) 123 | 124 | wgb_node_ind = np.array([]) 125 | 126 | ## kde 127 | colorful_h = h[labels == -1] 128 | 129 | if(len(colorful_h) !=0): 130 | label_ind, labels, cluster_centers = runKDE(colorful_h, h, label_ind, labels, cluster_centers,"h", 3) 131 | 132 | merged_tree_map = labels[tree_node_map] 133 | merged_tree_map[tree_node_map == -1] = -1 134 | 135 | start_node = np.max(labels) + 1 136 | 137 | else: 138 | merged_tree_map = tree_node_map.copy() 139 | wgb_node_ind = np.array([]) 140 | cluster_centers = np.array([]) 141 | start_node = 0 142 | 143 | ## add white/gray/black 144 | for mm in white_gray_black_masks: 145 | if (mm == 1).any(): 146 | wgb_node_ind = np.append(wgb_node_ind, start_node) 147 | color = getOneAvgColorForOneMask(mm, 1, image) 148 | merged_tree_map[mm == 1] = start_node 149 | cluster_centers = np.append(cluster_centers, color, axis=0) 150 | start_node = start_node + 1 151 | else: 152 | wgb_node_ind = np.append(wgb_node_ind, -2) 153 | 154 | print("node in merged tree map: {}".format(start_node)) 155 | print("wgb node index") 156 | print(wgb_node_ind) 157 | 158 | cluster_centers = np.reshape(cluster_centers, (-1, 3)) 159 | cluster_centers = np.append(cluster_centers, [[0, 128, 128]], axis=0) # for those with tree_node_map == -1 160 | 161 | merged_image = cluster_centers[merged_tree_map] 162 | 163 | return merged_image, merged_tree_map, wgb_node_ind -------------------------------------------------------------------------------- /backend/treeconstructor/stageRemoveTextIconIndex.py: -------------------------------------------------------------------------------- 1 | from infographicsLoader.infographicsLoader import * 2 | 3 | import cv2 4 | from glob import glob 5 | import os 6 | import numpy as np 7 | import re 8 | import shutil 9 | 10 | import logging 11 | logger = logging.getLogger('stageRemoveTextIconIndex') 12 | logger.setLevel(logging.DEBUG) 13 | logging.disable(logging.DEBUG) # the flag 14 | # if logger.isEnabledFor(logging.DEBUG): # for block 15 | 16 | inx = 0 17 | def rm_text_icon_index_in_bb(img, datacomponent): 18 | tl_i, tl_j, br_i, br_j = datacomponent.conner["tl_i"], datacomponent.conner["tl_j"], datacomponent.conner["br_i"], datacomponent.conner["br_j"], 19 | bg_color = datacomponent.colors["bg_color"] 20 | bg_color = tuple([int(x) for x in bg_color]) 21 | 22 | bb_content = img[tl_i:br_i, tl_j:br_j] 23 | 24 | # ----- remove text/icon/index ----- # 25 | gray = cv2.cvtColor(bb_content, 
cv2.COLOR_BGR2GRAY) 26 | 27 | gray_threshold = cv2.threshold(gray,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1] 28 | 29 | uni, fre = np.unique(gray_threshold, return_counts=1) 30 | uni = [u[0] for u in sorted(zip(uni,fre), key = lambda x:x[1])] 31 | if len(uni) > 1 and uni[0] < uni[1]: 32 | gray_threshold = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1] 33 | 34 | ## dilation 35 | dilate_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) 36 | dilate = cv2.dilate(gray_threshold, dilate_kernel, iterations=1) 37 | 38 | ## replace color 39 | 40 | bg_dilate = cv2.dilate(dilate, dilate_kernel, iterations=1) 41 | bg_dilate[dilate == 255] = 0 # outer dilation removing the small dilation 42 | near_bg_color = np.mean(bb_content[bg_dilate == 255], axis=0).astype('uint8') 43 | bb_content[dilate == 255] = near_bg_color 44 | 45 | ## smooth, making it more similar to near colors 46 | for i in range(10): 47 | bb_content = cv2.medianBlur(bb_content, 5) 48 | 49 | img[tl_i:br_i, tl_j:br_j] = bb_content 50 | 51 | if logger.isEnabledFor(logging.DEBUG): 52 | global inx 53 | cv2.imwrite("rm_tii_debug/{}.png".format(inx), np.vstack((gray, gray_threshold, dilate))) 54 | inx = inx + 1 55 | 56 | return img 57 | 58 | # API 59 | def remove_text_icon_index_from_image(img_file_name, bb_file_name, mode): 60 | img = cv2.imread(img_file_name) 61 | 62 | # try: 63 | if mode == "4300_infographics": 64 | infographics_data_components = constructDataComponents(img_file_name, bb_file_name) 65 | else: 66 | infographics_data_components = constructDataComponentsForTimelineFormat(img_file_name, bb_file_name) 67 | for key, values in infographics_data_components.items(): 68 | if mode == "all_infographics": 69 | if key in ["event_mark", "annotation_mark", "main_body"]: 70 | continue 71 | for curDTComp in values: 72 | img = rm_text_icon_index_in_bb(img, curDTComp) 73 | 74 | return img, infographics_data_components 75 | 76 | 77 | if __name__ == "__main__": 78 | # mode = "4300_infographics" 79 | mode = "all_infographics" 80 | data_root = DATA_ROOT if mode == "4300_infographics" else DATA_ROOT_2 81 | 82 | finish_removing_folder = os.path.join(data_root, REMOVED_TEXT_ICON_INDEX_FOLDER) 83 | 84 | if not os.path.exists(finish_removing_folder): 85 | os.makedirs(finish_removing_folder) 86 | 87 | info_files = sorted(glob(os.path.join(data_root, BOUNDING_BOX_FOLDER, "*.txt"))) 88 | for img_file_name in info_files: 89 | file_sequence_order = re.findall(r'\d+', img_file_name.split('/')[-1])[0] 90 | file_sequence_order = str(file_sequence_order) 91 | img_file_name = os.path.join(data_root, INFOGRAPHICS_FOLDER, file_sequence_order+".jpg") 92 | bb_file_name = os.path.join(data_root, BOUNDING_BOX_FOLDER, file_sequence_order+".txt") 93 | 94 | if os.path.exists(os.path.join(finish_removing_folder, file_sequence_order + ".jpg")): 95 | logger.debug("skip " + file_sequence_order) 96 | continue 97 | 98 | try: 99 | img,_ = remove_text_icon_index_from_image(img_file_name, bb_file_name, mode) 100 | cv2.imwrite(os.path.join(finish_removing_folder, file_sequence_order + ".jpg"), img) 101 | except: 102 | print(file_sequence_order) 103 | 104 | -------------------------------------------------------------------------------- /backend/treeconstructor/treeConstructor.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | from glob import glob 4 | 5 | from treeconstructor.helper import * 6 | from variables import * 7 | 8 | from treeconstructor.stageRemoveTextIconIndex import 
remove_text_icon_index_from_image 9 | from treeconstructor.stageGetInitialTreeNodeMap import getInitialTreeNodeMap 10 | from treeconstructor.stageMergeSimilarColors import mergeSimilarColors 11 | from treeconstructor.stageGetConstructedShapeTree import getConstructedShapeTree 12 | 13 | def featureExtractionPipeline(img_file_name, bb_file_name, mode, num_in_img): 14 | # ----stage 1----- 15 | img, infographics_data_components = remove_text_icon_index_from_image(img_file_name, bb_file_name, mode) 16 | 17 | # ----stage 1.5 some preprocessing----- 18 | img = cv2.resize(img, (300,300)) 19 | height, width = img.shape[:2] 20 | small_noise_threshold = 0.001 * height * width 21 | 22 | 23 | ## img preprocessing 24 | img = cv2.medianBlur(img, 5) 25 | img = cv2.blur(img, (3, 3)) 26 | 27 | hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) 28 | 29 | img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB) 30 | 31 | 32 | # ----stage 2----- 33 | tree_node_map, wgb_masks = getInitialTreeNodeMap(img, hsv_img) 34 | 35 | # ----stage 3----- 36 | 37 | merged_img, merged_tree_node_map, wgb_node_index = mergeSimilarColors(tree_node_map, img, str(num_in_img), wgb_masks) # -1 in tree_node_map still is -1 in merged_tree_node_map 38 | 39 | # ----stage 4----- 40 | constructed_tree, final_tree_node_map = getConstructedShapeTree(img, small_noise_threshold, merged_img, merged_tree_node_map, wgb_node_index) 41 | 42 | # ----print------- 43 | orignal_img = cv2.imread(img_file_name) 44 | orignal_img = cv2.resize(orignal_img,(300,300)) 45 | displayComponents_3(final_tree_node_map, "./masks/{}.jpg".format(num_in_img), merged_img, img, orignal_img) 46 | 47 | return constructed_tree 48 | 49 | 50 | if __name__ == "__main__": 51 | # mode = "4300_infographics" 52 | mode = "all_infographics" 53 | data_root = DATA_ROOT if mode == "4300_infographics" else DATA_ROOT_2 54 | 55 | if not os.path.exists("masks"): 56 | os.makedirs("masks") 57 | 58 | num_in_img = 644 59 | img_file_name = os.path.join(data_root, INFOGRAPHICS_FOLDER, "{}.jpg".format(num_in_img)) 60 | bb_file_name = os.path.join(data_root, BOUNDING_BOX_FOLDER, "{}.txt".format(num_in_img)) 61 | num_in_img = img_file_name.split("/")[-1].split(".")[0] 62 | featureExtractionPipeline(img_file_name, bb_file_name, mode, num_in_img) -------------------------------------------------------------------------------- /backend/utils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/backend/utils/__init__.py -------------------------------------------------------------------------------- /backend/utils/getinterval.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | def getinterval(flow): 4 | acc_internal = 0 5 | for i in range(len(flow) - 1): 6 | acc_internal += math.sqrt(math.pow(abs(flow[i][1] - flow[i+1][1]), 2) + math.pow(abs(flow[i][2] - flow[i+1][2]), 2)) 7 | return acc_internal -------------------------------------------------------------------------------- /backend/utils/plotImageWithLegend.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import numpy as np 3 | import math 4 | import logging 5 | logger = logging.getLogger('tester') 6 | logger.setLevel(logging.DEBUG) 7 | 8 | class Montage(object): 9 | def __init__(self,initial_image): 10 | self.montage = initial_image 11 | self.height, self.width= self.montage.shape[:2] 12 | 13 | def 
append(self, image): 14 | new_image = np.full((image.shape[0], self.width, 3), 255, dtype='uint8') # white padding; uint8 keeps the stacked montage an 8-bit image 15 | try: 16 | new_image[:, :image.shape[1], :] = image 17 | except Exception: 18 | logger.debug("failed to append elements or legends") 19 | self.montage = np.vstack((self.montage, new_image)) 20 | 21 | def show(self): 22 | cv2.imshow('montage', self.montage) 23 | cv2.waitKey() 24 | cv2.destroyAllWindows() 25 | 26 | def generate_color_legend(colors, legend_width): 27 | """ 28 | 29 | :param colors: n colors x 3; in bgr 30 | :param legend_width: width of the legend strip in pixels 31 | :return: a (22 x legend_width x 3) uint8 image: a black rule, 15 rows of color swatches, another black rule, and white padding 32 | """ 33 | gap = math.ceil(legend_width / len(colors)) 34 | legend = np.full((1+15+1+5, legend_width, 3), 255) 35 | 36 | legend[0,:,:] = [0,0,0] # black top rule 37 | for i in range(0, legend_width, gap): 38 | legend[1:16, i: i + gap, :] = colors[math.floor(i/gap)] 39 | legend[16,:,:] = [0,0,0] # black bottom rule 40 | return legend.astype('uint8') 41 | 42 | 43 | def stitchElementsAndExtractedColors(img, annotated_img, infographics_elements, img_theme_colors, output_file): 44 | """ 45 | 46 | :param img: img without annotation 47 | :param annotated_img: 48 | :param infographics_elements: {"title": [], "arrow":[]} 49 | :param img_theme_colors: 2D array, in bgr; 50 | :param output_file: 51 | :return: 52 | """ 53 | resize_height, resize_width = np.multiply(img.shape[:2], 0.5).astype('int') 54 | annotated_img = cv2.resize(annotated_img, (resize_width, resize_height)) ## resize(_, (width, height)) 55 | m = Montage(annotated_img) 56 | 57 | bg_legend = generate_color_legend(img_theme_colors, resize_width) 58 | m.append(bg_legend) 59 | 60 | for key, values in infographics_elements.items(): 61 | for curElem in values: 62 | tl_i, tl_j, br_i, br_j = curElem.conner["tl_i"], curElem.conner["tl_j"], curElem.conner["br_i"], curElem.conner["br_j"] 63 | bb_content = img[tl_i:br_i, tl_j:br_j] 64 | 65 | colors = [curElem.colors["bg_color"], curElem.colors["main_color"], curElem.colors["other_color"]] 66 | 67 | legend = generate_color_legend(colors, resize_width) 68 | 69 | m.append(bb_content) 70 | m.append(legend) 71 | 72 | cv2.imwrite(output_file, m.montage) 73 | 74 | 75 | 76 | 77 | 78 | -------------------------------------------------------------------------------- /backend/vaeacimputation/__init__.py: -------------------------------------------------------------------------------- 1 | from .vaeac_impute import impute -------------------------------------------------------------------------------- /backend/vaeacimputation/datasets.py: -------------------------------------------------------------------------------- 1 | from os.path import join, exists, isdir 2 | 3 | import torch 4 | from torch.utils.data import Dataset 5 | from torchvision.datasets.folder import default_loader 6 | from torchvision.transforms import CenterCrop, Compose, Normalize, ToTensor 7 | 8 | from .mask_generators import ImageMaskGenerator 9 | 10 | 11 | def compute_normalization(data, one_hot_max_sizes): 12 | """ 13 | Compute the normalization parameters (i.e. the mean to subtract and the std 14 | to divide by) for each feature of the dataset. 15 | For categorical features the mean is zero and the std is one. 16 | The i-th feature is treated as categorical if one_hot_max_sizes[i] >= 2. 17 | Returns two vectors: means and stds.
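
    Illustrative example (an added sketch, assuming torch's default
    unbiased std; feature 0 is numerical, feature 1 is a 3-way
    categorical feature with a missing entry):

        >>> data = torch.tensor([[1., 0.], [3., 1.], [5., float('nan')]])
        >>> compute_normalization(data, [1, 3])
        (tensor([3., 0.]), tensor([2., 1.]))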
18 | """ 19 | norm_vector_mean = torch.zeros(len(one_hot_max_sizes)) 20 | norm_vector_std = torch.ones(len(one_hot_max_sizes)) 21 | for i, size in enumerate(one_hot_max_sizes): 22 | if size >= 2: 23 | continue 24 | v = data[:, i] 25 | v = v[~torch.isnan(v)] 26 | vmin, vmax = v.min(), v.max() 27 | vmean = v.mean() 28 | vstd = v.std() 29 | norm_vector_mean[i] = vmean 30 | norm_vector_std[i] = vstd 31 | return norm_vector_mean, norm_vector_std 32 | 33 | 34 | class CelebA(Dataset): 35 | """CelebA dataset.""" 36 | 37 | def __init__(self, root_dir, partition_file, mode, transform=None): 38 | """ 39 | Args: 40 | root_dir (string): Directory with all the images. 41 | partition_file (string): File with the partition list. 42 | mode (string): Used part of dataset: 43 | train, test or valid. 44 | transform (callable, 45 | optional): Optional transform to be applied 46 | on a sample. 47 | """ 48 | if not exists(root_dir): 49 | err = 'Celeba aligned images directory is not found: %s' % root_dir 50 | raise FileNotFoundError(err) 51 | if not isdir(root_dir): 52 | err = '%s must be a directory with aligned images' % root_dir 53 | raise NotADirectoryError(err) 54 | if not exists(partition_file): 55 | err = 'Celeba partition file is not found: %s' % partition_file 56 | raise FileNotFoundError(err) 57 | 58 | self.root_dir = root_dir 59 | self.partition = { 60 | 'train': [], 61 | 'test': [], 62 | 'valid': [] 63 | } 64 | part = { 65 | '0': 'train', 66 | '1': 'valid', 67 | '2': 'test' 68 | } 69 | for line in open(partition_file): 70 | if not line.strip(): 71 | continue 72 | filename, part_id = line.strip().split(' ') 73 | self.partition[part[part_id]].append(filename) 74 | if mode not in self.partition.keys(): 75 | err = "Mode must be 'train', 'valid' or 'test', " 76 | err += "but %s got instead." 77 | err = err % str(mode) 78 | raise ValueError(err) 79 | self.mode = mode 80 | self.transform = transform 81 | 82 | def __len__(self): 83 | return len(self.partition[self.mode]) 84 | 85 | def __getitem__(self, idx): 86 | img_name = join(self.root_dir, 87 | self.partition[self.mode][idx]) 88 | image = default_loader(img_name) 89 | 90 | if self.transform is not None: 91 | image = self.transform(image) 92 | 93 | return image 94 | 95 | 96 | class LengthBounder(Dataset): 97 | """Dataset wrapper which bounds the length of the underlying dataset.""" 98 | def __init__(self, dataset, max_length): 99 | self.dataset = dataset 100 | self.max_length = max_length 101 | 102 | def __len__(self): 103 | return min(len(self.dataset), self.max_length) 104 | 105 | def __getitem__(self, idx): 106 | return self.dataset[idx] 107 | 108 | 109 | class ZipDatasets(Dataset): 110 | """ 111 | Dataset wrapper which returns a list of objects 112 | from a number of datasets. 113 | It behaves like standard zip(dataset_1, dataset_2, ...), 114 | i. e. ZipDataset(dataset_1, dataset_2, ...)[i] is 115 | [dataset_1[i], dataset_2[i], ...] 116 | """ 117 | def __init__(self, *args): 118 | self.args = args 119 | 120 | def __len__(self): 121 | return min(len(arg) for arg in self.args) 122 | 123 | def __getitem__(self, idx): 124 | return [arg[idx] for arg in self.args] 125 | 126 | 127 | class GeneratorDataset(Dataset): 128 | """ 129 | Generates dataset by applying generator to each object 130 | of the original dataset. 131 | Used to generate masks for inpainting on the test set. 
132 | """ 133 | def __init__(self, generator, original_dataset, batch_size=16): 134 | self.generator = generator 135 | self.batch_size = batch_size 136 | self.original_dataset = original_dataset 137 | 138 | self.size = len(original_dataset) 139 | self.data = [] 140 | idx = 0 141 | for idx in range(0, self.size, self.batch_size): 142 | cond_batch = [] 143 | for j in range(self.batch_size): 144 | cond = original_dataset[min(j + idx, 145 | len(original_dataset) - 1)] 146 | cond_batch.append(cond[None]) 147 | cond_batch = torch.cat(cond_batch) 148 | batch = generator(cond_batch) 149 | self.data.append(batch) 150 | self.data = torch.cat(self.data) 151 | 152 | def __len__(self): 153 | return self.size 154 | 155 | def __getitem__(self, idx): 156 | return self.data[idx] 157 | 158 | 159 | def load_dataset(name): 160 | """ 161 | Returns dataset for image inpainting. 162 | Now returns only CelebA dataset (train, validation and test parts of it) 163 | and generated masks for the test part. 164 | """ 165 | celeba_transforms = Compose([ 166 | CenterCrop(128), 167 | ToTensor(), 168 | Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)) 169 | ]) 170 | celeba_root_dir = '/dbstore/datasets/celebA' # change it for your system! 171 | celeba_img_dir = join(celeba_root_dir, 'img_align_celeba') 172 | celeba_partition = join(celeba_root_dir, 'list_eval_partition.txt') 173 | 174 | if name == 'celeba_train': 175 | return CelebA( 176 | celeba_img_dir, 177 | celeba_partition, 178 | 'train', 179 | celeba_transforms 180 | ) 181 | elif name == 'celeba_val': 182 | # in order to speed up training we restrict validation set 183 | # to have only 1024 images 184 | return LengthBounder(CelebA( 185 | celeba_img_dir, 186 | celeba_partition, 187 | 'valid', 188 | celeba_transforms), 1024) 189 | elif name == 'celeba_test': 190 | # in order to demonstrate the inpainting results we don't need 191 | # the whole test set, so we use 256 test images only 192 | return LengthBounder(CelebA( 193 | celeba_img_dir, 194 | celeba_partition, 195 | 'test', 196 | celeba_transforms), 256) 197 | elif name == 'celeba_inpainting_masks': 198 | return GeneratorDataset(ImageMaskGenerator(), 199 | load_dataset('celeba_test')) 200 | else: 201 | raise ValueError('Unknown dataset %s' % str(name)) 202 | -------------------------------------------------------------------------------- /backend/vaeacimputation/imputation_networks.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.optim import Adam 3 | 4 | from .mask_generators import MCARGenerator 5 | from .nn_utils import ResBlock, MemoryLayer, SkipConnection 6 | from .prob_utils import CategoricalToOneHotLayer, GaussianCategoricalLoss, \ 7 | GaussianCategoricalSampler, SetGaussianSigmasToOne 8 | 9 | 10 | def get_imputation_networks(one_hot_max_sizes): 11 | """ 12 | This function builds neural networks for imputation given 13 | the list of one-hot max sizes of the dataset features. 14 | It returns a dictionary with those neural networks together with 15 | reconstruction log probability function, optimizer constructor, 16 | sampler from the generator output, mask generator, batch size, 17 | and scale factor for the stability of the variational lower bound 18 | optimization. 
19 | """ 20 | 21 | width = 256 22 | depth = 10 23 | latent_dim = 64 24 | 25 | # Proposal network 26 | proposal_layers = [ 27 | CategoricalToOneHotLayer(one_hot_max_sizes + 28 | [0] * len(one_hot_max_sizes), 29 | list(range(len(one_hot_max_sizes)))), 30 | nn.Linear(sum(max(1, x) for x in one_hot_max_sizes) + 31 | len(one_hot_max_sizes) * 2, 32 | width), 33 | nn.LeakyReLU(), 34 | ] 35 | for i in range(depth): 36 | proposal_layers.append( 37 | SkipConnection( 38 | nn.Linear(width, width), 39 | nn.LeakyReLU(), 40 | ) 41 | ) 42 | proposal_layers.append( 43 | nn.Linear(width, latent_dim * 2) 44 | ) 45 | proposal_network = nn.Sequential(*proposal_layers) 46 | 47 | # Prior network 48 | prior_layers = [ 49 | CategoricalToOneHotLayer(one_hot_max_sizes + 50 | [0] * len(one_hot_max_sizes)), 51 | MemoryLayer('#input'), 52 | nn.Linear(sum(max(1, x) for x in one_hot_max_sizes) + 53 | len(one_hot_max_sizes), 54 | width), 55 | nn.LeakyReLU(), 56 | ] 57 | for i in range(depth): 58 | prior_layers.append( 59 | SkipConnection( 60 | # skip-connection from prior network to generative network 61 | MemoryLayer('#%d' % i), 62 | nn.Linear(width, width), 63 | nn.LeakyReLU(), 64 | ) 65 | ) 66 | prior_layers.extend([ 67 | MemoryLayer('#%d' % depth), 68 | nn.Linear(width, latent_dim * 2), 69 | ]) 70 | prior_network = nn.Sequential(*prior_layers) 71 | 72 | # Generative network 73 | generative_layers = [ 74 | nn.Linear(64, 256), 75 | nn.LeakyReLU(), 76 | ] 77 | for i in range(depth + 1): 78 | generative_layers.append( 79 | SkipConnection( 80 | # skip-connection from prior network to generative network 81 | MemoryLayer('#%d' % (depth - i), True), 82 | nn.Linear(width * 2, width), 83 | nn.LeakyReLU(), 84 | ) 85 | ) 86 | generative_layers.extend([ 87 | MemoryLayer('#input', True), 88 | nn.Linear(width + sum(max(1, x) for x in one_hot_max_sizes) + 89 | len(one_hot_max_sizes), 90 | sum(max(2, x) for x in one_hot_max_sizes)), 91 | SetGaussianSigmasToOne(one_hot_max_sizes), 92 | ]) 93 | generative_network = nn.Sequential(*generative_layers) 94 | 95 | return { 96 | 'batch_size': 64, 97 | 98 | 'reconstruction_log_prob': GaussianCategoricalLoss(one_hot_max_sizes), 99 | 100 | 'sampler': GaussianCategoricalSampler(one_hot_max_sizes, 101 | sample_most_probable=False), 102 | 103 | 'vlb_scale_factor': 1 / len(one_hot_max_sizes), 104 | 105 | 'optimizer': lambda parameters: Adam(parameters, lr=3e-4), 106 | 107 | 'mask_generator': MCARGenerator(0.2), 108 | 109 | 'proposal_network': proposal_network, 110 | 111 | 'prior_network': prior_network, 112 | 113 | 'generative_network': generative_network, 114 | } 115 | -------------------------------------------------------------------------------- /backend/vaeacimputation/nn_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch import nn 3 | 4 | 5 | class ResBlock(nn.Module): 6 | """ 7 | Usual full pre-activation ResNet bottleneck block. 8 | For more information see 9 | He, K., Zhang, X., Ren, S., & Sun, J. (2016, October). 10 | Identity mappings in deep residual networks. 11 | European Conference on Computer Vision (pp. 630-645). 12 | Springer, Cham. 
13 | ArXiv link: https://arxiv.org/abs/1603.05027 14 | """ 15 | 16 | def __init__(self, outer_dim, inner_dim): 17 | super().__init__() 18 | self.net = nn.Sequential( 19 | nn.BatchNorm2d(outer_dim), 20 | nn.LeakyReLU(), 21 | nn.Conv2d(outer_dim, inner_dim, 1), 22 | nn.BatchNorm2d(inner_dim), 23 | nn.LeakyReLU(), 24 | nn.Conv2d(inner_dim, inner_dim, 3, 1, 1), 25 | nn.BatchNorm2d(inner_dim), 26 | nn.LeakyReLU(), 27 | nn.Conv2d(inner_dim, outer_dim, 1), 28 | ) 29 | 30 | def forward(self, input): 31 | return input + self.net(input) 32 | 33 | 34 | class SkipConnection(nn.Module): 35 | """ 36 | Skip-connection over the sequence of layers in the constructor. 37 | The module passes input data sequentially through these layers 38 | and then adds original data to the result. 39 | """ 40 | def __init__(self, *args): 41 | super().__init__() 42 | self.inner_net = nn.Sequential(*args) 43 | 44 | def forward(self, input): 45 | return input + self.inner_net(input) 46 | 47 | 48 | class MemoryLayer(nn.Module): 49 | """ 50 | If output=False, this layer stores its input in a static class dictionary 51 | `storage` with the key `id` and then passes the input to the next layer. 52 | If output=True, this layer takes the stored tensor from the static storage. 53 | If add=True, it returns the sum of the stored tensor and the input, 54 | otherwise it returns their concatenation. 55 | If the tensor with the specified `id` is not in `storage` when the layer 56 | with output=True is called, an exception is raised. 57 | 58 | The layer is used to make skip-connections inside an nn.Sequential network 59 | or between several nn.Sequential networks without unnecessary code 60 | complication. 61 | The usage pattern is 62 | ``` 63 | net1 = nn.Sequential( 64 | MemoryLayer('#1'), 65 | MemoryLayer('#0.1'), 66 | nn.Linear(512, 256), 67 | nn.LeakyReLU(), 68 | MemoryLayer('#0.1', output=True, add=False), 69 | # here add cannot be True because the dimensions mismatch 70 | nn.Linear(768, 256), 71 | # the dimension after the concatenation with skip-connection 72 | # is 512 + 256 = 768 73 | ) 74 | net2 = nn.Sequential( 75 | nn.Linear(512, 512), 76 | MemoryLayer('#1', output=True, add=True), 77 | ... 78 | ) 79 | b = net1(a) 80 | d = net2(c) 81 | # net2 must be called after net1, 82 | # otherwise tensor '#1' will not be in `storage` 83 | ``` 84 | """ 85 | 86 | storage = {} 87 | 88 | def __init__(self, id, output=False, add=False): 89 | super().__init__() 90 | self.id = id 91 | self.output = output 92 | self.add = add 93 | 94 | def forward(self, input): 95 | if not self.output: 96 | self.storage[self.id] = input 97 | return input 98 | else: 99 | if self.id not in self.storage: 100 | err = 'MemoryLayer: id \'%s\' is not initialized. ' 101 | err += 'You must execute MemoryLayer with the same id ' 102 | err += 'and output=False before this layer.' 103 | raise ValueError(err % self.id) 104 | stored = self.storage[self.id] 105 | if not self.add: 106 | data = torch.cat([input, stored], 1) 107 | else: 108 | data = input + stored 109 | return data 110 | -------------------------------------------------------------------------------- /backend/vaeacimputation/train_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from tqdm import tqdm 3 | 4 | 5 | def extend_batch(batch, dataloader, batch_size): 6 | """ 7 | If the batch size is less than batch_size, extends it with 8 | data from the dataloader until it reaches the required size. 9 | Here batch is a tensor. 10 | Returns the extended batch.
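
    For example (an added sketch; `dataloader` stands for any DataLoader
    over the same data): a trailing batch of 50 rows with batch_size 64
    is topped up with the first 14 rows drawn from a fresh iterator:

        batch = extend_batch(batch, dataloader, 64)  # now batch.shape[0] == 64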
11 | """ 12 | while batch.shape[0] != batch_size: 13 | dataloader_iterator = iter(dataloader) 14 | nw_batch = next(dataloader_iterator) 15 | if nw_batch.shape[0] + batch.shape[0] > batch_size: 16 | nw_batch = nw_batch[:batch_size - batch.shape[0]] 17 | batch = torch.cat([batch, nw_batch], 0) 18 | return batch 19 | 20 | 21 | def extend_batch_tuple(batch, dataloader, batch_size): 22 | """ 23 | The same as extend_batch, but here the batch is a list of tensors 24 | to be extended. All tensors are assumed to have the same first dimension. 25 | Returns the extended batch (i. e. list of extended tensors). 26 | """ 27 | while batch[0].shape[0] != batch_size: 28 | dataloader_iterator = iter(dataloader) 29 | nw_batch = next(dataloader_iterator) 30 | if nw_batch[0].shape[0] + batch[0].shape[0] > batch_size: 31 | nw_batch = [nw_t[:batch_size - batch[0].shape[0]] 32 | for nw_t in nw_batch] 33 | batch = [torch.cat([t, nw_t], 0) for t, nw_t in zip(batch, nw_batch)] 34 | return batch 35 | 36 | 37 | def get_validation_iwae(val_dataloader, mask_generator, batch_size, 38 | model, num_samples, verbose=False): 39 | """ 40 | Compute mean IWAE log likelihood estimation of the validation set. 41 | Takes validation dataloader, mask generator, batch size, model (VAEAC) 42 | and number of IWAE latent samples per object. 43 | Returns one float - the estimation. 44 | """ 45 | cum_size = 0 46 | avg_iwae = 0 47 | iterator = val_dataloader 48 | if verbose: 49 | iterator = tqdm(iterator) 50 | for batch in iterator: 51 | init_size = batch.shape[0] 52 | batch = extend_batch(batch, val_dataloader, batch_size) 53 | mask = mask_generator(batch) 54 | if next(model.parameters()).is_cuda: 55 | batch = batch.cuda() 56 | mask = mask.cuda() 57 | with torch.no_grad(): 58 | iwae = model.batch_iwae(batch, mask, num_samples)[:init_size] 59 | avg_iwae = (avg_iwae * (cum_size / (cum_size + iwae.shape[0])) + 60 | iwae.sum() / (cum_size + iwae.shape[0])) 61 | cum_size += iwae.shape[0] 62 | if verbose: 63 | iterator.set_description('Validation IWAE: %g' % avg_iwae) 64 | return float(avg_iwae) 65 | -------------------------------------------------------------------------------- /backend/vaeacimputation/trained_vaeac_model: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/backend/vaeacimputation/trained_vaeac_model -------------------------------------------------------------------------------- /backend/vaeacimputation/vaeac_impute.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | from copy import deepcopy 3 | from importlib import import_module 4 | from math import ceil 5 | # from os.path import exists, join 6 | import os 7 | from sys import stderr 8 | import warnings 9 | 10 | import numpy as np 11 | import pandas as pd 12 | import torch 13 | from torch.utils.data import DataLoader 14 | from tqdm import tqdm 15 | 16 | from .datasets import compute_normalization 17 | from .imputation_networks import get_imputation_networks 18 | from .train_utils import extend_batch, get_validation_iwae 19 | from .VAEAC import VAEAC 20 | 21 | warnings.filterwarnings("ignore", category=UserWarning) 22 | 23 | num_imputations = 10 24 | one_hot_max_sizes = [1, 1, 1, 1, 1, 1, 1, 112, 1, 1, 1, 1, 1, 1, 1, 2, 61, 1, 1, 1, 1, 1, 1, 1, 4, 41, 1, 1, 1, 1, 1, 1, 1, 6, 27, 1, 1, 1, 1, 1, 1, 1, 8, 35, 1, 1, 1, 1, 1, 1, 1, 10, 47, 1, 1, 1, 1, 1, 1, 1, 12, 111, 1, 1, 
1, 1, 1, 1, 1, 14, 110, 1, 1, 1, 1, 1, 1, 1, 16, 37, 1, 1, 1, 1, 1, 1, 1, 18, 31, 1, 1, 1, 1, 1, 1, 1, 20, 67, 1, 1, 1, 1, 1, 1, 1, 22, 29, 1, 1, 1, 1, 1, 1, 1, 24, 37, 1, 1, 1, 1, 1, 1, 1, 26, 39, 1, 1, 1, 1, 1, 1, 1, 28, 49, 1, 1, 1, 1, 1, 1, 1, 30, 59, 1, 1, 1, 1, 1, 1, 1, 32, 59, 1, 1, 1, 1, 1, 1, 1, 34, 47, 1, 1, 1, 1, 1, 1, 1] 25 | model_name = "trained_vaeac_model" 26 | def impute(imputed_data): 27 | input_size = len(imputed_data) 28 | # Read and normalize input data 29 | dirname = os.path.dirname(__file__) 30 | 31 | load_data = np.loadtxt(os.path.join(dirname, 'training_groundtruth.tsv'), delimiter='\t') 32 | raw_data = np.concatenate((imputed_data, load_data), axis=0) 33 | raw_data = torch.from_numpy(raw_data).float() 34 | norm_mean, norm_std = compute_normalization(raw_data, one_hot_max_sizes) 35 | norm_std = torch.max(norm_std, torch.tensor(1e-9)) 36 | data = (raw_data - norm_mean[None]) / norm_std[None] 37 | 38 | verbose = True 39 | # Non-zero number of workers cause nasty warnings because of some bug in 40 | # multiprocess library. It might be fixed now, but anyway there is no need 41 | # to have a lot of workers for dataloader over in-memory tabular data. 42 | num_workers = 0 43 | 44 | # design all necessary networks and learning parameters for the dataset 45 | networks = get_imputation_networks(one_hot_max_sizes) 46 | 47 | # build VAEAC on top of returned network, optimizer on top of VAEAC, 48 | # extract optimization parameters and mask generator 49 | model = VAEAC( 50 | networks['reconstruction_log_prob'], 51 | networks['proposal_network'], 52 | networks['prior_network'], 53 | networks['generative_network'] 54 | ) 55 | optimizer = networks['optimizer'](model.parameters()) 56 | batch_size = networks['batch_size'] 57 | mask_generator = networks['mask_generator'] 58 | vlb_scale_factor = networks.get('vlb_scale_factor', 1) 59 | 60 | checkpoint = torch.load(os.path.join(dirname, model_name)) 61 | 62 | model.load_state_dict(checkpoint['model_state_dict']) 63 | 64 | # build dataloader for the whole input data 65 | dataloader = DataLoader(data, batch_size=batch_size, 66 | shuffle=False, num_workers=num_workers, 67 | drop_last=False) 68 | 69 | 70 | # prepare the store for the imputations 71 | results = [] 72 | for i in range(num_imputations): 73 | results.append([]) 74 | 75 | iterator = dataloader 76 | if verbose: 77 | iterator = tqdm(iterator) 78 | 79 | # impute missing values for all input data 80 | for batch in iterator: 81 | 82 | # if batch size is less than batch_size, extend it with objects 83 | # from the beginning of the dataset 84 | batch_extended = torch.tensor(batch) 85 | batch_extended = extend_batch(batch_extended, dataloader, batch_size) 86 | 87 | # compute the imputation mask 88 | mask_extended = torch.isnan(batch_extended).float() 89 | 90 | # compute imputation distributions parameters 91 | with torch.no_grad(): 92 | samples_params = model.generate_samples_params(batch_extended, 93 | mask_extended, 94 | num_imputations) 95 | samples_params = samples_params[:batch.shape[0]] 96 | 97 | # make a copy of batch with zeroed missing values 98 | mask = torch.isnan(batch) 99 | batch_zeroed_nans = torch.tensor(batch) 100 | batch_zeroed_nans[mask] = 0 101 | 102 | # impute samples from the generative distributions into the data 103 | # and save it to the results 104 | for i in range(num_imputations): 105 | sample_params = samples_params[:, i] 106 | sample = networks['sampler'](sample_params) 107 | sample[(~mask).byte()] = 0 108 | sample += batch_zeroed_nans 109 | 
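# `sample` now holds generated values at the originally missing positions
# and the observed values everywhere else, i.e. one complete row per
# object in the normalized feature space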
results[i].append(sample.clone().detach().cpu()) 110 | 111 | # concatenate all batches into one [n x K x D] tensor, 112 | # where n is the number of objects, K is the number of imputations 113 | # and D is the dimensionality of one object 114 | for i in range(len(results)): 115 | results[i] = torch.cat(results[i]).unsqueeze(1) 116 | result = torch.cat(results, 1) 117 | 118 | # reshape result, undo normalization and save it 119 | result = result.view(result.shape[0] * result.shape[1], result.shape[2]) 120 | result = result * norm_std[None] + norm_mean[None] 121 | 122 | part_imputations = result[0:num_imputations*input_size] # keep only the caller's rows; the appended training ground-truth rows are discarded 123 | 124 | return part_imputations.numpy() 125 | 126 | -------------------------------------------------------------------------------- /backend/variables.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | PERCEPTUAL_DIS_FOR_DISTINCT_BG = 4.0 4 | PERCEPTUAL_DIS_FOR_DISTINCT_BIG = 10.0 5 | 6 | PERCEPTUAL_DIS_FOR_DISTINCT_THEME_COLORS = 15.0 7 | 8 | WHITE_S_THREH = 20 * 255 / 100 # HSV thresholds below: percentages rescaled to the 0-255 range 9 | WHITE_V_THREH = 90 * 255 / 100 10 | BLACK_V_THREH = 25 * 255 / 100 11 | 12 | dirname = os.path.dirname(__file__) 13 | DATA_ROOT = os.path.join(dirname, "datasets/4300_infographics/") 14 | DATA_ROOT_2 = os.path.join(dirname, "datasets/all_infographics/") 15 | 16 | INFOGRAPHICS_FOLDER = "0_infographics/" 17 | BOUNDING_BOX_FOLDER = "0_bounding_box/" 18 | 19 | ANNOTATED_FOLDER = "0_annotated_infographics" 20 | STITCH_FOLDER = "feature_v1_res/" 21 | REMOVED_TEXT_ICON_INDEX_FOLDER = "2_removed_text_icon_index" 22 | 23 | # ------- 4300 infographics (refer to Lu et al. for the meaning of the following numbers) ------- 24 | INDEX = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 25 | 11, 12, 13, 14, 15, 16, 17, 18, 19, 26 | 20, 21, 22, 23, 24, 25, 26] 27 | 28 | ARROW = [27, 28, 29, 30, 31, 32, 33, 34] 29 | 30 | BODY_TEXT = [35] 31 | 32 | ICON = [36] 33 | 34 | TITLE = [37] 35 | 36 | # ------- all infographics ------- 37 | # bounding boxes of all infographics detected by YOLO, using the format of Zhu et al.
[timeline] 38 | EVENT_MARK = [0] 39 | EVENT_TEXT = [1] # equal to text above 40 | ANNOTATION_MARK = [2] 41 | ANNOTATION_TEXT = [3] # equal to text above 42 | ICON_2 = [4] # equal to icon above 43 | INDEX_2 = [5] # equal to index above 44 | MAIN_BODY = [6] 45 | 46 | # ---- others ----- 47 | 48 | LOCATION = ["x", "y", "w", "h"] 49 | STATISTIC = ["max", "avg", "min", "std"] 50 | 51 | COLOR_CONSIDERED_NUM = 5 52 | 53 | BACKBONE_TYPE_NAMES = ["Unrecognized", "LandScape", "Pulse", "Portrait", "Spiral", "Clock", "Star", "Bowl", "Dome", "Down-ladder", "Up-ladder", "Left-wing", "Right-wing"] -------------------------------------------------------------------------------- /frontend/babel.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | presets: [ 3 | '@vue/app' 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /frontend/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "humor-frontend", 3 | "version": "0.1.0", 4 | "private": true, 5 | "scripts": { 6 | "serve": "vue-cli-service serve", 7 | "build": "vue-cli-service build", 8 | "lint": "vue-cli-service lint" 9 | }, 10 | "dependencies": { 11 | "axios": "^0.19.2", 12 | "bootstrap": "^4.4.1", 13 | "core-js": "^2.6.11", 14 | "d3": "^5.15.0", 15 | "element-ui": "^2.13.0", 16 | "fabric": "^3.6.3", 17 | "jquery": "^3.4.1", 18 | "lodash": "^4.17.15", 19 | "popper.js": "^1.16.1", 20 | "tabulator-tables": "^4.5.3", 21 | "video.js": "^7.6.6", 22 | "vue": "^2.6.11", 23 | "vue-easycm": "^1.0.4", 24 | "vuetify": "^2.2.20", 25 | "vuex": "^3.1.2" 26 | }, 27 | "devDependencies": { 28 | "@vue/cli-plugin-babel": "^3.12.1", 29 | "@vue/cli-plugin-eslint": "^3.12.1", 30 | "@vue/cli-service": "^4.1.2", 31 | "babel-eslint": "^10.0.3", 32 | "deepmerge": "^4.2.2", 33 | "eslint": "^5.16.0", 34 | "eslint-plugin-vue": "^5.0.0", 35 | "fibers": "^4.0.2", 36 | "material-design-icons-iconfont": "^5.0.1", 37 | "node-sass": "^4.13.1", 38 | "sass": "^1.26.3", 39 | "sass-loader": "^8.0.2", 40 | "vue-template-compiler": "^2.6.11" 41 | }, 42 | "eslintConfig": { 43 | "root": true, 44 | "env": { 45 | "node": true 46 | }, 47 | "extends": [ 48 | "plugin:vue/essential", 49 | "eslint:recommended" 50 | ], 51 | "rules": { 52 | "no-console": 0 53 | }, 54 | "parserOptions": { 55 | "parser": "babel-eslint" 56 | } 57 | }, 58 | "postcss": { 59 | "plugins": { 60 | "autoprefixer": {} 61 | } 62 | }, 63 | "browserslist": [ 64 | "> 1%", 65 | "last 2 versions" 66 | ] 67 | } 68 | -------------------------------------------------------------------------------- /frontend/public/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/public/favicon.ico -------------------------------------------------------------------------------- /frontend/public/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | InfoColorizer 10 | 11 | 12 | 13 | 17 |
18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /frontend/public/logo.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/public/logo.ico -------------------------------------------------------------------------------- /frontend/public/logo_0.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/public/logo_0.ico -------------------------------------------------------------------------------- /frontend/src/AppSVG.vue: -------------------------------------------------------------------------------- 1 | 26 | 27 | 45 | 46 | 47 | 65 | -------------------------------------------------------------------------------- /frontend/src/assets/font/Roboto-Black.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/font/Roboto-Black.ttf -------------------------------------------------------------------------------- /frontend/src/assets/font/Roboto-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/font/Roboto-Regular.ttf -------------------------------------------------------------------------------- /frontend/src/assets/img/1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/1.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/101.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/101.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/129.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/129.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/135.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/135.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/19.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/19.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/2286.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/2286.jpg 
-------------------------------------------------------------------------------- /frontend/src/assets/img/2411.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/2411.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/258.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/258.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3032.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3032.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3046.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3046.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3134.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3134.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3135.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3135.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3142.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3142.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3143.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3143.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3227.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3227.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3233.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3233.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3257.jpg: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3257.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/337.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/337.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3464.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3464.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3465.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3465.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3466.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3466.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3475.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3475.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/3495.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/3495.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/442.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/442.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/459.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/459.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/478.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/478.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/521.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/521.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/525.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/525.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/621.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/621.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/65.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/65.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/693.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/693.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/70.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/70.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/703.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/703.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/71.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/71.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/718.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/718.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/72.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/72.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/735.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/735.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/757.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/757.jpg 
-------------------------------------------------------------------------------- /frontend/src/assets/img/767.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/767.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/776.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/776.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/777.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/777.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/817.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/817.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/82.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/82.jpg -------------------------------------------------------------------------------- /frontend/src/assets/img/86.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/img/86.jpg -------------------------------------------------------------------------------- /frontend/src/assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yuanlinping/InfoColorizer/499bf1ca0e477a358b68545c4f3dd3dbaa51bec8/frontend/src/assets/logo.png -------------------------------------------------------------------------------- /frontend/src/assets/shapes/1.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/10.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/11.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/12.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/13.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/14.svg: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /frontend/src/assets/shapes/15.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/16.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/17.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/18.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/19.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/2.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/20.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/21.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/3.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/4.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/5.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/6.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/7.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/8.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/shapes/9.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/balance.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/bank.svg: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/bills.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/book.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 12 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/book_2.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/castle.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/employees.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 9 | 12 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/exchange.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 8 | 10 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/film.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/footprint.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/loss.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/meeting.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 8 | 11 | 12 | 15 | 16 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/pencil.svg: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/safebox.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 8 | 9 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/skills.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 8 | 11 | 14 | 18 | 22 | 26 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/strategy.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/svg1.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/svg3.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/svg4.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 6 | 17 | 18 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/svg5.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 7 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/time.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 13 | 14 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/tools.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /frontend/src/assets/svgs/wallet.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 6 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 
/frontend/src/assets/templates/demo.svg:
--------------------------------------------------------------------------------
(template SVG; markup not captured in this dump -- only the step labels "01"-"05" survive)
--------------------------------------------------------------------------------
/frontend/src/assets/templates/environment.svg:
--------------------------------------------------------------------------------
(template SVG; markup not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/assets/templates/head.svg:
--------------------------------------------------------------------------------
(template SVG; markup not captured in this dump -- only the step labels "01"-"04" survive)
--------------------------------------------------------------------------------
/frontend/src/assets/templates/timeline.svg:
--------------------------------------------------------------------------------
(template SVG; markup not captured in this dump -- only the title text "My weekend" survives)
--------------------------------------------------------------------------------
/frontend/src/components/ContentPanelView/ContentPanelView.vue:
--------------------------------------------------------------------------------
(Vue single-file component; its template and script blocks were not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/components/InteractiveView/InteractiveView.js:
--------------------------------------------------------------------------------
// import store from "../../service/store.js";
/* global $ _ */

export default {
  name: "palettesView",
  data() {
    return {};
  },
  computed: {
    imageUrl() {
      return this.$store.getters.getImageUrl;
    }
  },
  methods: {
    // used for computing areas of elements by drawing onto the hidden inputImageCanvas
    displayInputImage(imageUrl) {
      let img = new Image();
      img.src = imageUrl;
      img.onload = function () {
        let canv = $("#inputImageCanvas")[0];
        // canv.width = img.width;
        // canv.height = img.height;
        canv.width = 300;
        canv.height = 300;

        let ctx = canv.getContext('2d');
        ctx.drawImage(this, 0, 0);
        let imageData = ctx.getImageData(0, 0, canv.width, canv.height);
        imageData = imageData.data;
        imageData = _.chunk(imageData, 4); // group the flat RGBA array into [r, g, b, a] pixels
        // count fully transparent pixels (all four channels are zero), i.e. the background
        let white_count = _.reduce(imageData, function (count, ele) {
          let sum = _.reduce(ele, function (s, e) { return s + e }, 0);
          if (sum == 0) {
            return count + 1;
          } else {
            return count;
          }
        }, 0);
        console.log(imageData, white_count);
      }
    }
  },
  watch: {
    imageUrl(newImageUrl) {
      this.displayInputImage(newImageUrl);
    }
  }
}
--------------------------------------------------------------------------------
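For reference, the background test above counts a pixel as background only when all four RGBA channels are zero. A minimal standalone sketch of the same computation on a raw RGBA array, with no canvas involved:

function relativeOpaqueArea(rgba) {
  let transparent = 0;
  const pixelCount = rgba.length / 4;
  for (let i = 0; i < rgba.length; i += 4) {
    // r + g + b + a == 0 only for a fully transparent black pixel
    const sum = rgba[i] + rgba[i + 1] + rgba[i + 2] + rgba[i + 3];
    if (sum === 0) transparent += 1;
  }
  return (pixelCount - transparent) / pixelCount;
}

// 2x2 image: one red pixel, one transparent pixel, two white pixels
const demo = new Uint8ClampedArray([
  255, 0, 0, 255,      0, 0, 0, 0,
  255, 255, 255, 255,  255, 255, 255, 255,
]);
console.log(relativeOpaqueArea(demo)); // 0.75
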
/frontend/src/components/InteractiveView/InteractiveView.vue:
--------------------------------------------------------------------------------
(Vue single-file component wrapper; markup not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/components/PaintingView/PaintingView.vue:
--------------------------------------------------------------------------------
(Vue single-file component; markup not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/components/PaintingView/fabricImageFilter.js:
--------------------------------------------------------------------------------
import { fabric } from "fabric";

export default {
  // builds a Fabric.js "Highlight" filter: dims the whole image to alpha 50,
  // then restores full opacity for exactly the pixels listed in `mask`
  getNewHightFilter(pixelArr) {
    fabric.Image.filters.Highlight = fabric.util.createClass(
      fabric.Image.filters.BaseFilter,
      {
        type: "Highlight",
        mask: [],
        applyTo2d: function (options) {
          let imageData = options.imageData,
            data = imageData.data,
            len = data.length;

          for (let i = 0; i < len; i += 4) {
            data[i + 3] = 50; // dim every pixel
          }

          for (let p = 0; p < this.mask.length; p++) {
            let pixel = this.mask[p]; // [row, col]
            let startI = (pixel[0] * imageData.width + pixel[1]) * 4;
            data[startI + 3] = 255; // restore the highlighted pixels
          }
        }
      }
    );

    fabric.Image.filters.Highlight.fromObject =
      fabric.Image.filters.BaseFilter.fromObject;

    return new fabric.Image.filters.Highlight({ mask: pixelArr });
  },

  // builds a "Recolor" filter that overwrites the masked pixels with targetColor ([r, g, b])
  getNewRecoloredFilter(pixelArr, targetColor, iid) {
    fabric.Image.filters.Recolor = fabric.util.createClass(
      fabric.Image.filters.BaseFilter,
      {
        type: "Recolor",
        newColor: [], // [r, g, b] array
        mask: [],
        id: null,
        applyTo2d: function (options) {
          let imageData = options.imageData,
            data = imageData.data;

          for (let p = 0; p < this.mask.length; p++) {
            let pixel = this.mask[p];
            let startI = (pixel[0] * imageData.width + pixel[1]) * 4;
            data[startI + 0] = this.newColor[0];
            data[startI + 1] = this.newColor[1];
            data[startI + 2] = this.newColor[2];
            data[startI + 3] = 255;
          }
        }
      }
    );

    fabric.Image.filters.Recolor.fromObject =
      fabric.Image.filters.BaseFilter.fromObject;

    return new fabric.Image.filters.Recolor({ mask: pixelArr, newColor: targetColor, id: iid });
  }
}
--------------------------------------------------------------------------------
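The component that wires these filters to the canvas (PaintingView.js) is not captured in this dump. For reference, a minimal sketch of attaching a Recolor filter through Fabric's standard filter pipeline; the canvas element id, image path, mask coordinates, and id argument are all hypothetical:

import { fabric } from "fabric";
import filterFactory from "./fabricImageFilter.js";

const canvas = new fabric.Canvas("paintingCanvas"); // assumed canvas element id
fabric.Image.fromURL("/assets/img/337.jpg", img => {
  const mask = [[10, 20], [10, 21]]; // [row, col] pairs, as in the mask loops above
  const filter = filterFactory.getNewRecoloredFilter(mask, [228, 26, 28], 1); // id is app-specific
  img.filters.push(filter);          // standard Fabric.js filter mechanism
  img.applyFilters();
  canvas.add(img);
  canvas.renderAll();
});
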
/frontend/src/components/PaintingView/helper.js:
--------------------------------------------------------------------------------
// import { fabric } from "fabric";

/*global _ $*/
export default {
  calculateRelativePixelArea(imageUrl, width, height, fabricObj) {
    let img = new Image();
    img.src = imageUrl;
    img.onload = function () {
      let canv = $("#inputImageCanvas")[0];
      canv.width = width;
      canv.height = height;
      let ctx = canv.getContext('2d');
      ctx.drawImage(this, 0, 0);
      let imageData = ctx.getImageData(0, 0, canv.width, canv.height);
      imageData = imageData.data;
      imageData = _.chunk(imageData, 4);
      // fully transparent pixels (all four RGBA channels zero) are treated as background
      let whiteCount = _.reduce(imageData, function (count, ele) {
        let sum = _.reduce(ele, function (s, e) { return s + e }, 0);
        if (sum == 0) {
          return count + 1;
        } else {
          return count;
        }
      }, 0);
      ctx.clearRect(0, 0, canv.width, canv.height);

      fabricObj["relative_pixel_area"] =
        (canv.width * canv.height - whiteCount) /
        (canv.width * canv.height);
    }
  },

  getPureTreeStructureFromSvg(objsInTheGroup, totalHeight, totalWidth) { // equal to featureExtractionPipeline in the backend
    let constructedTree = {};
    for (let i = objsInTheGroup.length - 1; i >= 0; i--) {
      let childObj = objsInTheGroup[i];
      let bound = childObj.getBoundingRect();
      constructedTree[childObj.id] = {
        id: childObj.id,
        top: bound.top,
        left: bound.left,
        color: childObj.get("fill"),
        relative_height: childObj.get("height") / totalHeight,
        relative_width: childObj.get("width") / totalWidth,
        relative_pixel_area: childObj.get("relative_pixel_area")
      };
      if (i == 0) {
        constructedTree[childObj.id].father_id = 0; // 0 currently is not corresponding to any fabric object
        continue;
      }
      // the parent is the nearest earlier object that spatially contains this one
      for (let j = i - 1; j >= 0; j--) {
        let curFatherObj = objsInTheGroup[j];
        if (childObj.isContainedWithinObject(curFatherObj)) {
          constructedTree[childObj.id].father_id = curFatherObj.id;
          break;
        } else if (j == 0) {
          constructedTree[childObj.id].father_id = 0;
        } else {
          continue;
        }
      }
    }
    return constructedTree; // no key "0" in this constructedTree
  },

  getTreeInD3Format(tree) {
    // group nodes by father_id and sort siblings left-to-right, then top-to-bottom
    let treeInD3 = _.mapValues(_.groupBy(tree, "father_id"), v => _.sortBy(v, ["left", "top"], ["asc", "asc"]));
    _.map(treeInD3, (children, key) => {
      if (treeInD3[key] != undefined) {
        dfs(children, treeInD3);
      }
    })

    // after the DFS only the virtual root group (father_id 0) remains;
    // its first child, the outermost element, becomes the tree root
    let postTreeInD3 = {}
    _.map(treeInD3, (children, key) => {
      postTreeInD3.id = key;
      postTreeInD3.children = children;
    })
    postTreeInD3 = postTreeInD3.children[0];
    return postTreeInD3;

    function dfs(children, treeD3) {
      for (let i = 0; i < children.length; i++) {
        let element = children[i];
        let curId = element.id;
        let curChildren = treeD3[curId];
        if (curChildren == undefined) {
          element["children"] = []
        } else {
          delete treeD3[curId];
          let res = dfs(curChildren, treeD3);
          element["children"] = res
        }
      }
      return children;
    }
  },

  // assigns nested-set style left/right numbers to every node via a pre-order walk
  getTreeWithLightRightNumber(tree) {
    let treeWithLR = tree;
    let number = 0;

    treeWithLR["left_number"] = number;
    number = number + 1;
    let temp = treeWithLR["children"];
    leftRightNumberDFS(temp)
    treeWithLR["right_number"] = number
    return treeWithLR;

    function leftRightNumberDFS(children) {
      if (!children || children.length == 0) { return } // stop at leaves
      _.forEach(children, child => {
        child["left_number"] = number
        number = number + 1
        let cur_children = child["children"]
        leftRightNumberDFS(cur_children)
        child["right_number"] = number
        number = number + 1
      });
      return
    }
  },

  findIndexOfBindedElementGivenAFlag(bindInfoArr, flag) {
    return bindInfoArr.reduce((arr, ele, ind) => {
      if (ele == flag) { arr.push(ind); }
      return arr;
    }, [])
  },
}
--------------------------------------------------------------------------------
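For reference, a toy run of getTreeWithLightRightNumber (despite the name, it assigns nested-set style left/right numbers). The helper expects lodash as the global _, which utils/initialize.js arranges in the app; here it is attached explicitly:

import _ from "lodash";
import helper from "./helper.js"; // PaintingView/helper.js

window._ = _; // helper.js reads lodash from the global scope

// toy tree in the same {id, children} shape that getTreeInD3Format produces
const toy = {
  id: 7,
  children: [
    { id: 8, children: [] },
    { id: 9, children: [{ id: 10, children: [] }] },
  ],
};

const numbered = helper.getTreeWithLightRightNumber(toy);
// pre-order numbering: root opens at 0; node 8 gets 1/2; node 9 opens at 3,
// its child 10 gets 4/5, node 9 closes at 6; the root closes at 7
console.log(numbered.left_number, numbered.right_number);                         // 0 7
console.log(numbered.children[1].left_number, numbered.children[1].right_number); // 3 6
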
/frontend/src/components/PaletteView/PaletteView.vue:
--------------------------------------------------------------------------------
(Vue single-file component; markup not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/components/PaletteView/helper.js:
--------------------------------------------------------------------------------
/*global d3*/
export default {
  getOriginalColorsAndDepthFromTreeInD3HierarchyFormat(tree, source) {
    let root = d3.hierarchy(tree);
    let originalColors = []
    root.eachBefore(d => { // pre-order traversal, so the palette order matches the tree view
      originalColors.push({
        id: d.data.id,
        depth: d.depth,
        color: (source == "img" ? d.data.rgb_color : d.data.color)
      })
    })
    return originalColors
  },
  initializeElementBindInfoArray(len) {
    let arr = [];
    for (let i = 0; i < len; i++) { arr.push(-1); } // -1 means "not bound to any group"
    return arr
  },
  getObjIdArrayInSameOrderWithPalettes(originalColors) {
    let arr = [];
    for (let i = 0; i < originalColors.length; i++) {
      arr.push(originalColors[i].id)
    }
    return arr;
  },

  getInitialUserPreferencesFromOriginalColors(originalColors) {
    let userPreferences = [];
    for (let i = 0; i < originalColors.length; i++) {
      let value = originalColors[i];
      userPreferences.push({
        id: value.id,
        depth: value.depth,
        specificColor: undefined,
        colorName: undefined,
      })
    }
    return userPreferences;
  },

  getTreeNodeInContructredTreeGivenId(tree, id) {
    let root = d3.hierarchy(tree);
    let target;
    root.each(d => {
      if (d.data.id == id) {
        target = d.data;
      }
    })
    return target;
  },

  findIndexOfBindedElementGivenAFlag(bindInfoArr, flag) {
    return bindInfoArr.reduce((arr, ele, ind) => {
      if (ele == flag) { arr.push(ind); }
      return arr;
    }, [])
  },
}
--------------------------------------------------------------------------------
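For reference, a toy run of the traversal used above. d3.hierarchy's eachBefore visits nodes in pre-order, which is why a parent's swatch always precedes its children's in the extracted color list:

import * as d3 from "d3"; // the app instead attaches d3 globally in utils/initialize.js

const toy = {
  id: 1, color: "#386cb0",
  children: [
    { id: 2, color: "#7fc97f", children: [] },
    { id: 3, color: "#fdc086", children: [] },
  ],
};

const order = [];
d3.hierarchy(toy).eachBefore(d =>
  order.push({ id: d.data.id, depth: d.depth, color: d.data.color })
);
console.log(order.map(o => o.id)); // [1, 2, 3] -- parent first, then children
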
/frontend/src/components/SettingToolBar/SettingToolBar.vue:
--------------------------------------------------------------------------------
(Vue single-file component; markup not captured in this dump)
--------------------------------------------------------------------------------
/frontend/src/data/contentIcon.js:
--------------------------------------------------------------------------------
const svgData = [
  { icon: require('../assets/svgs/svg1.svg'), url: "../../assets/svgs/svg1.svg" },
  { icon: require('../assets/svgs/svg3.svg'), url: "../../assets/svgs/svg3.svg" },
  { icon: require('../assets/svgs/svg4.svg'), url: "../../assets/svgs/svg4.svg" },
  { icon: require('../assets/svgs/svg5.svg'), url: "../../assets/svgs/svg5.svg" },
  { icon: require('../assets/svgs/balance.svg'), url: "../../assets/svgs/balance.svg" },
  { icon: require('../assets/svgs/bank.svg'), url: "../../assets/svgs/bank.svg" },
  { icon: require('../assets/svgs/exchange.svg'), url: "../../assets/svgs/exchange.svg" },
  { icon: require('../assets/svgs/skills.svg'), url: "../../assets/svgs/skills.svg" },
  { icon: require('../assets/svgs/bills.svg'), url: "../../assets/svgs/bills.svg" },
  { icon: require('../assets/svgs/book.svg'), url: "../../assets/svgs/book.svg" },
  { icon: require('../assets/svgs/loss.svg'), url: "../../assets/svgs/loss.svg" },
  { icon: require('../assets/svgs/pencil.svg'), url: "../../assets/svgs/pencil.svg" },
  { icon: require('../assets/svgs/safebox.svg'), url: "../../assets/svgs/safebox.svg" },
  { icon: require('../assets/svgs/strategy.svg'), url: "../../assets/svgs/strategy.svg" },
  { icon: require('../assets/svgs/time.svg'), url: "../../assets/svgs/time.svg" },
  { icon: require('../assets/svgs/tools.svg'), url: "../../assets/svgs/tools.svg" },
]
export default svgData;
--------------------------------------------------------------------------------
/frontend/src/data/contentImage.js:
--------------------------------------------------------------------------------
let nums = [1, 101, 129, 135, 19, 2286, 2411, 258, 3032, 3046, 3134, 3135, 3142, 3143, 3227, 3233, 3257, 337, 3464, 3465, 3466, 3475, 3495, 442, 459, 478, 521, 525, 621, 65, 693, 70, 703, 71, 718, 72, 735, 757, 767, 776, 777, 817, 82, 86]

let svgData = [];
for (let i = 0; i < nums.length; i++) {
  svgData.push({
    icon: require('../assets/img/' + nums[i] + ".jpg"), numInImg: nums[i]
  })
}
export default svgData;
--------------------------------------------------------------------------------
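Since vue-cli projects are built with webpack, the hand-maintained nums list above could equivalently be derived with webpack's require.context. A sketch of that alternative, not what the repository does:

// Hypothetical alternative: let webpack enumerate the JPGs instead of listing ids by hand.
// require.context is a webpack-only API, available in the vue-cli build.
const ctx = require.context("../assets/img", false, /\.jpg$/);
const svgData = ctx.keys().map(key => ({
  icon: ctx(key),                                    // resolved asset, as require(...) returns above
  numInImg: parseInt(key.match(/(\d+)\.jpg$/)[1], 10), // e.g. "./337.jpg" -> 337
}));
export default svgData;
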
/frontend/src/data/contentShape.js:
--------------------------------------------------------------------------------
let shapeNodeData = [
  { icon: require('../assets/shapes/1.svg') },
  { icon: require('../assets/shapes/2.svg') },
  { icon: require('../assets/shapes/3.svg') },
  { icon: require('../assets/shapes/4.svg') },
  { icon: require('../assets/shapes/5.svg') },
  { icon: require('../assets/shapes/6.svg') },
  { icon: require('../assets/shapes/7.svg') },
  { icon: require('../assets/shapes/8.svg') },
  { icon: require('../assets/shapes/9.svg') },
  { icon: require('../assets/shapes/10.svg') },
  { icon: require('../assets/shapes/11.svg') },
  { icon: require('../assets/shapes/12.svg') },
  { icon: require('../assets/shapes/13.svg') },
  { icon: require('../assets/shapes/14.svg') },
  { icon: require('../assets/shapes/15.svg') },
  { icon: require('../assets/shapes/16.svg') },
  { icon: require('../assets/shapes/17.svg') },
  { icon: require('../assets/shapes/18.svg') },
  { icon: require('../assets/shapes/19.svg') },
  { icon: require('../assets/shapes/20.svg') },
  { icon: require('../assets/shapes/21.svg') },
]

export default shapeNodeData
--------------------------------------------------------------------------------
/frontend/src/data/contentTemplate.js:
--------------------------------------------------------------------------------
const templateData = [
  { icon: require('../assets/templates/demo.svg') },
  { icon: require('../assets/templates/head.svg') },
  { icon: require('../assets/templates/animals.svg') },
  { icon: require('../assets/templates/timeline.svg') },
  { icon: require('../assets/templates/environment.svg') },
]
export default templateData;
--------------------------------------------------------------------------------
/frontend/src/main.js:
--------------------------------------------------------------------------------
import Vue from 'vue'
import AppSvg from './AppSVG.vue'
import Element from "element-ui";
import locale from 'element-ui/lib/locale/lang/en'
import vuetify from '@/plugins/vuetify'
import VueEasyCm from 'vue-easycm'
import store from "./service/store.js"

import init from "./utils/initialize";
import 'element-ui/lib/theme-chalk/index.css'

init();

Vue.config.productionTip = false

Vue.use(Element, { locale });
Vue.use(VueEasyCm);

new Vue({
  el: "#app",
  store,
  vuetify,
  render: h => h(AppSvg),
})
--------------------------------------------------------------------------------
/frontend/src/plugins/vuetify.js:
--------------------------------------------------------------------------------
import Vue from 'vue'
import Vuetify from 'vuetify'
import 'vuetify/dist/vuetify.min.css'
import 'material-design-icons-iconfont/dist/material-design-icons.css'
// (note) icons are from: https://material.io/resources/icons/?style=baseline

Vue.use(Vuetify)

const opts = {
  icons: {
    iconfont: 'md',
  },
}

export default new Vuetify(opts)
--------------------------------------------------------------------------------
/frontend/src/service/config.js:
--------------------------------------------------------------------------------
export const canvasConf = {
  // NOTE (assumption): the original file set only fireRightClick, so the
  // Math.random() * canvasConf.width expressions below evaluated to NaN;
  // 800x600 are hypothetical defaults, not values taken from the repository.
  width: 800,
  height: 600,
  fireRightClick: true,
}

export const objectConf = {
  "Circle": {
    left: Math.floor(Math.random() * canvasConf.width / 2),
    top: Math.floor(Math.random() * canvasConf.height / 2),
    radius: 20,
    stroke: "black",
    fill: "white"
  },
  "Ellipse": {
    left: Math.floor(Math.random() * canvasConf.width / 2),
    top: Math.floor(Math.random() * canvasConf.height / 2),
    rx: 30,
    ry: 20,
    stroke: "black",
    fill: "white"
  },
  "line": {
    x1: 20,
    y1: 20,
    x2: 20,
    y2: 100,
    width: 2,
    stroke: "black",
    fill: "white"
  },
  "Rect": {
    left: Math.floor(Math.random() * canvasConf.width / 2),
    top: Math.floor(Math.random() * canvasConf.height / 2),
    width: 50,
    height: 50,
    stroke: "black",
    fill: "white",
    angle: 0
  },
  "Triangle": {
    left: Math.floor(Math.random() * canvasConf.width / 2),
    top: Math.floor(Math.random() * canvasConf.height / 2),
    width: 50,
    height: 50,
    stroke: "black",
    fill: "white",
    angle: 0
  }
}

export const TreeViewConf = {
  padding_left: 35,
  padding: 15,
  padding_bottom: 25
}

export const tableviewConf = {
  padding: 5,
  tableRowHeight_two: 45,
  legendHeight: 25,
  normalColorPalette: "outline:none",
  selectedColorPalette: "outline:3px solid orange",
  columnGap: 5
};
--------------------------------------------------------------------------------
/frontend/src/service/dataService.js:
--------------------------------------------------------------------------------
import axios from 'axios'

const GET_REQUEST = 'GET'
const POST_REQUEST = 'POST'
const dataServerUrl = process.env.DATA_SERVER_URL || 'http://127.0.0.1:5000'

function request(url, params, type, callback) {
  let func
  if (type === GET_REQUEST) {
    func = axios.get
  } else if (type === POST_REQUEST) {
    func = axios.post
  }

  func(url, params).then((response) => {
    if (response.status === 200) {
      callback(response)
    } else {
      console.error(response) /* eslint-disable-line */
    }
  })
    .catch((error) => {
      console.error(error) /* eslint-disable-line */
    })
}

function getFeaturesAndTreeStructure(numInImg, callback) {
  const url = `${dataServerUrl}/getFeaturesAndTreeStructure`
  let params = { "numInImg": numInImg }
  request(url, params, POST_REQUEST, callback);
}

function getImputationResults(treeSource, numInImg, modifiedTree, bindArray, callback) {
  const url = `${dataServerUrl}/getImputationResults`
  let params = { "treeSource": treeSource, "numInImg": numInImg, "modifiedTree": modifiedTree, "bindArray": bindArray }
  request(url, params, POST_REQUEST, callback);
}

export default {
  dataServerUrl,
  getFeaturesAndTreeStructure,
  getImputationResults
}
--------------------------------------------------------------------------------
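For reference, a minimal sketch of calling this service from a view component. The image id 337 is one of the ids listed in data/contentImage.js; the shape of the response payload is assumed from the route name rather than documented here:

import dataService from "./dataService.js";

dataService.getFeaturesAndTreeStructure(337, response => {
  // axios response; the backend is expected to return the constructed element
  // tree and extracted features for infographic 337
  console.log(response.data);
});
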
/frontend/src/service/store.js:
--------------------------------------------------------------------------------
import Vue from 'vue'
import vuex from 'vuex'
import { objectConf } from "../service/config.js"
Vue.use(vuex)

export default new vuex.Store({
  state: {
    imageUrl: "",
    imageName: "",
    objectConf: objectConf,
    /** create new object */
    newSvgToCreated: "",
    newImgToCreated: "",
    newTextToCreated: false,
    /** interaction between tree and painting view */
    // constructed tree from svg/img in painting view
    RequestTreeConstruction: false,
    treeSource: null, // value: svg or img
    ConstructedTree: {},
    // mouse over a tree node to highlight the corresponding area in the painting view
    hoverTreeObjID: -1,
    RemoveAllHightlightFilter: false,

    /** paletteView */
    OriginalColorsInPaletteView: [],
    ObjIdArrayInSameOrderWithPalettesInPaletteView: [],
    /** user preference */
    // click a tree node to modify the color of the corresponding tree node and area
    toBeModifiedTreeObjID: -1, // abandoned in the PaletteView version; applies to both svg and image; id in tree = id of fabric obj (svg) = id of each shape/element (img)

    UserColorName: null,
    userSpecificColor: null, // in hex
    CurElementsMultiSelected: [],
    ElementBindInfoArray: [],

    /** imputation results */
    RgbImputationResultsAndNodeIds: {}, // contains 'rgb_imputation_results', 'corresponding_ids'
    BookmarkPalettesInPaletteView: [],
    SelectedColorPaletteIndex: null,
    SelectedBookmarkIndex: null,

    /** save recolored infographics */
    SaveRecoloredInfogState: false,
  },
  mutations: {
    changeImageUrl(state, url) {
      state.imageUrl = url;
    },
    changeImageName(state, name) {
      state.imageName = name;
    },
    // note: Vuex mutations receive a single payload argument,
    // so the extra parameters here arrive as undefined
    changeObjectConf(state, objectType, feature, value) {
      state.objectConf[objectType][feature] = value
    },

    changeNewSvgToCreated(state, url) {
      state.newSvgToCreated = url;
    },
    changeNewImgToCreated(state, url) {
      state.newImgToCreated = url;
    },
    changeNewTextToCreated(state, par) {
      state.newTextToCreated = par
    },

    changeHoverTreeObjID(state, objID) {
      state.hoverTreeObjID = objID
    },

    changeRemoveAllHightlightFilter(state, val) {
      state.RemoveAllHightlightFilter = val;
    },

    changeRequestTreeConstruction(state, val) {
      state.RequestTreeConstruction = val;
    },

    changeTreeSource(state, source) {
      state.treeSource = source
    },
    changeConstructedTree(state, tree) {
      state.ConstructedTree = tree;
    },

    changeOriginalColorsInPaletteView(state, val) {
      state.OriginalColorsInPaletteView = val;
    },
    changeObjIdArrayInSameOrderWithPalettesInPaletteView(state, val) {
      state.ObjIdArrayInSameOrderWithPalettesInPaletteView = val;
    },

    changeToBeModifiedTreeObjID(state, id) {
      state.toBeModifiedTreeObjID = id;
    },

    changeUserSpecificColor(state, color) {
      state.userSpecificColor = color
    },

    changeUserColorName(state, val) {
      state.UserColorName = val;
    },

    changeCurElementsMultiSelected(state, val) {
      state.CurElementsMultiSelected = val;
    },
    changeElementBindInfoArray(state, val) {
      state.ElementBindInfoArray = val;
    },

    changeRgbImputationResultsAndNodeIds(state, obj) {
      state.RgbImputationResultsAndNodeIds = obj;
    },
    changeBookmarkPalettesInPaletteView(state, val) {
      state.BookmarkPalettesInPaletteView = val;
    },
    changeSelectedColorPaletteIndex(state, ind) {
      state.SelectedColorPaletteIndex = ind;
    },

    changeSelectedBookmarkIndex(state, ind) {
      state.SelectedBookmarkIndex = ind;
    },
    changeSaveRecoloredInfogState(state, val) {
      state.SaveRecoloredInfogState = val;
    }

  },
  getters: {
    getImageUrl: state => state.imageUrl,
    getImageName: state => state.imageName,
    getObjectConf: state => state.objectConf,

    getNewSvgToCreated: state => state.newSvgToCreated,
    getNewImgToCreated: state => state.newImgToCreated,
    getNewTextToCreated: state => state.newTextToCreated,

    getHoverTreeObjID: state => state.hoverTreeObjID,
    getRemoveAllHightlightFilter: state => state.RemoveAllHightlightFilter,
    getRequestTreeConstruction: state => state.RequestTreeConstruction,
    getTreeSource: state => state.treeSource,
    getConstructedTree: state => state.ConstructedTree,

    getOriginalColorsInPaletteView: state => state.OriginalColorsInPaletteView,
    getObjIdArrayInSameOrderWithPalettesInPaletteView: state => state.ObjIdArrayInSameOrderWithPalettesInPaletteView,
    getToBeModifiedTreeObjID: state => state.toBeModifiedTreeObjID,
    getUserColorName: state => state.UserColorName,
    getUserSpecificColor: state => state.userSpecificColor,
    getCurElementsMultiSelected: state => state.CurElementsMultiSelected,
    getElementBindInfoArray: state => state.ElementBindInfoArray,

    getRgbImputationResultsAndNodeIds: state => state.RgbImputationResultsAndNodeIds,
    getBookmarkPalettesInPaletteView: state => state.BookmarkPalettesInPaletteView,
    getSelectedColorPaletteIndex: state => state.SelectedColorPaletteIndex,
    getSelectedBookmarkIndex: state => state.SelectedBookmarkIndex,

    getSaveRecoloredInfogState: state => state.SaveRecoloredInfogState
  }
});
--------------------------------------------------------------------------------
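For reference, a minimal sketch of driving this store directly; a component would normally go through this.$store instead, and the image path is hypothetical:

import store from "./store.js";

store.commit("changeImageUrl", "/assets/img/337.jpg"); // hypothetical path
store.commit("changeUserSpecificColor", "#1b9e77");    // in hex, as the state comment notes

console.log(store.getters.getImageUrl);          // "/assets/img/337.jpg"
console.log(store.getters.getUserSpecificColor); // "#1b9e77"
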
/frontend/src/service/variables.js:
--------------------------------------------------------------------------------
export const contextMenuInCanvas = [{
  text: "Arrange",
  children: [{
    text: "Bring Forward",
  }, {
    text: "Bring Backward"
  }, {
    text: "Send to Front"
  }, {
    text: "Send to Back"
  }]
}, {
  text: "Group",
}, {
  text: "Ungroup",
}, {
  text: "Duplicate",
}, {
  text: "Delete",
}]

export const SPECIFICCOLORSUBTITUTION = [240, 240, 240]

export const COLORPICKERPREDEFINED = [
  "#fee5d9", "#feedde", "#edf8e9", "#eff3ff", "#f2f0f7", "#f7f7f7", "#7fc97f", "#1b9e77", "#b3e2cd", "#8dd3c7",
  "#fcbba1", "#fdd0a2", "#c7e9c0", "#c6dbef", "#dadaeb", "#d9d9d9", "#beaed4", "#d95f02", "#fdcdac", "#ffffb3",
  "#fc9272", "#fdae6b", "#a1d99b", "#9ecae1", "#bcbddc", "#bdbdbd", "#fdc086", "#7570b3", "#cbd5e8", "#bebada",
  "#fb6a4a", "#fd8d3c", "#74c476", "#6baed6", "#9e9ac8", "#969696", "#ffff99", "#e7298a", "#f4cae4", "#fb8072",
  "#ef3b2c", "#f16913", "#41ab5d", "#4292c6", "#807dba", "#737373", "#386cb0", "#66a61e", "#e6f5c9", "#80b1d3",
  "#cb181d", "#d94801", "#238b45", "#2171b5", "#6a51a3", "#525252", "#f0027f", "#e6ab02", "#fff2ae", "#fdb462",
  "#99000d", "#8c2d04", "#005a32", "#084594", "#4a1486", "#252525", "#bf5b17", "#a6761d", "#f1e2cc", "#b3de69"
]

// note: the final entries ("light" through "trustworthy") are semantic/affective
// terms rather than plain color names
export const COLORNAMES = ["green", "blue", "purple", "red", "pink", "yellow", "orange", "brown", "teal", "lightblue", "grey",
  "limegreen", "magenta", "lightgreen", "brightgreen", "skyblue", "cyan", "turquoise", "darkblue", "darkgreen",
  "aqua", "olive", "navyblue", "lavender", "fuchsia", "black", "royalblue", "violet", "hotpink", "tan",
  "forestgreen", "lightpurple", "neongreen", "yellowgreen", "maroon", "darkpurple", "salmon", "peach", "beige",
  "lime", "seafoamgreen", "mustard", "brightblue", "lilac", "seagreen", "palegreen", "bluegreen", "mint",
  "lightbrown", "mauve", "darkred", "greyblue", "burntorange", "darkpink", "indigo", "periwinkle", "bluegrey",
  "lightpink", "aquamarine", "gold", "brightpurple", "grassgreen", "redorange", "bluepurple", "greygreen",
  "kellygreen", "puke", "rose", "darkteal", "babyblue", "paleblue", "greenyellow", "brickred", "lightgrey",
  "darkgrey", "white", "brightpink", "chartreuse", "purpleblue", "royalpurple", "burgundy", "goldenrod",
  "darkbrown", "lightorange", "darkorange", "redbrown", "paleyellow", "plum", "offwhite", "pinkpurple",
  "darkyellow", "lightyellow", "mustardyellow", "brightred", "peagreen", "khaki", "orangered", "crimson",
  "deepblue", "springgreen", "cream", "palepink", "yelloworange", "deeppurple", "pinkred", "pastelgreen",
  "sand", "rust", "lightred", "taupe", "armygreen", "robinseggblue", "huntergreen", "greenblue", "lightteal",
  "cerulean", "flesh", "orangebrown", "slateblue", "slate", "coral", "blueviolet", "ochre", "leafgreen",
  "electricblue", "seablue", "midnightblue", "steelblue", "brick", "palepurple", "mediumblue", "burntsienna",
  "darkmagenta", "eggplant", "sage", "darkturquoise", "puce", "bloodred", "neonpurple", "mossgreen",
  "terracotta", "oceanblue", "yellowbrown", "brightyellow", "dustyrose", "applegreen", "neonpink", "skin",
  "cornflowerblue", "lightturquoise", "wine", "deepred", "azure", "light", "calm", "exciting", "positive", "negative", "serious", "playful", "trustworthy"];

export const BINDINGCOLORS = ["#e41a1c",
  "#377eb8",
  "#4daf4a",
  "#984ea3",
  "#ff7f00",
  "#ffff33",
  "#a65628",
  "#f781bf", "#fbb4ae", "#b3cde3", "#ccebc5", "#decbe4", "#fed9a6", "#e5d8bd", "#fddaec"]
--------------------------------------------------------------------------------
"scale(" + s + ")"; 55 | return this; 56 | }, 57 | scaleXY: function (fx, fy) { 58 | this.value += "scale(" + fx + "," + fy + ")"; 59 | return this; 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /frontend/vue.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | devServer: { 3 | disableHostCheck: true, 4 | }, 5 | configureWebpack: { 6 | performance: { 7 | hints: false 8 | }, 9 | optimization: { 10 | splitChunks: { 11 | minSize: 10000, 12 | maxSize: 250000, 13 | } 14 | } 15 | } 16 | } 17 | --------------------------------------------------------------------------------