├── data ├── val │ └── .gitkeep ├── test │ ├── a │ │ └── .gitkeep │ └── b │ │ └── .gitkeep └── train │ └── .gitkeep ├── model ├── __init__.py ├── hook.py ├── metrics.py ├── embedding.py ├── helper.py ├── input_fn.py ├── model_fn.py └── attention.py ├── pic ├── attnconv.png ├── textcnn.png ├── transformer_enc.png └── attnconv_all_in_one.png ├── chinese_vectors ├── readme.md └── build_word_vectors.py ├── Pipfile ├── params.yaml ├── ensemble └── result.py ├── readme.md ├── .gitignore ├── preprocess_data.py ├── main.py └── Pipfile.lock /data/val/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /model/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data/test/a/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data/test/b/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /data/train/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pic/attnconv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Peter-Chou/ai_challenger_2018_sentiment_analysis/HEAD/pic/attnconv.png -------------------------------------------------------------------------------- /pic/textcnn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Peter-Chou/ai_challenger_2018_sentiment_analysis/HEAD/pic/textcnn.png -------------------------------------------------------------------------------- /pic/transformer_enc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Peter-Chou/ai_challenger_2018_sentiment_analysis/HEAD/pic/transformer_enc.png -------------------------------------------------------------------------------- /pic/attnconv_all_in_one.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Peter-Chou/ai_challenger_2018_sentiment_analysis/HEAD/pic/attnconv_all_in_one.png -------------------------------------------------------------------------------- /chinese_vectors/readme.md: -------------------------------------------------------------------------------- 1 | # 说明 2 | 3 | 简体中文vector用的是:https://github.com/Embedding/Chinese-Word-Vectors 4 | 里的Word2vec / Skip-Gram with Negative Sampling 内容选择微博 (Word + Character + Ngram) -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | tensorflow = "==1.10.0" 8 | pandas = "*" 9 | tqdm = "*" 10 | jieba = "*" 11 | matplotlib = "*" 12 | pyyaml = "*" 13 | 14 | [dev-packages] 15 | pylint = "*" 16 | "autopep8" = "*" 17 | "flake8" = "*" 18 | rope = "*" 19 | isort = "*" 20 | pytest = "*" 21 | ipykernel = "*" 22 | 23 | [requires] 24 | python_version = "3.6" 25 | 
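A minimal sanity-check sketch for the pretrained embeddings, assuming `chinese_vectors/build_word_vectors.py` (shown further below) has already been run on the downloaded `sgns.weibo.bigram-char.bz2`, so that `word_idx_table.json` and `vectors.npy` (the files referenced by `params.yaml` and `preprocess_data.py`) exist:

```python
import json

import numpy as np

# files written by chinese_vectors/build_word_vectors.py (run here from the repo root)
with open("chinese_vectors/word_idx_table.json", encoding="utf-8") as f:
    word_idx = json.load(f)
vectors = np.load("chinese_vectors/vectors.npy")

# index 0 is reserved for the padding row; every vocabulary entry gets one
# 300-dimensional row (embed_size in params.yaml)
print("vocab entries :", len(word_idx))
print("vector matrix :", vectors.shape)  # expected roughly (len(word_idx), 300)
```

If the conversion succeeded, `len(word_idx)` should match `chinese_word_size` in `params.yaml`; the extra `NUM` row is appended later inside `model/embedding.py`.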
-------------------------------------------------------------------------------- /model/hook.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import tensorflow as tf 4 | 5 | from model.helper import set_logger 6 | 7 | logger = set_logger('train.log') 8 | 9 | 10 | class _LoggerHook(tf.train.SessionRunHook): 11 | """logs loss and steps""" 12 | 13 | def __init__(self, loss, gstep, lr, print_n_step): 14 | self._loss = loss 15 | self._print_n_step = print_n_step 16 | self._gstep = gstep 17 | self._lr = lr 18 | 19 | def begin(self): 20 | self._step = -1 21 | self._start_time = time.time() 22 | 23 | def before_run(self, run_context): 24 | self._step += 1 25 | return tf.train.SessionRunArgs([self._loss, self._gstep, self._lr]) 26 | 27 | def after_run(self, run_context, run_values): 28 | if self._step % self._print_n_step == 0: 29 | current_time = time.time() 30 | duration = current_time - self._start_time 31 | self._start_time = current_time 32 | 33 | loss_value = run_values.results[0] 34 | current_gstep = run_values.results[1] 35 | lr = run_values.results[2] 36 | logger.info( 37 | f"step: {current_gstep:>7}\t loss: {loss_value:.2f}\t lr: {lr:0.7f}\t spent: {duration:.1f} seconds") 38 | -------------------------------------------------------------------------------- /params.yaml: -------------------------------------------------------------------------------- 1 | # preprocess data parameters 2 | max_len: 500 3 | chinese_word_size: 195198 # add to vocab 4 | multi_categories: 20 5 | num_sentiment: 4 6 | vector_path: "./chinese_vectors/vectors.npy" 7 | 8 | # dataset parameters 9 | # batch_size: 1 10 | batch_size: 32 11 | prefetch: 2 12 | buffer_size: 120000 13 | 14 | # embed / position vectors parameters 15 | embed_size: 300 16 | hidden_size: 60 # reduce word dimension to 100 17 | 18 | # AttnConv parameters 19 | 20 | ## transformer parameters 21 | num_attention_stacks: 3 22 | num_heads: 2 23 | 24 | ## Convolution parameters 25 | filter_size_list: 26 | - 10 27 | # - 20 28 | # - 50 29 | # - 100 30 | num_filters: 64 # num of feature maps generated by a filter 31 | inner_dense_outshape: 32 | 33 | # Optimization parameters 34 | label_smooth: true 35 | epsilon: 0.005 36 | use_regularizer: true 37 | reg_const: 0.001 38 | max_norm: 5 39 | dropout_rate: 0.1 40 | 41 | # learning rate parameters 42 | learning_rate: 0.01 43 | momentum: 0.7 44 | first_decay_steps: 3000 45 | t_mul: 2.0 # t_mul times longer than previous time spent 46 | m_mul: 0.9 # m_mul ** i * learning_rate when ith restart start 47 | alpha: 0.0 # minimum lr during decay 48 | 49 | # training parameters 50 | train_steps: 45000 51 | # train_steps: 92000 52 | random_seed: 1024 53 | 54 | 55 | # hook parameters 56 | print_n_step: 20 57 | save_n_step: 1000 58 | keep_checkpoint_max: 20 -------------------------------------------------------------------------------- /model/metrics.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | 4 | EPSILON = 1e-6 5 | 6 | 7 | def average_macro_f1(labels, predictions): 8 | """计算 average F1 9 | 10 | Args: 11 | labels (tensor): (batch, num_categories, num_sentiments) 12 | predictions (tensor): (batch, num_categories, num_sentiments) 13 | 14 | Returns: 15 | (average_f1_scalar, update_op_group) 16 | """ 17 | 18 | batch_size = predictions.get_shape().as_list()[0] 19 | categories = predictions.get_shape().as_list()[1] 20 | sentiments = predictions.get_shape().as_list()[2] 21 | 22 | update_op_list = 
[] 23 | f1_list = [] 24 | 25 | with tf.variable_scope("macro_f1"): 26 | for category in range(categories): 27 | for sentiment in range(sentiments): 28 | precision, precision_update_op = tf.metrics.precision( 29 | labels=labels[:, category, sentiment], 30 | predictions=predictions[:, category, sentiment], 31 | name=f"p_{category}_{sentiment}") 32 | 33 | recall, recall_update_op = tf.metrics.recall( 34 | labels=labels[:, category, sentiment], 35 | predictions=predictions[:, category, sentiment], 36 | name=f"r_{category}_{sentiment}") 37 | 38 | f1 = 2 * (precision * recall) / (precision + recall + EPSILON) 39 | 40 | f1_list.append(f1) 41 | update_op_list.extend([precision_update_op, recall_update_op]) 42 | 43 | f1_list = tf.stack(f1_list) 44 | 45 | return tf.reduce_mean(f1_list), tf.group(*update_op_list) 46 | -------------------------------------------------------------------------------- /ensemble/result.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | import os 3 | import random 4 | from collections import Counter 5 | from time import time 6 | 7 | import numpy as np 8 | import pandas as pd 9 | from tqdm import tqdm 10 | 11 | random.seed(10) 12 | tables = [] 13 | WORKERS = 8 14 | 15 | 16 | for file in os.listdir(): 17 | if file.split(sep='.')[-1] == "csv": 18 | df = pd.read_csv(file) 19 | tables.append(df) 20 | 21 | 22 | df_new = (pd.concat(tables) 23 | .groupby(level=0) 24 | .apply( 25 | lambda g: pd.Series({i: np.hstack(g[i].values) for i in df.columns}))) 26 | df_new.iloc[:, :2] = df.iloc[:, :2] 27 | 28 | 29 | def vote_func(data): 30 | (data, i) = data 31 | for row in range(data.shape[0]): 32 | for col in range(2, data.shape[1]): 33 | counter = Counter(data.iloc[row, col]) 34 | max_count = counter.most_common(1)[0][1] 35 | candidates = [] 36 | for res, count in counter.items(): 37 | if count == max_count: 38 | candidates.append(res) 39 | data.iloc[row, col] = random.choice(candidates) 40 | return i, data 41 | 42 | 43 | if __name__ == "__main__": 44 | start_time = time() 45 | pool = multiprocessing.Pool(processes=WORKERS) 46 | result = pool.map(vote_func, [(d, i) for i, d in enumerate(np.array_split(df_new, WORKERS))]) 47 | pool.close() 48 | result = sorted(result, key=lambda x: x[0]) 49 | df_ensemble = pd.concat([i[1] for i in result]) 50 | print("--- {:0.2f} seconds ---".format(time() - start_time)) 51 | 52 | print(df_ensemble.iloc[0, 2:]) 53 | df_ensemble.to_csv("ensembles.csv", index=False) 54 | -------------------------------------------------------------------------------- /chinese_vectors/build_word_vectors.py: -------------------------------------------------------------------------------- 1 | """生成word转idx字典,以及vectors向量集""" 2 | 3 | import argparse 4 | import bz2 5 | import csv 6 | import json 7 | import os 8 | 9 | import numpy as np 10 | 11 | VOCAB_FILE = "./sgns.weibo.bigram-char.bz2" 12 | 13 | 14 | def build_word_idx_and_vectors(file_path): 15 | """从word—vectors文件中导出 word_idx_map, vectors matrix (对vector进行 16 | Frobenius Norm标准化) 17 | 18 | Args: 19 | file_path (str): word-vector文件地址 20 | 21 | """ 22 | 23 | word_idx_dict = dict() 24 | with bz2.open(file_path, 'rt', newline='', encoding='utf-8', errors='ignore') as f: 25 | reader = csv.reader(f, delimiter=' ', quoting=csv.QUOTE_NONE) 26 | next(reader) 27 | vectors = [] 28 | word_idx_dict[""] = 0 # add 's id is zero 29 | vectors.append([0.0] * 300) 30 | for idx, (word, *vector) in enumerate(reader, start=1): 31 | word_idx_dict[word] = idx 32 | vector = 
np.asarray([float(x) for x in vector if x != '']) 33 | # // vector = vector / np.linalg.norm(vector) # Frobenius norm 34 | vectors.append(vector) 35 | vectors = np.asarray(vectors) 36 | 37 | dict_save_path = os.path.join(os.path.dirname( 38 | file_path), "word_idx_table.json") 39 | vectors_save_path = os.path.join(os.path.dirname( 40 | file_path), "vectors.npy") 41 | 42 | np.save(vectors_save_path, vectors) 43 | with open(dict_save_path, 'w', encoding='utf-8') as f: 44 | json.dump(word_idx_dict, f, indent=4, ensure_ascii=False) 45 | 46 | 47 | def main(): 48 | build_word_idx_and_vectors(VOCAB_FILE) 49 | 50 | 51 | if __name__ == '__main__': 52 | main() 53 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # 细粒度用户评论情感分析 (全球AI挑战赛 2018) 2 | 3 | ## 项目简介 4 | 5 | 根据客户的评论,对20个方面进行情感分析(-2:未提及、-1:负面、0:中性、1:正面) 6 | 解决问题思路:将这20个多分类任务问题看作一个 **多任务学习** 问题来搭建模型 7 | **解决方案模型**: SemEval-2018中论文 ([Attention-based Convolutional Neural Networks for Multi-label Emotion Classification](http://aclweb.org/anthology/S18-1019)) 的实现及应用。 8 | 9 | ## 预处理 10 | 11 | ### 繁体转简体 12 | 13 | 使用 opencc 将文件中的繁体转换成简体 14 | 15 | ```sh 16 | opencc -i data/train/sentiment_analysis_trainingset.csv -o data/train/train_sc.csv -c t2s.json 17 | opencc -i data/val/sentiment_analysis_validationset.csv -o data/val/val_sc.csv -c t2s.json 18 | opencc -i data/test/a/sentiment_analysis_testa.csv -o data/test/a/a_sc.csv -c t2s.json 19 | opencc -i data/test/b/sentiment_analysis_testb.csv -o data/test/b/b_sc.csv -c t2s.json 20 | ``` 21 | 22 | ### 中文词向量 23 | 24 | 简体中文的词向量使用[chinese word vectors](https://github.com/Embedding/Chinese-Word-Vectors) 里的Word2vec / Skip-Gram with Negative Sampling,内容选择微博 (Word + Character + Ngram) 25 | **中文停用词**使用此[微博中文停用词库]( 26 | https://github.com/chdd/weibo/blob/master/stopwords/%E4%B8%AD%E6%96%87%E5%81%9C%E7%94%A8%E8%AF%8D%E5%BA%93.txt) (其中去除0-9) 27 | 28 | ### 分词 29 | 30 | 分词使用的是[jieba](https://github.com/fxsjy/jieba)包, 主要先按词组拆分,如果词组不在词库(已去除停用词)中出现,再将该词组按字拆分, 31 | 考虑到项目目标是辨析情绪而非翻译,可以弱化语言结构,所以这里对未在词库中出现的新词不进行保留。 32 | 33 | ```sh 34 | python preprocess_data.py --data_dir data/train 35 | python preprocess_data.py --data_dir data/val 36 | python preprocess_data.py -t --data_dir data/test/a 37 | python preprocess_data.py -t --data_dir data/test/b 38 | ``` 39 | 40 | ## 模型 41 | 42 | ### 模型结构 43 | 44 | 模型由参数共享的语句理解层和参数独立的情感辨别层组成: 45 | 46 | - 特征共享层:由1词向量层 + 1位置向量层(提供位置信息) + 3个Transformer Encoder 自注意力模块组成 47 | - 情感辨别层:由1卷积层 + 1最大池化层 + 1全连接层组成 48 | 49 | ![attn_conv picture](/pic/attnconv.png) 50 | 51 | 该模型的思路是模仿人处理该问题的行为:第一步理解语句(自注意力模块),第二步辨别情感(卷积+最大池化) 52 | 53 | ### Transformer Encoder: 自注意力模块 54 | 55 | Transformer是由谷歌团队在[Attention Is All You Need](https://arxiv.org/pdf/1706.03762.pdf)首次提出,这里使用的是Encoder中的自注意力Transformer 56 | 自注意力Transformer Encoder对输入进行线性变换得到每个位置的query和(key, value)键值对, 57 | 通过对query和key求点积来寻找与query最相关的key并对其结果使用softmax得到该键值对的权重。 58 | 这个query的回答就是:sum(value * 对应权重) 59 | 最后对这个query的回答进行维度变换(使用position-wise feed forward,即kernel_size为1的一维卷积, 激活函数为relu) 60 | 这样若有N个位置,得到N个query及其对应的回答 61 | 62 | ![transformer_encoder picture](/pic/transformer_enc.png) 63 | 64 | ### CNN情感辨别模块 65 | 66 | 这里借鉴的是Yoon Kim在[Convolutional Neural Networks for Sentence Classification](http://aclweb.org/anthology/D14-1181)中提出的架构。其中: 67 | 卷积层kernel的宽度为Transformer提取的Attention的维度大小,kernel的高度取10(即对临近的10个Attention进行卷积操作)。kernel的数量取64 68 | 最大池化的作用范围为整个feature map,即每个Kernel得到的feature map在经过最大池化后被提炼为一个值 69 | 70 | ![textcnn 
pic](/pic/textcnn.png) 71 | 72 | ## 训练 / 推断 73 | 74 | ### 训练 75 | 76 | ```sh 77 | python main.py --model_dir output 78 | ``` 79 | 80 | ### 推断 81 | 82 | ```sh 83 | python main.py -t --test_dir path/to/test/folder --model_dir output 84 | ``` 85 | 86 | ## 效果 87 | 88 | Average F1: 0.61 -------------------------------------------------------------------------------- /model/embedding.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | 5 | def word_embedding(inputs, vector_path, scale=False, scope="word_embedding"): 6 | """word index -> 词向量转换 7 | 8 | Args: 9 | inputs (2d tensor): batch中每个句子的word index 10 | vector_path (str): 预训练中文词向量的npy文件地址 11 | scale (bool, optional): Defaults to False. 是否对词向量进行缩放 12 | scope (str, optional): Defaults to "word_embedding". scope名称 13 | 14 | Returns: 15 | 3d tensor: (n, sentence_length, embed_size) 16 | """ 17 | 18 | pretrained_embs = np.load(vector_path) 19 | embed_size = pretrained_embs.shape[1] 20 | 21 | with tf.variable_scope(scope): 22 | pretrained_embs = tf.get_variable( 23 | name="embs_pretrained", 24 | initializer=tf.constant_initializer( 25 | np.asarray(pretrained_embs), dtype=tf.float32), 26 | shape=pretrained_embs.shape, trainable=False) 27 | 28 | num_vector = tf.get_variable( 29 | name="NUM", 30 | shape=[1, embed_size], 31 | initializer=tf.random_uniform_initializer(-0.04, 0.04), 32 | trainable=True) 33 | 34 | lookup_table = tf.concat( 35 | [pretrained_embs, num_vector], axis=0) 36 | 37 | outputs = tf.nn.embedding_lookup(lookup_table, inputs) 38 | 39 | if scale: 40 | outputs = outputs * (embed_size ** 0.5) 41 | 42 | return outputs 43 | 44 | 45 | def position_embedding(inputs, 46 | num_units, 47 | mask_pad=False, 48 | scale=True, 49 | scope="position_embedding", 50 | reuse=None): 51 | '''Sinusoidal Positional_Encoding. 52 | Args: 53 | inputs: A 3d Tensor with shape of (batch, N, T). 54 | num_units: Output dimensionality 55 | mask_pad: Boolean. If True, :0 will be ignored (replaced by zero vectors) 56 | scale: Boolean. If True, the output will be multiplied by sqrt num_units(check details from paper) 57 | scope: Optional scope for `variable_scope`. 58 | reuse: Boolean, whether to reuse the weights of a previous layer 59 | by the same name. 60 | Returns: 61 | A 'Tensor' with one more rank than inputs's, with the dimensionality should be 'num_units' 62 | ''' 63 | 64 | # N 为 batch size; T 为 max sentence length 65 | N, T = inputs.get_shape().as_list() 66 | with tf.variable_scope(scope, reuse=reuse): 67 | position_ind = tf.tile(tf.expand_dims( 68 | tf.range(1, T + 1), 0), [N, 1]) # (N, T) 69 | 70 | # First part of the PE function: sin and cos argument 71 | position_enc = np.array([ 72 | [pos / np.power(10000, 2. * i / num_units) for i in range(num_units)] 73 | for pos in range(T)]) # (T, num_units) 74 | 75 | # Second part, apply the cosine to even columns and sin to odds. 
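        # i.e. PE(pos, 2i)   = sin(pos / 10000^(2i / num_units))
        #      PE(pos, 2i+1) = cos(pos / 10000^(2i / num_units))
        # (the sinusoidal encoding from "Attention Is All You Need")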
76 | position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i 77 | position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1 78 | 79 | # Convert to a tensor 80 | lookup_table = tf.convert_to_tensor(position_enc, dtype=tf.float32) 81 | 82 | lookup_table = tf.concat((tf.zeros(shape=[1, num_units]), 83 | lookup_table), 0) 84 | 85 | # * 如果是pad(id为0)则不给予位置信息 (added) 86 | if mask_pad: 87 | pad_mask = tf.cast(tf.not_equal(inputs, 0), tf.int32) 88 | position_ind = position_ind * pad_mask 89 | 90 | outputs = tf.nn.embedding_lookup(lookup_table, position_ind) 91 | 92 | if scale: 93 | outputs = outputs * num_units**0.5 94 | 95 | return outputs 96 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # File created using '.gitignore Generator' for Visual Studio Code: https://bit.ly/vscode-gig 2 | 3 | # Created by https://www.gitignore.io/api/visualstudiocode,windows,jupyternotebook,python 4 | 5 | ### JupyterNotebook ### 6 | .ipynb_checkpoints 7 | */.ipynb_checkpoints/* 8 | 9 | # Remove previous ipynb_checkpoints 10 | # git rm -r .ipynb_checkpoints/ 11 | # 12 | 13 | ### Python ### 14 | # Byte-compiled / optimized / DLL files 15 | __pycache__/ 16 | *.py[cod] 17 | *$py.class 18 | 19 | # C extensions 20 | *.so 21 | 22 | # Distribution / packaging 23 | .Python 24 | build/ 25 | develop-eggs/ 26 | dist/ 27 | downloads/ 28 | eggs/ 29 | .eggs/ 30 | lib/ 31 | lib64/ 32 | parts/ 33 | sdist/ 34 | var/ 35 | wheels/ 36 | *.egg-info/ 37 | .installed.cfg 38 | *.egg 39 | MANIFEST 40 | 41 | # PyInstaller 42 | # Usually these files are written by a python script from a template 43 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
44 | *.manifest 45 | *.spec 46 | 47 | # Installer logs 48 | pip-log.txt 49 | pip-delete-this-directory.txt 50 | 51 | # Unit test / coverage reports 52 | htmlcov/ 53 | .tox/ 54 | .coverage 55 | .coverage.* 56 | .cache 57 | nosetests.xml 58 | coverage.xml 59 | *.cover 60 | .hypothesis/ 61 | .pytest_cache/ 62 | 63 | # Translations 64 | *.mo 65 | *.pot 66 | 67 | # Django stuff: 68 | *.log 69 | local_settings.py 70 | db.sqlite3 71 | 72 | # Flask stuff: 73 | instance/ 74 | .webassets-cache 75 | 76 | # Scrapy stuff: 77 | .scrapy 78 | 79 | # Sphinx documentation 80 | docs/_build/ 81 | 82 | # PyBuilder 83 | target/ 84 | 85 | # Jupyter Notebook 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # celery beat schedule file 91 | celerybeat-schedule 92 | 93 | # SageMath parsed files 94 | *.sage.py 95 | 96 | # Environments 97 | .env 98 | .venv 99 | env/ 100 | venv/ 101 | ENV/ 102 | env.bak/ 103 | venv.bak/ 104 | 105 | # Spyder project settings 106 | .spyderproject 107 | .spyproject 108 | 109 | # Rope project settings 110 | .ropeproject 111 | 112 | # mkdocs documentation 113 | /site 114 | 115 | # mypy 116 | .mypy_cache/ 117 | 118 | ### Python Patch ### 119 | .venv/ 120 | 121 | ### Python.VirtualEnv Stack ### 122 | # Virtualenv 123 | # http://iamzed.com/2009/05/07/a-primer-on-virtualenv/ 124 | [Bb]in 125 | [Ii]nclude 126 | [Ll]ib 127 | [Ll]ib64 128 | [Ll]ocal 129 | [Ss]cripts 130 | pyvenv.cfg 131 | pip-selfcheck.json 132 | 133 | ### VisualStudioCode ### 134 | .vscode/* 135 | !.vscode/settings.json 136 | !.vscode/tasks.json 137 | !.vscode/launch.json 138 | !.vscode/extensions.json 139 | 140 | ### Windows ### 141 | # Windows thumbnail cache files 142 | Thumbs.db 143 | ehthumbs.db 144 | ehthumbs_vista.db 145 | 146 | # Dump file 147 | *.stackdump 148 | 149 | # Folder config file 150 | [Dd]esktop.ini 151 | 152 | # Recycle Bin used on file shares 153 | $RECYCLE.BIN/ 154 | 155 | # Windows Installer files 156 | *.cab 157 | *.msi 158 | *.msix 159 | *.msm 160 | *.msp 161 | 162 | # Windows shortcuts 163 | *.lnk 164 | 165 | 166 | # End of https://www.gitignore.io/api/visualstudiocode,windows,jupyternotebook,python 167 | 168 | # Custom rules (everything added below won't be overriden by 'Generate .gitignore File' if you use 'Update' option) 169 | data/ai_* 170 | data/test/a/* 171 | data/test/b/* 172 | data/train/* 173 | data/val/* 174 | !data/train/.gitkeep 175 | !data/test/a/.gitkeep 176 | !data/test/b/.gitkeep 177 | !data/val/.gitkeep 178 | .vscode/* 179 | chinese_vectors/* 180 | !chinese_vectors/readme.md 181 | !chinese_vectors/build_word_vectors.py 182 | TODO 183 | *.ipynb 184 | test* 185 | ensemble/*.csv -------------------------------------------------------------------------------- /model/helper.py: -------------------------------------------------------------------------------- 1 | """ 2 | utility functions for handling hyperparams / logging 3 | """ 4 | 5 | import inspect 6 | import json 7 | import logging 8 | from collections import Iterable 9 | 10 | import yaml 11 | 12 | 13 | def flatten(items, ignore_types=(str, bytes)): 14 | """展开成一维可迭代的生成器 15 | 16 | Args: 17 | items (iterable): 可以迭代展开的对象 18 | ignore_types (class): Defaults to (str, bytes). 想要忽略的可迭代类 19 | 20 | Yield: 21 | 不可迭代或忽略列表中的元素 22 | """ 23 | 24 | for x in items: 25 | if isinstance(x, Iterable) and not isinstance(x, ignore_types): 26 | yield from flatten(x) 27 | else: 28 | yield x 29 | 30 | 31 | class Params(): 32 | """Class that loads hyperparameters from a yaml file. 
33 | Example: 34 | ``` 35 | params = Params(yaml_path) 36 | print(params.learning_rate) 37 | params.learning_rate = 0.5 # change the value of learning_rate in params 38 | ``` 39 | """ 40 | 41 | def __init__(self, yaml_path): 42 | self.update(yaml_path) 43 | 44 | def save(self, yaml_path): 45 | """Saves parameters to yaml file""" 46 | with open(yaml_path, 'w') as f: 47 | yaml.dump(self.__dict__, f) 48 | 49 | def update(self, yaml_path): 50 | """Loads parameters from yaml file""" 51 | with open(yaml_path) as f: 52 | params = yaml.load(f) 53 | self.__dict__.update(params) 54 | 55 | @property 56 | def dict(self): 57 | """Gives dict-like access to Params instance by `params.dict['learning_rate']`""" 58 | return self.__dict__ 59 | 60 | 61 | def save_dict_to_yaml(d, yaml_path): 62 | """Saves dict of floats in yaml file 63 | Args: 64 | d: (dict) of float-castable values (np.float, int, float, etc.) 65 | yaml_path: (string) path to yaml file 66 | """ 67 | with open(yaml_path, 'w', encoding='utf-8') as f: 68 | # We need to convert the values to float for yaml (it doesn't accept np.array, np.float, ) 69 | d = {k: float(v) for k, v in d.items()} 70 | 71 | yaml.dump(d, f, encoding='utf-8', indent=4, 72 | default_flow_style=False, allow_unicode=True) 73 | 74 | 75 | def set_logger(log_path, file_logging_level="INFO", console_logging_level="INFO", 76 | name=None): 77 | """logging 设置使得同时输出到文件和console 78 | 79 | Args: 80 | log_path (str): log 文件的地址 81 | file_logging_level (str): "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" 中一个 82 | console_logging_level (str): "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" 中一个 83 | name (str): logger的名字 当None时,默认使用module 的__name__ 84 | 85 | Return: 86 | (logging object): 设置好的logger 87 | """ 88 | 89 | frm = inspect.stack()[1] 90 | mod = inspect.getmodule(frm[0]) 91 | if name is None: 92 | # use calling module's __name__ 93 | logger = logging.getLogger(mod.__name__) 94 | else: 95 | logger = logging.getLogger(name) 96 | 97 | logger.setLevel(getattr(logging, file_logging_level.upper())) 98 | 99 | if not logger.handlers: 100 | # log to log file 101 | file_handler = logging.FileHandler(log_path) 102 | file_formatter = logging.Formatter( 103 | "%(asctime)s:%(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S") 104 | file_handler.setFormatter(file_formatter) 105 | logger.addHandler(file_handler) 106 | 107 | # log to console 108 | stream_handler = logging.StreamHandler() 109 | stream_formatter = logging.Formatter("%(message)s") 110 | stream_handler.setFormatter(stream_formatter) 111 | stream_handler.setLevel( 112 | getattr(logging, console_logging_level.upper())) 113 | logger.addHandler(stream_handler) 114 | return logger 115 | -------------------------------------------------------------------------------- /model/input_fn.py: -------------------------------------------------------------------------------- 1 | """ 2 | Define the input data pipeline 3 | """ 4 | 5 | import os 6 | 7 | import tensorflow as tf 8 | 9 | CPU_COUNT = os.cpu_count() 10 | _SHUFFLE_BUFFER = 120000 11 | 12 | 13 | def _set_static_shape(t, shape): 14 | t.set_shape(shape) 15 | return t 16 | 17 | 18 | def _cascade_label_set_shape(dataset, label_flat_length, label_num): 19 | dataset = dataset.map(lambda line: tf.reshape(line, (-1, label_num)), 20 | num_parallel_calls=CPU_COUNT) 21 | 22 | data_shape = [int(label_flat_length / label_num), label_num] 23 | dataset = dataset.map(lambda line: _set_static_shape(line, data_shape), 24 | num_parallel_calls=CPU_COUNT) 25 | return dataset 26 | 27 | 28 | def 
build_dataset(file_path, 29 | length, 30 | padding=False, 31 | cascading_label=False, 32 | label_num=4): 33 | """创建子dataset 34 | 35 | Args: 36 | file_path (str): 文件名, 若None, 则生成伪标签(for inference) 37 | length (int): 一行包含的元素数量 38 | padding (bool, optional): Defaults to False.如果为True, 则当一行中实际 39 | 元素数量 < length时,会用0填充 40 | cascading_label (bool, optional): Defaults to False. 如果True,则将label堆叠 41 | 成二维 42 | label_num (int, optional): Defaults to None. 当cascading_label为True时, 43 | label_num为最里层维度的数量 44 | 45 | Returns: 46 | Dataset: 返回dataset 47 | """ 48 | 49 | def _label_generator(size): 50 | while True: 51 | yield [0] * size 52 | 53 | if file_path is None: 54 | dataset = tf.data.Dataset.from_generator( 55 | lambda: _label_generator(length), 56 | output_types=tf.int32, 57 | output_shapes=tf.TensorShape([length])) 58 | else: 59 | dataset = tf.data.TextLineDataset(file_path) 60 | dataset = (dataset 61 | .map(lambda string: tf.string_split( 62 | [string], delimiter=",").values, 63 | num_parallel_calls=CPU_COUNT) 64 | .map(lambda strings: tf.string_to_number(strings, tf.int32), 65 | num_parallel_calls=CPU_COUNT)) 66 | 67 | if padding: # 填充0至length长度 68 | dataset = dataset.map(lambda line: tf.pad( 69 | line, [[0, length - tf.shape(line)[0]]], constant_values=0), 70 | num_parallel_calls=CPU_COUNT) 71 | 72 | if cascading_label: 73 | dataset = dataset.map(lambda line: tf.reshape(line, (-1, label_num)), 74 | num_parallel_calls=CPU_COUNT) 75 | 76 | # 给dynamic tensor 提供 static shape 以方便后续使用 77 | data_shape = [length] if not cascading_label else [ 78 | int(length / label_num), label_num] 79 | dataset = dataset.map(lambda line: _set_static_shape(line, data_shape), 80 | num_parallel_calls=CPU_COUNT) 81 | return dataset 82 | 83 | 84 | def input_fn(sentences, 85 | labels, 86 | batch_size=1, 87 | is_training=False, 88 | is_test=False, 89 | repeat_count=1, 90 | prefetch=2): 91 | """得到features & labels 92 | 93 | Args: 94 | sentences (dataset): tf.data.Dataset 对象 95 | labels (dataset): tf.data.Dataset 对象 96 | batch_size (int, optional): Defaults to 1. batch大小 97 | is_training (bool, optional): Defaults to False. 是否为训练 98 | is_test (bool, optional): Defaults to False. 是否为推断 99 | repeat_count (int, optional): Defaults to 1. 若None, 则无限循环 100 | prefetch (int, optional): Defaults to 2. 预备到pipeline的数量 101 | 102 | Returns: 103 | features & labels 104 | """ 105 | 106 | dataset = tf.data.Dataset.zip((sentences, labels)) 107 | 108 | if is_training: 109 | dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER) 110 | 111 | dataset = (dataset 112 | .repeat(count=repeat_count) 113 | .batch(batch_size, drop_remainder=True) 114 | .prefetch(prefetch)) 115 | 116 | iterator = dataset.make_one_shot_iterator() 117 | features, labels = iterator.get_next() 118 | return features, labels 119 | -------------------------------------------------------------------------------- /preprocess_data.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | creates or transforms the dataset e.g. 
sentences.txt, labels.txt 5 | """ 6 | 7 | import argparse 8 | import csv 9 | import json 10 | import os 11 | import re 12 | 13 | import jieba 14 | import numpy as np 15 | 16 | from model.helper import Params 17 | 18 | CHINESE_WORD_INT_PATH = "./chinese_vectors/word_idx_table.json" 19 | STOPWORDS_PATH = "./chinese_vectors/chinese_stopwords.txt" 20 | 21 | parser = argparse.ArgumentParser() 22 | parser.add_argument( 23 | "-t", "--test", help="clean test format data", action="store_true") 24 | parser.add_argument('--data_dir', default=None, 25 | help="Directory containing the raw data to split into sentence & label txt") 26 | 27 | 28 | def _add_sub_or_unk_word(word, vocab): 29 | res = [] 30 | tmp = jieba.lcut(word, cut_all=True) 31 | for i in (0, -1): 32 | if tmp[i] in vocab: 33 | res.append(tmp[i]) 34 | return res if len(res) > 0 else None 35 | 36 | 37 | def _add_num_token(word): 38 | word = int(word) 39 | if word >= 10: 40 | return "" # 将数字 -> 41 | else: 42 | return str(word) # 0-9 保留 43 | 44 | 45 | def tokenize_sentence(line, vocab): 46 | """句子分词 47 | 48 | Args: 49 | line (str): 原始的句子 50 | vocab (dict): 词/词组为key,index为value的字典 51 | 52 | Returns: 53 | list: 包含词/词组的index的列表 54 | """ 55 | 56 | rule = re.compile("[^a-zA-Z0-9\u4e00-\u9fa5]") 57 | line = rule.sub('', line) 58 | 59 | sentence = [] 60 | for word in jieba.cut(line, cut_all=False): 61 | if word in vocab: 62 | try: 63 | sentence.append(_add_num_token(word)) 64 | except ValueError: 65 | sentence.append(word) 66 | else: 67 | sub_words = _add_sub_or_unk_word(word, vocab) 68 | if sub_words is not None: 69 | sentence += sub_words 70 | return sentence 71 | 72 | 73 | def _write_rows_to_csv(lists, saved_csv_name): 74 | with open(saved_csv_name, 'w', newline='', encoding='utf-8', errors='ignore') as f: 75 | writer = csv.writer(f, delimiter=',') 76 | writer.writerows(lists) 77 | 78 | 79 | def sentence_label_save(file_path, w2i_dict, params, test=False): 80 | """保存预处理完成的转型为int的sentence(sentence有长度截断)和one-hot后的labels (如果test=False) 81 | 82 | Args: 83 | file_path (str): 原始数据文件 84 | w2i_dict (dict): 语料库与int对应的字典 85 | params (Params object): 含有预处理所需参数的Params对象 86 | test (bool, optional): Defaults to False. 
该文件是否为test, 若True则不输出labels 87 | 88 | """ 89 | 90 | def _string_to_int_sentence(line, lookup_table, params): 91 | int_sentence = [] 92 | num_idx = params.chinese_word_size 93 | # 经初步处理后sentence 超过的max_len的部分去除 94 | if len(line) > params.max_len: 95 | line = line[:params.max_len] 96 | for word in line: 97 | if word == "": 98 | int_sentence.append(num_idx) 99 | else: 100 | int_sentence.append(lookup_table[word]) 101 | return int_sentence 102 | 103 | def _one_hot_label(label, one_hot_len): 104 | label_one_hot = np.array([0] * 80) 105 | idx = [x + 2 + 4 * i for i, x in enumerate(label)] 106 | label_one_hot[idx] = 1 107 | return list(label_one_hot) 108 | 109 | labels = [] 110 | sentences_idx_path = os.path.join( 111 | os.path.dirname(file_path), "sentences_idx.csv") 112 | 113 | with open(sentences_idx_path, 'w', newline='') as save_idx_f: 114 | writer_idx = csv.writer(save_idx_f, delimiter=',') 115 | with open(file_path, newline='', encoding='utf-8', errors='ignore') as f: 116 | reader = csv.reader(f, delimiter=',') 117 | next(reader) 118 | for idx, sentence, *label in reader: 119 | sentence = tokenize_sentence(sentence, w2i_dict) 120 | sentence_idx = _string_to_int_sentence( 121 | sentence, w2i_dict, params) 122 | if not test: 123 | label = [int(x) for x in label] 124 | # one-hot for each label category 125 | label = _one_hot_label(label, one_hot_len=80) 126 | labels.append(label) 127 | writer_idx.writerow(sentence_idx) 128 | 129 | labels_path = os.path.join(os.path.dirname(file_path), "labels.csv") 130 | if not test: 131 | _write_rows_to_csv(labels, labels_path) 132 | 133 | 134 | def load_chinese_table(chinese_path, stopwords_path): 135 | """返回去除停止词的word转int的词典 136 | 137 | Args: 138 | chinese_path (str): 中文词向量json文件地址 139 | stopwords_path (str): 中文停用词地址 140 | 141 | Returns: 142 | dict: 返回 word->int 对应的字典 143 | """ 144 | 145 | with open(chinese_path, encoding='utf-8') as f: 146 | word_int_table = json.load(f) 147 | 148 | stopwords = set() 149 | with open(stopwords_path, 'r', encoding='gb2312', errors='ignore') as f: 150 | for stopword in f: 151 | stopwords.add(stopword.strip()) 152 | 153 | return {k: v for k, v in word_int_table.items() if k not in stopwords} 154 | 155 | 156 | def main(): 157 | args = parser.parse_args() 158 | if args.data_dir is None: 159 | raise Exception("must give a dataset folder") 160 | params = Params("./params.yaml") 161 | word_int_table = load_chinese_table(CHINESE_WORD_INT_PATH, STOPWORDS_PATH) 162 | dataset_path = os.path.join( 163 | args.data_dir, os.path.basename(args.data_dir) + "_sc.csv") 164 | sentence_label_save( 165 | dataset_path, word_int_table, params, test=args.test) 166 | 167 | 168 | if __name__ == "__main__": 169 | main() 170 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import tensorflow as tf 7 | 8 | from model.helper import Params 9 | from model.input_fn import build_dataset, input_fn 10 | from model.model_fn import model_fn 11 | 12 | _MIN_EVAL_FREQUENCY = 100 13 | parser = argparse.ArgumentParser() 14 | parser.add_argument('--model_dir', default=None, 15 | help="Directory containing the model graph & metrics") 16 | 17 | parser.add_argument('--test_dir', default="data/test/a", 18 | help="Directory containing the test data") 19 | 20 | parser.add_argument( 21 | "-e", "--eval", help="evaluate", action="store_true") 22 | parser.add_argument( 
23 | "-t", "--test", help="predict", action="store_true") 24 | 25 | 26 | def save_or_update_predict(predicts, 27 | dirname, 28 | predict_save_name): 29 | """将推断得到的predicts按要求格式保存到本地 30 | 31 | Args: 32 | predicts (numpy array): 模型预测得到的结果集合 33 | dirname (str): 测试集所在的文件夹地址 34 | predict_save_name (str): 保存到本地的文件名称 35 | """ 36 | 37 | for filename in os.listdir(dirname): 38 | if "sentiment_analysis" in filename: 39 | original_test_data = os.path.join(dirname, filename) 40 | predict_save_file = os.path.join(dirname, predict_save_name) 41 | 42 | if os.path.isfile(predict_save_file): 43 | os.remove(predict_save_file) 44 | 45 | test_data = pd.read_csv(original_test_data) 46 | test_data.iloc[:, 1] = "" # erase contents 47 | test_data.iloc[:, 2:] = predicts # replace in place 48 | test_data.to_csv(predict_save_file, index=False) 49 | 50 | 51 | def main(unused): 52 | params = Params("params.yaml") 53 | args = parser.parse_args() 54 | if args.model_dir is None: 55 | raise Exception("You must give a folder to save / retore the model") 56 | 57 | # load training data 58 | train_feature = build_dataset( 59 | "./data/train/sentences_idx.csv", 60 | length=params.max_len, 61 | padding=True) 62 | train_label = build_dataset( 63 | "./data/train/labels.csv", 64 | length=params.multi_categories * params.num_sentiment, 65 | padding=False, 66 | cascading_label=True, 67 | label_num=params.num_sentiment) 68 | 69 | # load eval data 70 | eval_feature = build_dataset( 71 | "./data/val/sentences_idx.csv", 72 | length=params.max_len, 73 | padding=True) 74 | eval_label = build_dataset( 75 | "./data/val/labels.csv", 76 | length=params.multi_categories * params.num_sentiment, 77 | padding=False, 78 | cascading_label=True, 79 | label_num=params.num_sentiment) 80 | 81 | # load test data 82 | test_feature = build_dataset( 83 | os.path.join(args.test_dir, "sentences_idx.csv"), 84 | length=params.max_len, 85 | padding=True) 86 | test_label = build_dataset( # pseudo labels 87 | None, 88 | length=params.multi_categories * params.num_sentiment, 89 | padding=False, 90 | cascading_label=True, 91 | label_num=params.num_sentiment) 92 | 93 | # define train, eval, test's input_fn 94 | def train_input_fn(): 95 | return input_fn(train_feature, 96 | train_label, 97 | batch_size=params.batch_size, 98 | is_training=True, 99 | repeat_count=None, 100 | prefetch=params.prefetch 101 | ) 102 | 103 | def eval_input_fn(): 104 | return input_fn(eval_feature, 105 | eval_label, 106 | batch_size=params.batch_size, 107 | is_training=False, 108 | repeat_count=1, 109 | prefetch=params.prefetch) 110 | 111 | def test_input_fn(): 112 | return input_fn(test_feature, 113 | test_label, 114 | batch_size=1, 115 | is_training=False, 116 | is_test=True, 117 | repeat_count=1) 118 | 119 | # define strategy 120 | # NUM_GPUS = 2 121 | # strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=NUM_GPUS) 122 | 123 | # define config 124 | config = tf.estimator.RunConfig( 125 | model_dir=args.model_dir, 126 | tf_random_seed=params.random_seed, 127 | keep_checkpoint_max=params.keep_checkpoint_max, 128 | save_checkpoints_steps=params.save_n_step, 129 | # train_distribute=strategy 130 | ) 131 | 132 | # define estimator 133 | nn = tf.estimator.Estimator( 134 | model_fn=model_fn, 135 | config=config, 136 | params=params 137 | ) 138 | 139 | # define train spec 140 | train_spec = tf.estimator.TrainSpec( 141 | train_input_fn, 142 | max_steps=params.train_steps) 143 | 144 | # define eval spec 145 | eval_spec = tf.estimator.EvalSpec( 146 | eval_input_fn, 147 | steps=500, 148 | 
throttle_secs=0, 149 | # exporters=[exporters], 150 | name="eval") 151 | 152 | # train and evaluate 153 | if not args.test: 154 | tf.estimator.train_and_evaluate( 155 | nn, 156 | train_spec, 157 | eval_spec) 158 | 159 | else: # 'pred' 160 | predict_results = nn.predict(input_fn=test_input_fn) 161 | results = [] 162 | for result in predict_results: # result is dict object 163 | results.append(result["classes"]) 164 | results = np.asarray(results) 165 | save_or_update_predict(results, 166 | args.test_dir, 167 | "ai_competition_submission_predict_label_data.csv") 168 | 169 | 170 | if __name__ == "__main__": 171 | # Enable logging for tf.estimator 172 | tf.logging.set_verbosity(tf.logging.INFO) 173 | tf.app.run(main) 174 | -------------------------------------------------------------------------------- /model/model_fn.py: -------------------------------------------------------------------------------- 1 | """Define the model.""" 2 | 3 | import tensorflow as tf 4 | 5 | from model.attention import (dense_logits, feedforward, inception, 6 | label_smoothing, multihead_attention) 7 | from model.embedding import position_embedding, word_embedding 8 | from model.hook import _LoggerHook 9 | from model.metrics import average_macro_f1 10 | 11 | 12 | def model_fn( 13 | features, 14 | labels, 15 | mode, 16 | params): 17 | is_training = (mode == tf.estimator.ModeKeys.TRAIN) 18 | 19 | x = features 20 | unchanged_labels = labels # non-smoothing labels for f1 metrics 21 | 22 | if params.label_smooth and labels is not None: 23 | labels = tf.cast(labels, tf.float32) 24 | labels = label_smoothing(labels, epsilon=params.epsilon) 25 | 26 | # build embedding vectors 27 | vector = word_embedding(x, params.vector_path, scale=False) 28 | 29 | # ! reduce the fiexed word dimensions to appropriate dimension 30 | if params.hidden_size != vector.get_shape().as_list()[-1]: 31 | # 原论文中使用全连接降维 32 | with tf.variable_scope("dimension_reduction"): 33 | vector = tf.layers.dense( 34 | vector, 35 | params.hidden_size, 36 | activation=None, 37 | use_bias=False, 38 | kernel_initializer=tf.contrib.layers.xavier_initializer(), 39 | kernel_regularizer=tf.contrib.layers.l2_regularizer(1.0)) 40 | 41 | # scale the word embedding 42 | vector = vector * (params.hidden_size ** 0.5) 43 | 44 | # 给词向量 增加位置信息 45 | vector += position_embedding(x, 46 | num_units=params.hidden_size, 47 | scale=False) 48 | 49 | # # * add dropout mask vector may be not a good idea 50 | vector = tf.layers.dropout(vector, rate=params.dropout_rate, 51 | training=tf.convert_to_tensor(is_training)) 52 | 53 | # # transformer attention stacks 54 | for i in range(params.num_attention_stacks): 55 | with tf.variable_scope(f"num_attention_stacks_{i + 1}"): 56 | # multi-head attention 57 | vector = multihead_attention(queries=vector, 58 | keys=vector, 59 | num_units=params.hidden_size, 60 | num_heads=params.num_heads, 61 | dropout_rate=params.dropout_rate, 62 | kernel_initializer=tf.contrib.layers.xavier_initializer(), 63 | kernel_regularizer=tf.contrib.layers.l2_regularizer( 64 | 1.0), 65 | is_training=is_training, 66 | causality=False) 67 | 68 | # feed forward 69 | vector = feedforward(vector, 70 | kernel_initializer=tf.contrib.layers.xavier_initializer(), 71 | num_units=[2 * params.hidden_size, params.hidden_size]) 72 | attentions = vector 73 | 74 | # 最里增加一维,以模拟一维黑白通道 75 | # (N, attention_stacks*T, C, 1) 76 | attentions = tf.expand_dims(attentions, -1) 77 | 78 | # ************************************************************ 79 | # complete attention part, now CNN capture part 
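    # 注意: 每个 category 的 inception / dense 头都在独立的 variable scope 中,
    # 参数互不共享 (对应 readme 中"参数独立的情感辨别层"),
    # 各头输出 (n, num_sentiment) 的 logits, 最后 stack 成 (n, multi_categories, num_sentiment)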
80 | # ************************************************************ 81 | logits = [] 82 | # 每个category对应一个inception_maxpool classifier 83 | for topic in range(params.multi_categories): 84 | cnn_features = inception(attentions, 85 | filter_size_list=params.filter_size_list, 86 | num_filters=params.num_filters, 87 | hidden_size=params.hidden_size, 88 | kernel_initializer=tf.contrib.layers.xavier_initializer(), 89 | kernel_regularizer=tf.contrib.layers.l2_regularizer( 90 | 1.0), 91 | scope=f"category_{topic+1}_inception") # (n, 1, 1, total_filter_num) 92 | 93 | total_feature_num = len(params.filter_size_list) * params.num_filters 94 | # cnn_features: (n, total_filter_num) 95 | cnn_features = tf.reshape(cnn_features, (-1, total_feature_num)) 96 | 97 | # category_logit: (n, num_sentiment) 98 | category_logits = dense_logits( 99 | cnn_features, 100 | params.num_sentiment, 101 | kernel_regularizer=tf.contrib.layers.l2_regularizer( 102 | 1.0), 103 | kernel_initializer=tf.contrib.layers.xavier_initializer(), 104 | scope=f"category_{topic+1}_logits", 105 | inner_dense_outshape=params.inner_dense_outshape, 106 | inner_dense_activation=tf.tanh, 107 | use_bias=True) 108 | 109 | # 将该category的logit加入列表 110 | logits.append(category_logits) 111 | 112 | # logits: (n, multi_categories, num_sentiment) 113 | logits = tf.stack(logits, axis=1) 114 | 115 | # * train & eval common part 116 | if (mode == tf.estimator.ModeKeys.TRAIN or 117 | mode == tf.estimator.ModeKeys.EVAL): 118 | 119 | gstep = tf.train.get_or_create_global_step() 120 | 121 | # loss: (n, multi_categories) 122 | loss = tf.nn.softmax_cross_entropy_with_logits_v2( 123 | labels=labels, logits=logits) 124 | loss = tf.reduce_sum(loss, axis=1) # (n,) 125 | loss = tf.reduce_mean(loss, axis=0) # scala 126 | 127 | if params.use_regularizer: 128 | loss_reg = sum(tf.get_collection( 129 | tf.GraphKeys.REGULARIZATION_LOSSES)) 130 | loss += params.reg_const * loss_reg 131 | loss = tf.identity(loss, name="loss") 132 | 133 | # predictions = tf.nn.softmax(logits) 134 | predictions = tf.cast(tf.equal(tf.reduce_max( 135 | logits, axis=-1, keepdims=True), logits), tf.float32) 136 | 137 | avg_macro_f1, avg_macro_f1_update_op = average_macro_f1( 138 | labels=tf.cast(unchanged_labels, tf.float32), 139 | predictions=predictions) 140 | 141 | eval_metric_ops = { 142 | 'avg_macro_f1': (avg_macro_f1, avg_macro_f1_update_op)} 143 | 144 | tf.summary.scalar("loss", loss) 145 | tf.summary.scalar("f1", avg_macro_f1) 146 | 147 | summary_hook = tf.train.SummarySaverHook(save_steps=params.print_n_step, 148 | output_dir="./summary", 149 | summary_op=tf.summary.merge_all()) 150 | 151 | else: 152 | loss = None 153 | eval_metric_ops = None 154 | 155 | # * train specific part 156 | if (mode == tf.estimator.ModeKeys.TRAIN): 157 | learning_rate = tf.train.cosine_decay_restarts( 158 | learning_rate=params.learning_rate, 159 | global_step=gstep, 160 | first_decay_steps=params.first_decay_steps, 161 | t_mul=params.t_mul, 162 | m_mul=params.m_mul, 163 | alpha=params.alpha, 164 | name="learning_rate") 165 | optimizer = tf.train.MomentumOptimizer( 166 | learning_rate=learning_rate, 167 | momentum=params.momentum) 168 | 169 | gradients, variables = zip(*optimizer.compute_gradients(loss)) 170 | gradients, _ = tf.clip_by_global_norm(gradients, params.max_norm) 171 | train_op = optimizer.apply_gradients(zip(gradients, variables), 172 | global_step=gstep) 173 | 174 | # add custom training logger 175 | custom_logger = _LoggerHook( 176 | loss, gstep, learning_rate, params.print_n_step) 177 | else: 178 
| train_op = None 179 | 180 | # * predict part 181 | if mode == tf.estimator.ModeKeys.PREDICT: 182 | # 在预测时, logits:(multi_categories, num_sentiment) 183 | # pred: (multi_categories,) 184 | pred = tf.subtract(tf.argmax(logits, axis=-1), 2) 185 | predictions = { 186 | "classes": pred, 187 | } 188 | export_outputs = { 189 | "classify": tf.estimator.export.PredictOutput(predictions) 190 | } 191 | else: 192 | predictions = None 193 | export_outputs = None 194 | 195 | training_hooks = [custom_logger, summary_hook] if is_training else None 196 | 197 | return tf.estimator.EstimatorSpec(mode=mode, 198 | predictions=predictions, 199 | loss=loss, 200 | train_op=train_op, 201 | eval_metric_ops=eval_metric_ops, 202 | training_hooks=training_hooks) 203 | -------------------------------------------------------------------------------- /model/attention.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | 创建模型所需的函数 5 | transformer part 代码主要借鉴自: 6 | https://github.com/Kyubyong/transformer/blob/master/modules.py 7 | """ 8 | 9 | import numpy as np 10 | import tensorflow as tf 11 | 12 | 13 | # ################################################### 14 | # Self Attention Part 15 | def normalize(inputs, 16 | epsilon=1e-8, 17 | scope="ln", 18 | reuse=None): 19 | '''Applies layer normalization. 20 | 21 | Args: 22 | inputs: A tensor with 2 or more dimensions, where the first dimension has 23 | `batch_size`. 24 | epsilon: A floating number. A very small number for preventing ZeroDivision Error. 25 | scope: Optional scope for `variable_scope`. 26 | reuse: Boolean, whether to reuse the weights of a previous layer 27 | by the same name. 28 | 29 | Returns: 30 | A tensor with the same shape and data dtype as `inputs`. 31 | ''' 32 | with tf.variable_scope(scope, reuse=reuse): 33 | inputs_shape = inputs.get_shape() 34 | params_shape = inputs_shape[-1:] 35 | 36 | mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) 37 | beta = tf.get_variable("beta", 38 | shape=params_shape, 39 | dtype=tf.float32, 40 | initializer=tf.zeros_initializer()) 41 | gamma = tf.get_variable("gamma", 42 | shape=params_shape, 43 | dtype=tf.float32, 44 | initializer=tf.ones_initializer()) 45 | # 原实现代码 46 | # beta = tf.Variable(tf.zeros(params_shape)) 47 | # gamma = tf.Variable(tf.ones(params_shape)) 48 | normalized = (inputs - mean) / ((variance + epsilon) ** (.5)) 49 | outputs = gamma * normalized + beta 50 | 51 | return outputs 52 | 53 | 54 | def multihead_attention(queries, 55 | keys, 56 | num_units=None, 57 | num_heads=8, 58 | dropout_rate=0, 59 | kernel_initializer=None, 60 | kernel_regularizer=None, 61 | is_training=True, 62 | causality=False, 63 | scope="multihead_attention", 64 | reuse=None): 65 | '''Applies multihead attention. 66 | 67 | Args: 68 | queries: A 3d tensor with shape of [N, T_q, C_q]. 69 | keys: A 3d tensor with shape of [N, T_k, C_k]. 70 | num_units: A scalar. Attention size. 71 | dropout_rate: A floating point number. 72 | kernel_initializer: weight initializer 73 | kernel_regularizer: weight regularizer 74 | is_training: Boolean. Controller of mechanism for dropout. 75 | causality: Boolean. If true, units that reference the future are masked. 76 | num_heads: An int. Number of heads. 77 | scope: Optional scope for `variable_scope`. 78 | reuse: Boolean, whether to reuse the weights of a previous layer 79 | by the same name. 
80 | 81 | Returns 82 | A 3d tensor with shape of (N, T_q, C) 83 | ''' 84 | with tf.variable_scope(scope, reuse=reuse): 85 | # Set the fall back option for num_units 86 | if num_units is None: 87 | num_units = queries.get_shape().as_list[-1] 88 | 89 | # Linear projections 90 | # ? remove activation? 91 | # 使得不同的multi-head attention 都回应不同的query,以及句子的不同子理解 92 | Q = tf.layers.dense(queries, 93 | num_units, 94 | kernel_initializer=kernel_initializer, 95 | kernel_regularizer=kernel_regularizer, 96 | activation=tf.nn.relu) # (N, T_q, C) 97 | K = tf.layers.dense(keys, 98 | num_units, 99 | kernel_initializer=kernel_initializer, 100 | kernel_regularizer=kernel_regularizer, 101 | activation=tf.nn.relu) # (N, T_k, C) 102 | V = tf.layers.dense(keys, 103 | num_units, 104 | kernel_initializer=kernel_initializer, 105 | kernel_regularizer=kernel_regularizer, 106 | activation=tf.nn.relu) # (N, T_k, C) 107 | 108 | # Split and concat 109 | # 将Q, K, V 分成 num_heads 个子集合 110 | # Q_ = {Q1, Q2,...,Qh} 111 | # K_ = {K1, K2, ..., Kh} 112 | # V_ = {V1, V2, ..., Vh} 113 | # {Qi, Ki, Vi} 对应 head_i 的 scale dot-product attention 的三个输入 114 | Q_ = tf.concat(tf.split(Q, num_heads, axis=2), 115 | axis=0) # (h*N, T_q, C/h) 116 | K_ = tf.concat(tf.split(K, num_heads, axis=2), 117 | axis=0) # (h*N, T_k, C/h) 118 | V_ = tf.concat(tf.split(V, num_heads, axis=2), 119 | axis=0) # (h*N, T_k, C/h) 120 | 121 | # Multiplication 122 | # 每个 head的query 与 keys矩阵相乘并拼接成一个大矩阵 123 | outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k) 124 | 125 | # Scale 126 | # 防止 数值过大落入到softmax的饱和区域 127 | outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5) 128 | 129 | # Key Masking 130 | # 使得与该query无关的keys不要参与回答问题,(将其MatMul得到的值设为极小, 131 | # 则Softmax后得到的权重近似 0) 132 | # 对于key里某向量全为0时,则将其key-value 对应的权重设为近似0的极小数 133 | # 源代码实现中: 为 zeros vector 134 | # tf.sign(x) = -1 if x < 0 135 | # tf.sign(x) = 0 if x == 0 or tf.is_nan(x) 136 | # tf.sign(x) = 1 if x > 0 137 | key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k) 138 | key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k) 139 | key_masks = tf.tile(tf.expand_dims(key_masks, 1), [ 140 | 1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k) 141 | 142 | paddings = tf.ones_like(outputs) * (-2**32 + 1) 143 | 144 | # 如果是 0 (即)则返回极小值,使得softmax得出的权重接近0 145 | # 这样使得对sentence的query,不具有发言权 146 | outputs = tf.where(tf.equal(key_masks, 0), paddings, 147 | outputs) # (h*N, T_q, T_k) 148 | 149 | # Causality = Future blinding 150 | # tf.linalg.LinearOperatorLowerTriangular: 151 | # [[1, 2, 3, 4], [4, 5, 6, 8], [7, 8, 9, 10],[10, 11, 12, 13]]的tril: 152 | # ################################################################ 153 | # [[ 1. 0. 0. 0.] 154 | # [ 4. 5. 0. 0.] 155 | # [ 7. 8. 9. 0.] 156 | # [10. 11. 12. 
13.]] 157 | if causality: 158 | diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k) 159 | tril = tf.linalg.LinearOperatorLowerTriangular( 160 | diag_vals).to_dense() # (T_q, T_k) 161 | masks = tf.tile(tf.expand_dims(tril, 0), [ 162 | tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k) 163 | 164 | paddings = tf.ones_like(masks) * (-2**32 + 1) 165 | outputs = tf.where(tf.equal(masks, 0), paddings, 166 | outputs) # (h*N, T_q, T_k) 167 | 168 | # Activation 169 | outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k) 170 | 171 | # Query Masking 172 | # 将零含义的quey得到的attention设为0向量(去噪) 173 | # 原理与key masking相同 174 | query_masks = tf.sign( 175 | tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q) 176 | query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q) 177 | query_masks = tf.tile(tf.expand_dims( 178 | query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k) 179 | outputs *= query_masks # broadcasting. (N, T_q, T_k) 180 | 181 | # Dropouts 182 | # 此时的outputs仍是 key-value 对应的权重,对该权重集合进行dropout 183 | # 使得与value相乘得到的attention更加robust(即不仅仅依赖极少数的value) 184 | outputs = tf.layers.dropout( 185 | outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training)) 186 | 187 | # Weighted sum 188 | # 此时的outputs是 attention = w1*v1 + ... + w_Tk*v_Tk 189 | outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h) 190 | 191 | # Restore shape 192 | # 将 head_i 都concat起来, 因为结果维度与inputs(即query)相同,所以不需要全连接调整维度 193 | outputs = tf.concat(tf.split(outputs, num_heads, 194 | axis=0), axis=2) # (N, T_q, C) 195 | 196 | # Residual connection 197 | outputs += queries 198 | 199 | # Normalize 200 | outputs = normalize(outputs, scope="ln_1") # (N, T_q, C) 201 | # outputs = normalize(outputs) # (N, T_q, C) 202 | 203 | return outputs 204 | 205 | 206 | def feedforward(inputs, 207 | kernel_initializer=None, 208 | kernel_regularizer=None, 209 | num_units=[2048, 512], 210 | scope="multihead_attention", 211 | reuse=None): 212 | '''Point-wise feed forward net. 213 | 214 | Args: 215 | inputs: A 3d tensor with shape of [N, T, C]. 216 | kernel_initializer: weight initializer 217 | kernel_regularizer: weight regularizer 218 | num_units: A list of two integers. 219 | scope: Optional scope for `variable_scope`. 220 | reuse: Boolean, whether to reuse the weights of a previous layer 221 | by the same name. 
222 | 223 | Returns: 224 | A 3d tensor with the same shape and dtype as inputs 225 | ''' 226 | with tf.variable_scope(scope, reuse=reuse): 227 | # Inner layer 228 | # kernel_size = 1 即 element-wise (T中每个元素从C维变换到num_units[0]维) 229 | params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1, 230 | "activation": tf.nn.relu, "use_bias": True, 231 | "kernel_initializer": kernel_initializer, 232 | "kernel_regularizer": kernel_regularizer} 233 | outputs = tf.layers.conv1d(**params) 234 | 235 | # Readout layer 236 | params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1, 237 | "activation": None, "use_bias": True, 238 | "kernel_initializer": kernel_initializer, 239 | "kernel_regularizer": kernel_regularizer} 240 | outputs = tf.layers.conv1d(**params) 241 | 242 | # Residual connection 243 | outputs += inputs 244 | 245 | # Normalize 246 | outputs = normalize(outputs, scope="ln_2") 247 | # outputs = normalize(outputs) 248 | 249 | return outputs 250 | 251 | 252 | # ################################################### 253 | # CNN Part 254 | def conv_maxpool(inputs, 255 | filter_size, 256 | num_filters, 257 | hidden_size, 258 | kernel_initializer=None, 259 | kernel_regularizer=None, 260 | scope="conv_maxpool", 261 | reuse=None): 262 | """conv + maxpool 263 | 264 | 以attention为单位,按filter_size单位的attentions为窗口卷积params.num_filters个feature map, 265 | 对每个feature map求整体最大值,作为这个feature map的特征值 266 | 267 | Args: 268 | inputs (4d tensor): (N, T, C, d), d一般是1 269 | filter_size (int): filter的高度(宽度默认与attention的维度相同,这里可以理解为 270 | 选用filter_size个attention作为filter的窗口, 每次按一个attention滑动 271 | num_filters (int): 一个filter生成的feature map数量 272 | hidden_size (int): attention的维度 273 | kernel_initializer: weight initializer 274 | kernel_regularizer: weight regularizer 275 | scope (str, optional): Defaults to "conv_maxpool". scope名称 276 | reuse (Bool, optional): Defaults to None. 是否重复是否该命名域的变量 277 | 278 | Returns: 279 | 4d tensor: (N, 1, 1, params.num_filters) 280 | """ 281 | 282 | with tf.variable_scope(scope, reuse=reuse): 283 | inputs_height = inputs.get_shape().as_list()[1] 284 | 285 | # conv 为 (n, new_height, 1, params.num_filters) 286 | # new_height = (inputs_height - filter_size / stride_height) + 1 287 | conv = tf.layers.conv2d( 288 | inputs, 289 | filters=num_filters, 290 | kernel_size=(filter_size, hidden_size), 291 | kernel_initializer=kernel_initializer, 292 | kernel_regularizer=kernel_regularizer, 293 | activation=tf.nn.relu) 294 | 295 | # pool 为 (n, 1, 1, params.num_filters) 296 | # 将一个feature map 池化为一个特征值 297 | pool = tf.layers.max_pooling2d( 298 | conv, 299 | pool_size=(inputs_height - filter_size + 1, 1), 300 | strides=(1, 1)) 301 | return pool 302 | 303 | 304 | def inception(inputs, 305 | filter_size_list, 306 | num_filters, 307 | hidden_size, 308 | kernel_initializer=None, 309 | kernel_regularizer=None, 310 | scope="inception", 311 | reuse=None): 312 | """将不同filter_size得到的不同特征组合在一起并返回 313 | 314 | Args: 315 | inputs (4d tensor): (N, T, C, d), d一般是1 316 | filter_size_list (A list of int): 含有多个filter_size的list 317 | num_filters (int): 一个filter生成的feature map数量 318 | hidden_size (int): attention的维度 319 | kernel_initializer: weight initializer 320 | kernel_regularizer: weight regularizer 321 | scope (str, optional): Defaults to "conv_maxpool". scope名称 322 | reuse (Bool, optional): Defaults to None. 
304 | def inception(inputs,
305 |               filter_size_list,
306 |               num_filters,
307 |               hidden_size,
308 |               kernel_initializer=None,
309 |               kernel_regularizer=None,
310 |               scope="inception",
311 |               reuse=None):
312 |     """Combine the features obtained with the different filter_sizes and return them.
313 | 
314 |     Args:
315 |         inputs (4d tensor): (N, T, C, d), d is usually 1
316 |         filter_size_list (A list of int): list containing multiple filter_sizes
317 |         num_filters (int): number of feature maps generated by one filter
318 |         hidden_size (int): dimension of an attention vector
319 |         kernel_initializer: weight initializer
320 |         kernel_regularizer: weight regularizer
321 |         scope (str, optional): Defaults to "inception". Scope name.
322 |         reuse (Bool, optional): Defaults to None. Whether to reuse the variables in this scope.
323 | 
324 |     Returns:
325 |         4d tensor: (N, 1, 1, len(params.filter_size_list) * params.num_filters)
326 |     """
327 | 
328 |     with tf.variable_scope(scope, reuse=reuse):
329 |         pooled_outputs = []
330 |         for filter_size in filter_size_list:
331 |             feature = conv_maxpool(inputs,
332 |                                    filter_size=filter_size,
333 |                                    num_filters=num_filters,
334 |                                    hidden_size=hidden_size,
335 |                                    kernel_initializer=kernel_initializer,
336 |                                    kernel_regularizer=kernel_regularizer,
337 |                                    scope=f"conv_maxpool_{filter_size}_filter",
338 |                                    reuse=reuse)
339 |             pooled_outputs.append(feature)
340 | 
341 |         # (N, 1, 1, len(params.filter_size_list) * params.num_filters)
342 |         return tf.concat(pooled_outputs, -1)
343 | 
344 | 
345 | def dense_logits(inputs,
346 |                  label_num,
347 |                  kernel_regularizer,
348 |                  kernel_initializer=None,
349 |                  scope="dense_logits",
350 |                  inner_dense_outshape=None,
351 |                  inner_dense_activation=tf.nn.relu,
352 |                  use_bias=True,
353 |                  reuse=None
354 |                  ):
355 |     """Pass through one or more dense layers to output the logits of each label class for a category.
356 | 
357 |     Args:
358 |         inputs (2d tensor): feature vectors: (n, feature_num)
359 |         label_num (int): number of label classes
360 |         kernel_regularizer (regularizer): regularizer for the weight matrix w
361 |         kernel_initializer: weight initializer
362 |         scope (str, optional): Defaults to "dense_logits". Scope namespace.
363 |         inner_dense_outshape (list, optional): Defaults to None.
364 |             If None / [], there are no intermediate dense layers.
365 |         inner_dense_activation (operation, optional): Defaults to tf.nn.relu
366 |         use_bias (bool, optional): Defaults to True. Whether to use a bias in every layer.
367 |         reuse (Bool, optional): Defaults to None. Whether to reuse the variables in this scope.
368 | 
369 |     Returns:
370 |         2d tensor: (n, label_num)
371 |     """
372 | 
373 |     out = inputs
374 |     inner_dense_sizes = [] if inner_dense_outshape is None else inner_dense_outshape
375 | 
376 |     with tf.variable_scope(scope, reuse=reuse):
377 |         for out_size in inner_dense_sizes:
378 |             out = tf.layers.dense(
379 |                 out,
380 |                 out_size,
381 |                 activation=inner_dense_activation,
382 |                 use_bias=use_bias,
383 |                 kernel_initializer=kernel_initializer,
384 |                 kernel_regularizer=kernel_regularizer
385 |             )
386 | 
387 |         out = tf.layers.dense(
388 |             out,
389 |             label_num,
390 |             activation=None,
391 |             use_bias=use_bias,
392 |             kernel_initializer=kernel_initializer,
393 |             kernel_regularizer=kernel_regularizer
394 |         )
395 | 
396 |     return out
397 | 
398 | 
399 | def label_smoothing(inputs, epsilon=0.1):
400 |     '''Applies label smoothing. See https://arxiv.org/abs/1512.00567.
401 | 
402 |     Args:
403 |       inputs: A 3d tensor with shape of [N, T, V], where V is the vocabulary size.
404 |       epsilon: Smoothing rate.
405 | 406 | For example, 407 | 408 | ``` 409 | import tensorflow as tf 410 | inputs = tf.convert_to_tensor([[[0, 0, 1], 411 | [0, 1, 0], 412 | [1, 0, 0]], 413 | [[1, 0, 0], 414 | [1, 0, 0], 415 | [0, 1, 0]]], tf.float32) 416 | 417 | outputs = label_smoothing(inputs) 418 | 419 | with tf.Session() as sess: 420 | print(sess.run([outputs])) 421 | 422 | >> 423 | [array([[[ 0.03333334, 0.03333334, 0.93333334], 424 | [ 0.03333334, 0.93333334, 0.03333334], 425 | [ 0.93333334, 0.03333334, 0.03333334]], 426 | [[ 0.93333334, 0.03333334, 0.03333334], 427 | [ 0.93333334, 0.03333334, 0.03333334], 428 | [ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)] 429 | ``` 430 | ''' 431 | K = inputs.get_shape().as_list()[-1] # number of channels 432 | return ((1 - epsilon) * inputs) + (epsilon / K) 433 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "694a27f7c43c4c5bfd67fd1a610bb60907f73e4c4811235abc9cfee7cd756c55" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "3.6" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "absl-py": { 20 | "hashes": [ 21 | "sha256:1e6e70506fb4d867cf269af7bcc27b744c36bbc4c516f0f8ccf2039956deea72" 22 | ], 23 | "version": "==0.4.1" 24 | }, 25 | "astor": { 26 | "hashes": [ 27 | "sha256:95c30d87a6c2cf89aa628b87398466840f0ad8652f88eb173125a6df8533fb8d", 28 | "sha256:fb503b9e2fdd05609fbf557b916b4a7824171203701660f0c55bbf5a7a68713e" 29 | ], 30 | "markers": "python_version != '3.2.*' and python_version != '3.1.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.0.*'", 31 | "version": "==0.7.1" 32 | }, 33 | "cycler": { 34 | "hashes": [ 35 | "sha256:1d8a5ae1ff6c5cf9b93e8811e581232ad8920aeec647c37316ceac982b08cb2d", 36 | "sha256:cd7b2d1018258d7247a71425e9f26463dfb444d411c39569972f4ce586b0c9d8" 37 | ], 38 | "version": "==0.10.0" 39 | }, 40 | "gast": { 41 | "hashes": [ 42 | "sha256:7068908321ecd2774f145193c4b34a11305bd104b4551b09273dfd1d6a374930" 43 | ], 44 | "version": "==0.2.0" 45 | }, 46 | "grpcio": { 47 | "hashes": [ 48 | "sha256:00e0ed7a4558b65df24aed9369880c7e247bcd48c9c71c9c40f3143ab02842a5", 49 | "sha256:113d457cf87a2d642820f2a442008d714c52f9760fea2c02b22fe06aacc40531", 50 | "sha256:114399097a2af15fac768dd99c32171ed33804bf127945b5cba0fb9f4380b09d", 51 | "sha256:14a16eb13472cb16213dad17d4580d885824e04b93ae4323ed532168f99477a0", 52 | "sha256:15a5fc23ba2e5f30758fbb8763f79a764e3de1ffcb92c2ccd46f45ffa63910d2", 53 | "sha256:16cdd82a3aed9b6d3067492413bfffe61bf0f98c06f2942f887d79b8fd68898d", 54 | "sha256:21901beb267bd996a094d960996bff047db7c10ad3bfa7bd4689eb0374101e63", 55 | "sha256:253ae7ed8b50132ab331146ecb329d740330f9f76b6986cc36c3280eb74878c7", 56 | "sha256:25f2d6120e710223195027375855a06cfc5a4f448e89b20f8f09975527297aeb", 57 | "sha256:2767b87260b2256d738a496d5aaf8ce4bbeef34cdbe9c852508c09a36ff35631", 58 | "sha256:2fffb227db6d95e62086563720518cd5cdbf7127675f49067f55d3c7cca019c4", 59 | "sha256:3b16f12bedb3fc22db8c4a55119f460778330d0ca44cc9716d106170b4270685", 60 | "sha256:3f57682b103febfd603350d2e0c91bdb5240593dd3c377ade9ab27da0540f4ad", 61 | "sha256:4473f462accbfb6207174af8bbdd21c09d355a16d6a718e24810afb236ed237e", 62 | "sha256:4a70e37c6bf2f81f48f739184c058c9912caf9fd85db6b8688e8916b748b02cb", 63 | 
"sha256:529b872bddc0ad5243fa58f77dfbac1a04c9812f35a4d2033cba795e3b579411", 64 | "sha256:5f54bb445a443a12123d88f3f83a715d00c0579481201e08caa931a0c38f794e", 65 | "sha256:670e884e5b5c8805e30d214da790cb86487db27ed9e7ccd74f11a2bc5a27df2b", 66 | "sha256:67f1dd57935abe0d090f3b5f9fbc4d7ef84a53c9cd806c7cb91a2bda9dd8f14a", 67 | "sha256:6bb4acb354a0f84ca767658bf414b70dbfc88a41210456720e5ef1248354b0ad", 68 | "sha256:701534465e7936524e786f9448e9ccc9a519fddb59bca744ef2abe04fd737104", 69 | "sha256:7ab4fcef50ec8257a4ec5ff676001d9be466aa965df57ccf89873a263c7bb4ae", 70 | "sha256:7b2f2882a909213d038f399dcf07995870e3a2f27a93ce8dbfb65db860df30be", 71 | "sha256:9380f8365d9fd7fea6497615295b4e749345bdc7990655d52ad22fef0127f1d3", 72 | "sha256:aa4ca8f74500d90a6b8745aef03fb765b8c1655524f695baa46320763aad5822", 73 | "sha256:bf203d00fa7d7f9a67646c5468732b03330f7910fffa1bac7dcde956d27d3508", 74 | "sha256:cba9f8b4910644c6664f875ba20e8ea031919118d4567fe0b77aee28d7fa32c6", 75 | "sha256:d360ccb3aedec5b799c8b0d7968013b92a5a67fad00da77ff987360d59182f9f", 76 | "sha256:e0b262828da4b0986ab9994ed8727655b21f1575b6b31cf2f05ba0d9ca650bae", 77 | "sha256:e9caf61a789dd657404195d7bc0d3e521bf1878b43448ffe8b03bb03e27fb23d", 78 | "sha256:eade750711df77696ae8361412018c0b294bb078fc0a3b51f5a77e789af692c7", 79 | "sha256:f322631581149eb35f834ea0defcacf4edc8641db206f83027b9c239aa46a442" 80 | ], 81 | "version": "==1.15.0" 82 | }, 83 | "jieba": { 84 | "hashes": [ 85 | "sha256:de385e48582a4862e55a9167334d0fbe91d479026e5dac40e59e22c08b8e883e" 86 | ], 87 | "index": "pypi", 88 | "version": "==0.39" 89 | }, 90 | "kiwisolver": { 91 | "hashes": [ 92 | "sha256:0ee4ed8b3ae8f5f712b0aa9ebd2858b5b232f1b9a96b0943dceb34df2a223bc3", 93 | "sha256:0f7f532f3c94e99545a29f4c3f05637f4d2713e7fd91b4dd8abfc18340b86cd5", 94 | "sha256:1a078f5dd7e99317098f0e0d490257fd0349d79363e8c923d5bb76428f318421", 95 | "sha256:1aa0b55a0eb1bd3fa82e704f44fb8f16e26702af1a073cc5030eea399e617b56", 96 | "sha256:2874060b91e131ceeff00574b7c2140749c9355817a4ed498e82a4ffa308ecbc", 97 | "sha256:379d97783ba8d2934d52221c833407f20ca287b36d949b4bba6c75274bcf6363", 98 | "sha256:3b791ddf2aefc56382aadc26ea5b352e86a2921e4e85c31c1f770f527eb06ce4", 99 | "sha256:4329008a167fac233e398e8a600d1b91539dc33c5a3eadee84c0d4b04d4494fa", 100 | "sha256:45813e0873bbb679334a161b28cb9606d9665e70561fd6caa8863e279b5e464b", 101 | "sha256:53a5b27e6b5717bdc0125338a822605084054c80f382051fb945d2c0e6899a20", 102 | "sha256:574f24b9805cb1c72d02b9f7749aa0cc0b81aa82571be5201aa1453190390ae5", 103 | "sha256:66f82819ff47fa67a11540da96966fb9245504b7f496034f534b81cacf333861", 104 | "sha256:79e5fe3ccd5144ae80777e12973027bd2f4f5e3ae8eb286cabe787bed9780138", 105 | "sha256:83410258eb886f3456714eea4d4304db3a1fc8624623fc3f38a487ab36c0f653", 106 | "sha256:8b6a7b596ce1d2a6d93c3562f1178ebd3b7bb445b3b0dd33b09f9255e312a965", 107 | "sha256:9576cb63897fbfa69df60f994082c3f4b8e6adb49cccb60efb2a80a208e6f996", 108 | "sha256:95a25d9f3449046ecbe9065be8f8380c03c56081bc5d41fe0fb964aaa30b2195", 109 | "sha256:a424f048bebc4476620e77f3e4d1f282920cef9bc376ba16d0b8fe97eec87cde", 110 | "sha256:aaec1cfd94f4f3e9a25e144d5b0ed1eb8a9596ec36d7318a504d813412563a85", 111 | "sha256:acb673eecbae089ea3be3dcf75bfe45fc8d4dcdc951e27d8691887963cf421c7", 112 | "sha256:b15bc8d2c2848a4a7c04f76c9b3dc3561e95d4dabc6b4f24bfabe5fd81a0b14f", 113 | "sha256:b1c240d565e977d80c0083404c01e4d59c5772c977fae2c483f100567f50847b", 114 | "sha256:c595693de998461bcd49b8d20568c8870b3209b8ea323b2a7b0ea86d85864694", 115 | "sha256:ce3be5d520b4d2c3e5eeb4cd2ef62b9b9ab8ac6b6fedbaa0e39cdb6f50644278", 116 | 
"sha256:e0f910f84b35c36a3513b96d816e6442ae138862257ae18a0019d2fc67b041dc", 117 | "sha256:ea36e19ac0a483eea239320aef0bd40702404ff8c7e42179a2d9d36c5afcb55c", 118 | "sha256:efabbcd4f406b532206b8801058c8bab9e79645b9880329253ae3322b7b02cd5", 119 | "sha256:f923406e6b32c86309261b8195e24e18b6a8801df0cfc7814ac44017bfcb3939" 120 | ], 121 | "markers": "python_version != '3.0.*' and python_version != '3.2.*' and python_version != '3.3.*' and python_version >= '2.7' and python_version != '3.1.*'", 122 | "version": "==1.0.1" 123 | }, 124 | "markdown": { 125 | "hashes": [ 126 | "sha256:9ba587db9daee7ec761cfc656272be6aabe2ed300fece21208e4aab2e457bc8f", 127 | "sha256:a856869c7ff079ad84a3e19cd87a64998350c2b94e9e08e44270faef33400f81" 128 | ], 129 | "version": "==2.6.11" 130 | }, 131 | "matplotlib": { 132 | "hashes": [ 133 | "sha256:0ba8e3ec1b0feddc6b068fe70dc38dcf2917e301ad8d2b3f848c14ad463a4157", 134 | "sha256:10a48e33e64dbd95f0776ba162f379c5cc55301c2d155506e79ce0c26b52f2ce", 135 | "sha256:1376535fe731adbba55ab9e48896de226b7e89dbb55390c5fbd8f7161b7ae3be", 136 | "sha256:16f0f8ba22df1e2c9f06c87088de45742322fde282a93b5c744c0f969cf7932e", 137 | "sha256:1c6c999f2212858021329537f8e0f98f3f29086ec3683511dd1ecec84409f51d", 138 | "sha256:2316dc177fc7b3d8848b49365498de0c385b4c9bba511edddd24c34fbe3d37a4", 139 | "sha256:3398bfb533482bf21974cecf28224dd23784ad4e4848be582903f7a2436ec12e", 140 | "sha256:3477cb1e1061b34210acc43d20050be8444478ff50b8adfac5fe2b45fc97df01", 141 | "sha256:3cc06333b8264428d02231804e2e726b902e9161dc16f573183dee6cb7ef621f", 142 | "sha256:4259ea7cb2c238355ee13275eddd261d869cefbdeb18a65f35459589d6d17def", 143 | "sha256:4addcf93234b6122f530f90f485fd3d00d158911fbc1ed24db3fa66cd49fe565", 144 | "sha256:50c0e24bcbce9c54346f4a2f4e97b0ed111f0413ac3fe9954061ae1c8aa7021f", 145 | "sha256:62ed7597d9e54db6e133420d779c642503c25eba390e1178d85dfb2ba0d05948", 146 | "sha256:69f6d51e41a17f6a5f70c56bb10b8ded9f299609204495a7fa2782a3a755ffc5", 147 | "sha256:6d232e49b74e3d2db22c63c25a9a0166d965e87e2b057f795487f1f244b61d9d", 148 | "sha256:7355bf757ecacd5f0ac9dd9523c8e1a1103faadf8d33c22664178e17533f8ce5", 149 | "sha256:886b1045c5105631f10c1cbc999f910e44d33af3e9c7efd68c2123efc06ab636", 150 | "sha256:9e1f353edd7fc7e5e9101abd5bc0201946f77a1b59e0da49095086c03db856ed", 151 | "sha256:b3a343dfcbe296dbe0f26c731beee72a792ff948407e6979524298ae7bc3234e", 152 | "sha256:d93675af09ca497a25f4f8d62f3313cf0f21e45427a87487049fe84898b99909", 153 | "sha256:e2409ef9d37804dfb566f39c962e6ed70f281ff516b8131b3e6b4e6442711ff1", 154 | "sha256:f8b653b0f89938ba72e92ab080c2f3aa24c1b72e2f61add22880cd1b9a6e3cdd" 155 | ], 156 | "index": "pypi", 157 | "version": "==2.2.3" 158 | }, 159 | "numpy": { 160 | "hashes": [ 161 | "sha256:07379fe0b450f6fd6e5934a9bc015025bb4ce1c8fbed3ca8bef29328b1bc9570", 162 | "sha256:085afac75bbc97a096744fcfc97a4b321c5a87220286811e85089ae04885acdd", 163 | "sha256:2d6481c6bdab1c75affc0fc71eb1bd4b3ecef620d06f2f60c3f00521d54be04f", 164 | "sha256:2df854df882d322d5c23087a4959e145b953dfff2abe1774fec4f639ac2f3160", 165 | "sha256:381ad13c30cd1d0b2f3da8a0c1a4aa697487e8bb0e9e0cbeb7439776bcb645f8", 166 | "sha256:385f1ce46e08676505b692bfde918c1e0b350963a15ef52d77691c2cf0f5dbf6", 167 | "sha256:4130e5ae16c656b7de654dc5e595cfeb85d3a4b0bb0734d19c0dce6dc7ee0e07", 168 | "sha256:4d278c2261be6423c5e63d8f0ceb1b0c6db3ff83f2906f4b860db6ae99ca1bb5", 169 | "sha256:51c5dcb51cf88b34b7d04c15f600b07c6ccbb73a089a38af2ab83c02862318da", 170 | "sha256:589336ba5199c8061239cf446ee2f2f1fcc0c68e8531ee1382b6fc0c66b2d388", 171 | 
"sha256:5ae3564cb630e155a650f4f9c054589848e97836bebae5637240a0d8099f817b", 172 | "sha256:5edf1acc827ed139086af95ce4449b7b664f57a8c29eb755411a634be280d9f2", 173 | "sha256:6b82b81c6b3b70ed40bc6d0b71222ebfcd6b6c04a6e7945a936e514b9113d5a3", 174 | "sha256:6c57f973218b776195d0356e556ec932698f3a563e2f640cfca7020086383f50", 175 | "sha256:758d1091a501fd2d75034e55e7e98bfd1370dc089160845c242db1c760d944d9", 176 | "sha256:8622db292b766719810e0cb0f62ef6141e15fe32b04e4eb2959888319e59336b", 177 | "sha256:8b8dcfcd630f1981f0f1e3846fae883376762a0c1b472baa35b145b911683b7b", 178 | "sha256:91fdd510743ae4df862dbd51a4354519dd9fb8941347526cd9c2194b792b3da9", 179 | "sha256:97fa8f1dceffab782069b291e38c4c2227f255cdac5f1e3346666931df87373e", 180 | "sha256:9b705f18b26fb551366ab6347ba9941b62272bf71c6bbcadcd8af94d10535241", 181 | "sha256:9d69967673ab7b028c2df09cae05ba56bf4e39e3cb04ebe452b6035c3b49848e", 182 | "sha256:9e1f53afae865cc32459ad211493cf9e2a3651a7295b7a38654ef3d123808996", 183 | "sha256:a4a433b3a264dbc9aa9c7c241e87c0358a503ea6394f8737df1683c7c9a102ac", 184 | "sha256:baadc5f770917ada556afb7651a68176559f4dca5f4b2d0947cd15b9fb84fb51", 185 | "sha256:c725d11990a9243e6ceffe0ab25a07c46c1cc2c5dc55e305717b5afe856c9608", 186 | "sha256:d696a8c87315a83983fc59dd27efe034292b9e8ad667aeae51a68b4be14690d9", 187 | "sha256:e1864a4e9f93ddb2dc6b62ccc2ec1f8250ff4ac0d3d7a15c8985dd4e1fbd6418", 188 | "sha256:e1d18421a7e2ad4a655b76e65d549d4159f8874c18a417464c1d439ee7ccc7cd" 189 | ], 190 | "markers": "python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.2.*' and python_version != '3.0.*'", 191 | "version": "==1.14.5" 192 | }, 193 | "pandas": { 194 | "hashes": [ 195 | "sha256:11975fad9edbdb55f1a560d96f91830e83e29bed6ad5ebf506abda09818eaf60", 196 | "sha256:12e13d127ca1b585dd6f6840d3fe3fa6e46c36a6afe2dbc5cb0b57032c902e31", 197 | "sha256:1c87fcb201e1e06f66e23a61a5fea9eeebfe7204a66d99df24600e3f05168051", 198 | "sha256:242e9900de758e137304ad4b5663c2eff0d798c2c3b891250bd0bd97144579da", 199 | "sha256:26c903d0ae1542890cb9abadb4adcb18f356b14c2df46e4ff657ae640e3ac9e7", 200 | "sha256:2e1e88f9d3e5f107b65b59cd29f141995597b035d17cc5537e58142038942e1a", 201 | "sha256:31b7a48b344c14691a8e92765d4023f88902ba3e96e2e4d0364d3453cdfd50db", 202 | "sha256:4fd07a932b4352f8a8973761ab4e84f965bf81cc750fb38e04f01088ab901cb8", 203 | "sha256:5b24ca47acf69222e82530e89111dd9d14f9b970ab2cd3a1c2c78f0c4fbba4f4", 204 | "sha256:647b3b916cc8f6aeba240c8171be3ab799c3c1b2ea179a3be0bd2712c4237553", 205 | "sha256:66b060946046ca27c0e03e9bec9bba3e0b918bafff84c425ca2cc2e157ce121e", 206 | "sha256:6efa9fa6e1434141df8872d0fa4226fc301b17aacf37429193f9d70b426ea28f", 207 | "sha256:be4715c9d8367e51dbe6bc6d05e205b1ae234f0dc5465931014aa1c4af44c1ba", 208 | "sha256:bea90da782d8e945fccfc958585210d23de374fa9294a9481ed2abcef637ebfc", 209 | "sha256:d318d77ab96f66a59e792a481e2701fba879e1a453aefeebdb17444fe204d1ed", 210 | "sha256:d785fc08d6f4207437e900ffead930a61e634c5e4f980ba6d3dc03c9581748c7", 211 | "sha256:de9559287c4fe8da56e8c3878d2374abc19d1ba2b807bfa7553e912a8e5ba87c", 212 | "sha256:f4f98b190bb918ac0bc0e3dd2ab74ff3573da9f43106f6dba6385406912ec00f", 213 | "sha256:f71f1a7e2d03758f6e957896ed696254e2bc83110ddbc6942018f1a232dd9dad", 214 | "sha256:fb944c8f0b0ab5c1f7846c686bc4cdf8cde7224655c12edcd59d5212cd57bec0" 215 | ], 216 | "index": "pypi", 217 | "version": "==0.23.4" 218 | }, 219 | "protobuf": { 220 | "hashes": [ 221 | "sha256:10394a4d03af7060fa8a6e1cbf38cea44be1467053b0aea5bbfcb4b13c4b88c4", 222 | 
"sha256:1489b376b0f364bcc6f89519718c057eb191d7ad6f1b395ffd93d1aa45587811", 223 | "sha256:1931d8efce896981fe410c802fd66df14f9f429c32a72dd9cfeeac9815ec6444", 224 | "sha256:196d3a80f93c537f27d2a19a4fafb826fb4c331b0b99110f985119391d170f96", 225 | "sha256:46e34fdcc2b1f2620172d3a4885128705a4e658b9b62355ae5e98f9ea19f42c2", 226 | "sha256:59cd75ded98094d3cf2d79e84cdb38a46e33e7441b2826f3838dcc7c07f82995", 227 | "sha256:5ee0522eed6680bb5bac5b6d738f7b0923b3cafce8c4b1a039a6107f0841d7ed", 228 | "sha256:65917cfd5da9dfc993d5684643063318a2e875f798047911a9dd71ca066641c9", 229 | "sha256:685bc4ec61a50f7360c9fd18e277b65db90105adbf9c79938bd315435e526b90", 230 | "sha256:92e8418976e52201364a3174e40dc31f5fd8c147186d72380cbda54e0464ee19", 231 | "sha256:9335f79d1940dfb9bcaf8ec881fb8ab47d7a2c721fb8b02949aab8bbf8b68625", 232 | "sha256:a7ee3bb6de78185e5411487bef8bc1c59ebd97e47713cba3c460ef44e99b3db9", 233 | "sha256:ceec283da2323e2431c49de58f80e1718986b79be59c266bb0509cbf90ca5b9e", 234 | "sha256:fcfc907746ec22716f05ea96b7f41597dfe1a1c088f861efb8a0d4f4196a6f10" 235 | ], 236 | "version": "==3.6.1" 237 | }, 238 | "pyparsing": { 239 | "hashes": [ 240 | "sha256:0832bcf47acd283788593e7a0f542407bd9550a55a8a8435214a1960e04bcb04", 241 | "sha256:fee43f17a9c4087e7ed1605bd6df994c6173c1e977d7ade7b651292fab2bd010" 242 | ], 243 | "version": "==2.2.0" 244 | }, 245 | "python-dateutil": { 246 | "hashes": [ 247 | "sha256:1adb80e7a782c12e52ef9a8182bebeb73f1d7e24e374397af06fb4956c8dc5c0", 248 | "sha256:e27001de32f627c22380a688bcc43ce83504a7bc5da472209b4c70f02829f0b8" 249 | ], 250 | "version": "==2.7.3" 251 | }, 252 | "pytz": { 253 | "hashes": [ 254 | "sha256:a061aa0a9e06881eb8b3b2b43f05b9439d6583c206d0a6c340ff72a7b6669053", 255 | "sha256:ffb9ef1de172603304d9d2819af6f5ece76f2e85ec10692a524dd876e72bf277" 256 | ], 257 | "version": "==2018.5" 258 | }, 259 | "pyyaml": { 260 | "hashes": [ 261 | "sha256:3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b", 262 | "sha256:3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf", 263 | "sha256:40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a", 264 | "sha256:558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3", 265 | "sha256:a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1", 266 | "sha256:aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1", 267 | "sha256:bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613", 268 | "sha256:d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04", 269 | "sha256:d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f", 270 | "sha256:e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537", 271 | "sha256:e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531" 272 | ], 273 | "index": "pypi", 274 | "version": "==3.13" 275 | }, 276 | "six": { 277 | "hashes": [ 278 | "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", 279 | "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" 280 | ], 281 | "version": "==1.11.0" 282 | }, 283 | "tensorboard": { 284 | "hashes": [ 285 | "sha256:64edbe66864e02719f85708ae01efe3448af964c042a502fd2046cc87a3b1f12", 286 | "sha256:e4ea6ac2e47bf715b915f08a186e6205fa097318bd73f0b265d437b1d7834484" 287 | ], 288 | "markers": "python_version != '3.0.*' and python_version != '3.3.*' and python_version >= '2.7' and python_version != '3.1.*' and python_version != '3.2.*'", 289 | "version": "==1.10.0" 290 | }, 291 | "tensorflow": { 292 | "hashes": [ 293 | 
"sha256:0b00e62c7f13a9935413ef71aa4c251699a0a2715955ee25f6f174bedd201b27", 294 | "sha256:2aa07a64ea7ec238bc734c82f0bb11562c58bdcd61e976ff2076275bfbc9ab42", 295 | "sha256:452b9547dd69c2e264263f651f11e5aca60dd911cddbc6cf34a01137fd39505a", 296 | "sha256:4d95038155cf3c95d3d92aaf494442aaa8f787a87f82d889d8305579b554ab45", 297 | "sha256:5bb2d1ff4321dda724885be8167ace3bf716708c8aff21bd622e047801915eb2", 298 | "sha256:6f9aa4282a8890d79b83f2f55a2429675f2d0881964248a8d8838e761773e170", 299 | "sha256:7344ea4207f25dccfd8fdd6da2283930190a34a90d301636b7281785c148682c", 300 | "sha256:7c99f379fcc2fc8cf776471006947115996bbd985d6ee41aa26058f58bf4273e", 301 | "sha256:814d717b68b83476a2bd3b5fba26949786d224770dfc7462d820e390409e5a6b", 302 | "sha256:92b166baf82b1c5962d2661e9d5d01ebcd83d528d0295e48bf9896e5f2875d73", 303 | "sha256:9eb5d117089b5cfa33c6dd32fd05574662a8167de53b686708b683e4af9bc687", 304 | "sha256:a0ad5e1e9ccb230fbcbde78cd187b74526db3145899f5c639f453e246c0b80c0" 305 | ], 306 | "index": "pypi", 307 | "version": "==1.10.0" 308 | }, 309 | "termcolor": { 310 | "hashes": [ 311 | "sha256:1d6d69ce66211143803fbc56652b41d73b4a400a2891d7bf7a1cdf4c02de613b" 312 | ], 313 | "version": "==1.1.0" 314 | }, 315 | "tqdm": { 316 | "hashes": [ 317 | "sha256:18f1818ce951aeb9ea162ae1098b43f583f7d057b34d706f66939353d1208889", 318 | "sha256:df02c0650160986bac0218bb07952245fc6960d23654648b5d5526ad5a4128c9" 319 | ], 320 | "index": "pypi", 321 | "version": "==4.26.0" 322 | }, 323 | "werkzeug": { 324 | "hashes": [ 325 | "sha256:c3fd7a7d41976d9f44db327260e263132466836cef6f91512889ed60ad26557c", 326 | "sha256:d5da73735293558eb1651ee2fddc4d0dedcfa06538b8813a2e20011583c9e49b" 327 | ], 328 | "version": "==0.14.1" 329 | }, 330 | "wheel": { 331 | "hashes": [ 332 | "sha256:0a2e54558a0628f2145d2fc822137e322412115173e8a2ddbe1c9024338ae83c", 333 | "sha256:80044e51ec5bbf6c894ba0bc48d26a8c20a9ba629f4ca19ea26ecfcf87685f5f" 334 | ], 335 | "markers": "python_version != '3.1.*' and python_version >= '2.7' and python_version != '3.2.*' and python_version != '3.0.*' and python_version != '3.3.*'", 336 | "version": "==0.31.1" 337 | } 338 | }, 339 | "develop": { 340 | "astroid": { 341 | "hashes": [ 342 | "sha256:292fa429e69d60e4161e7612cb7cc8fa3609e2e309f80c224d93a76d5e7b58be", 343 | "sha256:c7013d119ec95eb626f7a2011f0b63d0c9a095df9ad06d8507b37084eada1a8d" 344 | ], 345 | "version": "==2.0.4" 346 | }, 347 | "atomicwrites": { 348 | "hashes": [ 349 | "sha256:0312ad34fcad8fac3704d441f7b317e50af620823353ec657a53e981f92920c0", 350 | "sha256:ec9ae8adaae229e4f8446952d204a3e4b5fdd2d099f9be3aaf556120135fb3ee" 351 | ], 352 | "markers": "python_version != '3.0.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.2.*'", 353 | "version": "==1.2.1" 354 | }, 355 | "attrs": { 356 | "hashes": [ 357 | "sha256:10cbf6e27dbce8c30807caf056c8eb50917e0eaafe86347671b57254006c3e69", 358 | "sha256:ca4be454458f9dec299268d472aaa5a11f67a4ff70093396e1ceae9c76cf4bbb" 359 | ], 360 | "version": "==18.2.0" 361 | }, 362 | "autopep8": { 363 | "hashes": [ 364 | "sha256:655e3ee8b4545be6cfed18985f581ee9ecc74a232550ee46e9797b6fbf4f336d" 365 | ], 366 | "index": "pypi", 367 | "version": "==1.4" 368 | }, 369 | "backcall": { 370 | "hashes": [ 371 | "sha256:38ecd85be2c1e78f77fd91700c76e14667dc21e2713b63876c0eb901196e01e4", 372 | "sha256:bbbf4b1e5cd2bdb08f915895b51081c041bac22394fdfcfdfbe9f14b77c08bf2" 373 | ], 374 | "version": "==0.1.0" 375 | }, 376 | "colorama": { 377 | "hashes": [ 378 | 
"sha256:463f8483208e921368c9f306094eb6f725c6ca42b0f97e313cb5d5512459feda", 379 | "sha256:48eb22f4f8461b1df5734a074b57042430fb06e1d61bd1e11b078c0fe6d7a1f1" 380 | ], 381 | "markers": "sys_platform == 'win32'", 382 | "version": "==0.3.9" 383 | }, 384 | "decorator": { 385 | "hashes": [ 386 | "sha256:2c51dff8ef3c447388fe5e4453d24a2bf128d3a4c32af3fabef1f01c6851ab82", 387 | "sha256:c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c" 388 | ], 389 | "version": "==4.3.0" 390 | }, 391 | "flake8": { 392 | "hashes": [ 393 | "sha256:7253265f7abd8b313e3892944044a365e3f4ac3fcdcfb4298f55ee9ddf188ba0", 394 | "sha256:c7841163e2b576d435799169b78703ad6ac1bbb0f199994fc05f700b2a90ea37" 395 | ], 396 | "index": "pypi", 397 | "version": "==3.5.0" 398 | }, 399 | "ipykernel": { 400 | "hashes": [ 401 | "sha256:00d88b7e628e4e893359119b894451611214bce09776a3bf8248fe42cb48ada6", 402 | "sha256:a706b975376efef98b70e10cd167ab9506cf08a689d689a3c7daf344c15040f6", 403 | "sha256:c5a498c70f7765c34f3397cf943b069057f5bef4e0218e4cfbb733e9f38fa5fa" 404 | ], 405 | "index": "pypi", 406 | "version": "==4.9.0" 407 | }, 408 | "ipython": { 409 | "hashes": [ 410 | "sha256:007dcd929c14631f83daff35df0147ea51d1af420da303fd078343878bd5fb62", 411 | "sha256:b0f2ef9eada4a68ef63ee10b6dde4f35c840035c50fd24265f8052c98947d5a4" 412 | ], 413 | "version": "==6.5.0" 414 | }, 415 | "ipython-genutils": { 416 | "hashes": [ 417 | "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8", 418 | "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8" 419 | ], 420 | "version": "==0.2.0" 421 | }, 422 | "isort": { 423 | "hashes": [ 424 | "sha256:1153601da39a25b14ddc54955dbbacbb6b2d19135386699e2ad58517953b34af", 425 | "sha256:b9c40e9750f3d77e6e4d441d8b0266cf555e7cdabdcff33c4fd06366ca761ef8", 426 | "sha256:ec9ef8f4a9bc6f71eec99e1806bfa2de401650d996c59330782b89a5555c1497" 427 | ], 428 | "index": "pypi", 429 | "version": "==4.3.4" 430 | }, 431 | "jedi": { 432 | "hashes": [ 433 | "sha256:b409ed0f6913a701ed474a614a3bb46e6953639033e31f769ca7581da5bd1ec1", 434 | "sha256:c254b135fb39ad76e78d4d8f92765ebc9bf92cbc76f49e97ade1d5f5121e1f6f" 435 | ], 436 | "version": "==0.12.1" 437 | }, 438 | "jupyter-client": { 439 | "hashes": [ 440 | "sha256:27befcf0446b01e29853014d6a902dd101ad7d7f94e2252b1adca17c3466b761", 441 | "sha256:59e6d791e22a8002ad0e80b78c6fd6deecab4f9e1b1aa1a22f4213de271b29ea" 442 | ], 443 | "version": "==5.2.3" 444 | }, 445 | "jupyter-core": { 446 | "hashes": [ 447 | "sha256:927d713ffa616ea11972534411544589976b2493fc7e09ad946e010aa7eb9970", 448 | "sha256:ba70754aa680300306c699790128f6fbd8c306ee5927976cbe48adacf240c0b7" 449 | ], 450 | "version": "==4.4.0" 451 | }, 452 | "lazy-object-proxy": { 453 | "hashes": [ 454 | "sha256:0ce34342b419bd8f018e6666bfef729aec3edf62345a53b537a4dcc115746a33", 455 | "sha256:1b668120716eb7ee21d8a38815e5eb3bb8211117d9a90b0f8e21722c0758cc39", 456 | "sha256:209615b0fe4624d79e50220ce3310ca1a9445fd8e6d3572a896e7f9146bbf019", 457 | "sha256:27bf62cb2b1a2068d443ff7097ee33393f8483b570b475db8ebf7e1cba64f088", 458 | "sha256:27ea6fd1c02dcc78172a82fc37fcc0992a94e4cecf53cb6d73f11749825bd98b", 459 | "sha256:2c1b21b44ac9beb0fc848d3993924147ba45c4ebc24be19825e57aabbe74a99e", 460 | "sha256:2df72ab12046a3496a92476020a1a0abf78b2a7db9ff4dc2036b8dd980203ae6", 461 | "sha256:320ffd3de9699d3892048baee45ebfbbf9388a7d65d832d7e580243ade426d2b", 462 | "sha256:50e3b9a464d5d08cc5227413db0d1c4707b6172e4d4d915c1c70e4de0bbff1f5", 463 | "sha256:5276db7ff62bb7b52f77f1f51ed58850e315154249aceb42e7f4c611f0f847ff", 464 | 
"sha256:61a6cf00dcb1a7f0c773ed4acc509cb636af2d6337a08f362413c76b2b47a8dd", 465 | "sha256:6ae6c4cb59f199d8827c5a07546b2ab7e85d262acaccaacd49b62f53f7c456f7", 466 | "sha256:7661d401d60d8bf15bb5da39e4dd72f5d764c5aff5a86ef52a042506e3e970ff", 467 | "sha256:7bd527f36a605c914efca5d3d014170b2cb184723e423d26b1fb2fd9108e264d", 468 | "sha256:7cb54db3535c8686ea12e9535eb087d32421184eacc6939ef15ef50f83a5e7e2", 469 | "sha256:7f3a2d740291f7f2c111d86a1c4851b70fb000a6c8883a59660d95ad57b9df35", 470 | "sha256:81304b7d8e9c824d058087dcb89144842c8e0dea6d281c031f59f0acf66963d4", 471 | "sha256:933947e8b4fbe617a51528b09851685138b49d511af0b6c0da2539115d6d4514", 472 | "sha256:94223d7f060301b3a8c09c9b3bc3294b56b2188e7d8179c762a1cda72c979252", 473 | "sha256:ab3ca49afcb47058393b0122428358d2fbe0408cf99f1b58b295cfeb4ed39109", 474 | "sha256:bd6292f565ca46dee4e737ebcc20742e3b5be2b01556dafe169f6c65d088875f", 475 | "sha256:cb924aa3e4a3fb644d0c463cad5bc2572649a6a3f68a7f8e4fbe44aaa6d77e4c", 476 | "sha256:d0fc7a286feac9077ec52a927fc9fe8fe2fabab95426722be4c953c9a8bede92", 477 | "sha256:ddc34786490a6e4ec0a855d401034cbd1242ef186c20d79d2166d6a4bd449577", 478 | "sha256:e34b155e36fa9da7e1b7c738ed7767fc9491a62ec6af70fe9da4a057759edc2d", 479 | "sha256:e5b9e8f6bda48460b7b143c3821b21b452cb3a835e6bbd5dd33aa0c8d3f5137d", 480 | "sha256:e81ebf6c5ee9684be8f2c87563880f93eedd56dd2b6146d8a725b50b7e5adb0f", 481 | "sha256:eb91be369f945f10d3a49f5f9be8b3d0b93a4c2be8f8a5b83b0571b8123e0a7a", 482 | "sha256:f460d1ceb0e4a5dcb2a652db0904224f367c9b3c1470d5a7683c0480e582468b" 483 | ], 484 | "version": "==1.3.1" 485 | }, 486 | "mccabe": { 487 | "hashes": [ 488 | "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", 489 | "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f" 490 | ], 491 | "version": "==0.6.1" 492 | }, 493 | "more-itertools": { 494 | "hashes": [ 495 | "sha256:c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092", 496 | "sha256:c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e", 497 | "sha256:fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d" 498 | ], 499 | "version": "==4.3.0" 500 | }, 501 | "parso": { 502 | "hashes": [ 503 | "sha256:35704a43a3c113cce4de228ddb39aab374b8004f4f2407d070b6a2ca784ce8a2", 504 | "sha256:895c63e93b94ac1e1690f5fdd40b65f07c8171e3e53cbd7793b5b96c0e0a7f24" 505 | ], 506 | "version": "==0.3.1" 507 | }, 508 | "pickleshare": { 509 | "hashes": [ 510 | "sha256:84a9257227dfdd6fe1b4be1319096c20eb85ff1e82c7932f36efccfe1b09737b", 511 | "sha256:c9a2541f25aeabc070f12f452e1f2a8eae2abd51e1cd19e8430402bdf4c1d8b5" 512 | ], 513 | "version": "==0.7.4" 514 | }, 515 | "pluggy": { 516 | "hashes": [ 517 | "sha256:6e3836e39f4d36ae72840833db137f7b7d35105079aee6ec4a62d9f80d594dd1", 518 | "sha256:95eb8364a4708392bae89035f45341871286a333f749c3141c20573d2b3876e1" 519 | ], 520 | "markers": "python_version != '3.0.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.2.*'", 521 | "version": "==0.7.1" 522 | }, 523 | "prompt-toolkit": { 524 | "hashes": [ 525 | "sha256:1df952620eccb399c53ebb359cc7d9a8d3a9538cb34c5a1344bdbeb29fbcc381", 526 | "sha256:3f473ae040ddaa52b52f97f6b4a493cfa9f5920c255a12dc56a7d34397a398a4", 527 | "sha256:858588f1983ca497f1cf4ffde01d978a3ea02b01c8a26a8bbc5cd2e66d816917" 528 | ], 529 | "version": "==1.0.15" 530 | }, 531 | "py": { 532 | "hashes": [ 533 | "sha256:06a30435d058473046be836d3fc4f27167fd84c45b99704f2fb5509ef61f9af1", 534 | 
"sha256:50402e9d1c9005d759426988a492e0edaadb7f4e68bcddfea586bc7432d009c6" 535 | ], 536 | "markers": "python_version != '3.0.*' and python_version >= '2.7' and python_version != '3.3.*' and python_version != '3.1.*' and python_version != '3.2.*'", 537 | "version": "==1.6.0" 538 | }, 539 | "pycodestyle": { 540 | "hashes": [ 541 | "sha256:682256a5b318149ca0d2a9185d365d8864a768a28db66a84a2ea946bcc426766", 542 | "sha256:6c4245ade1edfad79c3446fadfc96b0de2759662dc29d07d80a6f27ad1ca6ba9" 543 | ], 544 | "version": "==2.3.1" 545 | }, 546 | "pyflakes": { 547 | "hashes": [ 548 | "sha256:08bd6a50edf8cffa9fa09a463063c425ecaaf10d1eb0335a7e8b1401aef89e6f", 549 | "sha256:8d616a382f243dbf19b54743f280b80198be0bca3a5396f1d2e1fca6223e8805" 550 | ], 551 | "version": "==1.6.0" 552 | }, 553 | "pygments": { 554 | "hashes": [ 555 | "sha256:78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d", 556 | "sha256:dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc" 557 | ], 558 | "version": "==2.2.0" 559 | }, 560 | "pylint": { 561 | "hashes": [ 562 | "sha256:1d6d3622c94b4887115fe5204982eee66fdd8a951cf98635ee5caee6ec98c3ec", 563 | "sha256:31142f764d2a7cd41df5196f9933b12b7ee55e73ef12204b648ad7e556c119fb" 564 | ], 565 | "index": "pypi", 566 | "version": "==2.1.1" 567 | }, 568 | "pytest": { 569 | "hashes": [ 570 | "sha256:453cbbbe5ce6db38717d282b758b917de84802af4288910c12442984bde7b823", 571 | "sha256:a8a07f84e680482eb51e244370aaf2caa6301ef265f37c2bdefb3dd3b663f99d" 572 | ], 573 | "index": "pypi", 574 | "version": "==3.8.0" 575 | }, 576 | "python-dateutil": { 577 | "hashes": [ 578 | "sha256:1adb80e7a782c12e52ef9a8182bebeb73f1d7e24e374397af06fb4956c8dc5c0", 579 | "sha256:e27001de32f627c22380a688bcc43ce83504a7bc5da472209b4c70f02829f0b8" 580 | ], 581 | "version": "==2.7.3" 582 | }, 583 | "pyzmq": { 584 | "hashes": [ 585 | "sha256:25a0715c8f69cf72f67cfe5a68a3f3ed391c67c063d2257bec0fe7fc2c7f08f8", 586 | "sha256:2bab63759632c6b9e0d5bf19cc63c3b01df267d660e0abcf230cf0afaa966349", 587 | "sha256:30ab49d99b24bf0908ebe1cdfa421720bfab6f93174e4883075b7ff38cc555ba", 588 | "sha256:32c7ca9fc547a91e3c26fc6080b6982e46e79819e706eb414dd78f635a65d946", 589 | "sha256:41219ae72b3cc86d97557fe5b1ef5d1adc1057292ec597b50050874a970a39cf", 590 | "sha256:4b8c48a9a13cea8f1f16622f9bd46127108af14cd26150461e3eab71e0de3e46", 591 | "sha256:55724997b4a929c0d01b43c95051318e26ddbae23565018e138ae2dc60187e59", 592 | "sha256:65f0a4afae59d4fc0aad54a917ab599162613a761b760ba167d66cc646ac3786", 593 | "sha256:6f88591a8b246f5c285ee6ce5c1bf4f6bd8464b7f090b1333a446b6240a68d40", 594 | "sha256:75022a4c60dcd8765bb9ca32f6de75a0ec83b0d96e0309dc479f4c7b21f26cb7", 595 | "sha256:76ea493bfab18dcb090d825f3662b5612e2def73dffc196d51a5194b0294a81d", 596 | "sha256:7b60c045b80709e4e3c085bab9b691e71761b44c2b42dbb047b8b498e7bc16b3", 597 | "sha256:8e6af2f736734aef8ed6f278f9f552ec7f37b1a6b98e59b887484a840757f67d", 598 | "sha256:9ac2298e486524331e26390eac14e4627effd3f8e001d4266ed9d8f1d2d31cce", 599 | "sha256:9ba650f493a9bc1f24feca1d90fce0e5dd41088a252ac9840131dfbdbf3815ca", 600 | "sha256:a02a4a385e394e46012dc83d2e8fd6523f039bb52997c1c34a2e0dd49ed839c1", 601 | "sha256:a3ceee84114d9f5711fa0f4db9c652af0e4636c89eabc9b7f03a3882569dd1ed", 602 | "sha256:a72b82ac1910f2cf61a49139f4974f994984475f771b0faa730839607eeedddf", 603 | "sha256:ab136ac51027e7c484c53138a0fab4a8a51e80d05162eb7b1585583bcfdbad27", 604 | "sha256:c095b224300bcac61e6c445e27f9046981b1ac20d891b2f1714da89d34c637c8", 605 | "sha256:c5cc52d16c06dc2521340d69adda78a8e1031705924e103c0eb8fc8af861d810", 606 | 
"sha256:d612e9833a89e8177f8c1dc68d7b4ff98d3186cd331acd616b01bbdab67d3a7b", 607 | "sha256:e828376a23c66c6fe90dcea24b4b72cd774f555a6ee94081670872918df87a19", 608 | "sha256:e9767c7ab2eb552796440168d5c6e23a99ecaade08dda16266d43ad461730192", 609 | "sha256:ebf8b800d42d217e4710d1582b0c8bff20cdcb4faad7c7213e52644034300924" 610 | ], 611 | "markers": "python_version != '3.0*' and python_version >= '2.7' and python_version != '3.2*' and python_version != '3.1*'", 612 | "version": "==17.1.2" 613 | }, 614 | "rope": { 615 | "hashes": [ 616 | "sha256:a108c445e1cd897fe19272ab7877d172e7faf3d4148c80e7d20faba42ea8f7b2" 617 | ], 618 | "index": "pypi", 619 | "version": "==0.11.0" 620 | }, 621 | "simplegeneric": { 622 | "hashes": [ 623 | "sha256:dc972e06094b9af5b855b3df4a646395e43d1c9d0d39ed345b7393560d0b9173" 624 | ], 625 | "version": "==0.8.1" 626 | }, 627 | "six": { 628 | "hashes": [ 629 | "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", 630 | "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" 631 | ], 632 | "version": "==1.11.0" 633 | }, 634 | "tornado": { 635 | "hashes": [ 636 | "sha256:1c0816fc32b7d31b98781bd8ebc7a9726d7dce67407dc353a2e66e697e138448", 637 | "sha256:4f66a2172cb947387193ca4c2c3e19131f1c70fa8be470ddbbd9317fd0801582", 638 | "sha256:5327ba1a6c694e0149e7d9126426b3704b1d9d520852a3e4aa9fc8fe989e4046", 639 | "sha256:6a7e8657618268bb007646b9eae7661d0b57f13efc94faa33cd2588eae5912c9", 640 | "sha256:a9b14804783a1d77c0bd6c66f7a9b1196cbddfbdf8bceb64683c5ae60bd1ec6f", 641 | "sha256:c58757e37c4a3172949c99099d4d5106e4d7b63aa0617f9bb24bfbff712c7866", 642 | "sha256:d8984742ce86c0855cccecd5c6f54a9f7532c983947cff06f3a0e2115b47f85c" 643 | ], 644 | "markers": "python_version != '3.0.*' and python_version != '3.3.*' and python_version != '3.2.*' and python_version >= '2.7' and python_version != '3.1.*'", 645 | "version": "==5.1" 646 | }, 647 | "traitlets": { 648 | "hashes": [ 649 | "sha256:9c4bd2d267b7153df9152698efb1050a5d84982d3384a37b2c1f7723ba3e7835", 650 | "sha256:c6cb5e6f57c5a9bdaa40fa71ce7b4af30298fbab9ece9815b5d995ab6217c7d9" 651 | ], 652 | "version": "==4.3.2" 653 | }, 654 | "typed-ast": { 655 | "hashes": [ 656 | "sha256:0948004fa228ae071054f5208840a1e88747a357ec1101c17217bfe99b299d58", 657 | "sha256:10703d3cec8dcd9eef5a630a04056bbc898abc19bac5691612acba7d1325b66d", 658 | "sha256:1f6c4bd0bdc0f14246fd41262df7dfc018d65bb05f6e16390b7ea26ca454a291", 659 | "sha256:25d8feefe27eb0303b73545416b13d108c6067b846b543738a25ff304824ed9a", 660 | "sha256:29464a177d56e4e055b5f7b629935af7f49c196be47528cc94e0a7bf83fbc2b9", 661 | "sha256:2e214b72168ea0275efd6c884b114ab42e316de3ffa125b267e732ed2abda892", 662 | "sha256:3e0d5e48e3a23e9a4d1a9f698e32a542a4a288c871d33ed8df1b092a40f3a0f9", 663 | "sha256:519425deca5c2b2bdac49f77b2c5625781abbaf9a809d727d3a5596b30bb4ded", 664 | "sha256:57fe287f0cdd9ceaf69e7b71a2e94a24b5d268b35df251a88fef5cc241bf73aa", 665 | "sha256:668d0cec391d9aed1c6a388b0d5b97cd22e6073eaa5fbaa6d2946603b4871efe", 666 | "sha256:68ba70684990f59497680ff90d18e756a47bf4863c604098f10de9716b2c0bdd", 667 | "sha256:6de012d2b166fe7a4cdf505eee3aaa12192f7ba365beeefaca4ec10e31241a85", 668 | "sha256:79b91ebe5a28d349b6d0d323023350133e927b4de5b651a8aa2db69c761420c6", 669 | "sha256:8550177fa5d4c1f09b5e5f524411c44633c80ec69b24e0e98906dd761941ca46", 670 | "sha256:898f818399cafcdb93cbbe15fc83a33d05f18e29fb498ddc09b0214cdfc7cd51", 671 | "sha256:94b091dc0f19291adcb279a108f5d38de2430411068b219f41b343c03b28fb1f", 672 | 
"sha256:a26863198902cda15ab4503991e8cf1ca874219e0118cbf07c126bce7c4db129", 673 | "sha256:a8034021801bc0440f2e027c354b4eafd95891b573e12ff0418dec385c76785c", 674 | "sha256:bc978ac17468fe868ee589c795d06777f75496b1ed576d308002c8a5756fb9ea", 675 | "sha256:c05b41bc1deade9f90ddc5d988fe506208019ebba9f2578c622516fd201f5863", 676 | "sha256:c9b060bd1e5a26ab6e8267fd46fc9e02b54eb15fffb16d112d4c7b1c12987559", 677 | "sha256:edb04bdd45bfd76c8292c4d9654568efaedf76fe78eb246dde69bdb13b2dad87", 678 | "sha256:f19f2a4f547505fe9072e15f6f4ae714af51b5a681a97f187971f50c283193b6" 679 | ], 680 | "markers": "python_version < '3.7' and implementation_name == 'cpython'", 681 | "version": "==1.1.0" 682 | }, 683 | "wcwidth": { 684 | "hashes": [ 685 | "sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e", 686 | "sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c" 687 | ], 688 | "version": "==0.1.7" 689 | }, 690 | "wrapt": { 691 | "hashes": [ 692 | "sha256:d4d560d479f2c21e1b5443bbd15fe7ec4b37fe7e53d335d3b9b0a7b1226fe3c6" 693 | ], 694 | "version": "==1.10.11" 695 | } 696 | } 697 | } 698 | --------------------------------------------------------------------------------