├── LICENSE ├── README.md ├── deepclassifier ├── __init__.py ├── layers │ ├── __init__.py │ └── lstm.py ├── models │ ├── __init__.py │ ├── bertdpcnn.py │ ├── berthan.py │ ├── bertrcnn.py │ ├── berttextcnn.py │ ├── dpcnn.py │ ├── han.py │ ├── rcnn.py │ └── textcnn.py └── trainers │ ├── __init__.py │ └── trainer.py ├── docs ├── Makefile ├── build │ ├── doctrees │ │ ├── environment.pickle │ │ └── index.doctree │ └── html │ │ ├── .buildinfo │ │ ├── _sources │ │ └── index.rst.txt │ │ ├── _static │ │ ├── alabaster.css │ │ ├── basic.css │ │ ├── css │ │ │ ├── badge_only.css │ │ │ ├── fonts │ │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ │ ├── Roboto-Slab-Regular.woff2 │ │ │ │ ├── fontawesome-webfont.eot │ │ │ │ ├── fontawesome-webfont.svg │ │ │ │ ├── fontawesome-webfont.ttf │ │ │ │ ├── fontawesome-webfont.woff │ │ │ │ ├── fontawesome-webfont.woff2 │ │ │ │ ├── lato-bold-italic.woff │ │ │ │ ├── lato-bold-italic.woff2 │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-normal-italic.woff │ │ │ │ ├── lato-normal-italic.woff2 │ │ │ │ ├── lato-normal.woff │ │ │ │ └── lato-normal.woff2 │ │ │ └── theme.css │ │ ├── custom.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── file.png │ │ ├── fonts │ │ │ ├── FontAwesome.otf │ │ │ ├── Lato │ │ │ │ ├── lato-bold.eot │ │ │ │ ├── lato-bold.ttf │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-bolditalic.eot │ │ │ │ ├── lato-bolditalic.ttf │ │ │ │ ├── lato-bolditalic.woff │ │ │ │ ├── lato-bolditalic.woff2 │ │ │ │ ├── lato-italic.eot │ │ │ │ ├── lato-italic.ttf │ │ │ │ ├── lato-italic.woff │ │ │ │ ├── lato-italic.woff2 │ │ │ │ ├── lato-regular.eot │ │ │ │ ├── lato-regular.ttf │ │ │ │ ├── lato-regular.woff │ │ │ │ └── lato-regular.woff2 │ │ │ ├── Roboto-Slab-Bold.woff │ │ │ ├── Roboto-Slab-Bold.woff2 │ │ │ ├── Roboto-Slab-Light.woff │ │ │ ├── Roboto-Slab-Light.woff2 │ │ │ ├── Roboto-Slab-Regular.woff │ │ │ ├── 
Roboto-Slab-Regular.woff2 │ │ │ ├── Roboto-Slab-Thin.woff │ │ │ ├── Roboto-Slab-Thin.woff2 │ │ │ ├── RobotoSlab │ │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ │ └── roboto-slab-v7-regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ ├── fontawesome-webfont.woff2 │ │ │ ├── lato-bold-italic.woff │ │ │ ├── lato-bold-italic.woff2 │ │ │ ├── lato-bold.woff │ │ │ ├── lato-bold.woff2 │ │ │ ├── lato-normal-italic.woff │ │ │ ├── lato-normal-italic.woff2 │ │ │ ├── lato-normal.woff │ │ │ └── lato-normal.woff2 │ │ ├── jquery-3.5.1.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── badge_only.js │ │ │ ├── html5shiv-printshiv.min.js │ │ │ ├── html5shiv.min.js │ │ │ ├── modernizr.min.js │ │ │ └── theme.js │ │ ├── language_data.js │ │ ├── minus.png │ │ ├── plus.png │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.3.1.js │ │ └── underscore.js │ │ ├── genindex.html │ │ ├── index.html │ │ ├── objects.inv │ │ ├── search.html │ │ └── searchindex.js ├── make.bat └── source │ ├── Models │ ├── 01_TextCNN.rst │ ├── 02_RCNN.rst │ ├── 03_DPCNN.rst │ ├── 04_HAN.rst │ ├── 05_BertTextCNN.rst │ ├── 06_BertRCNN.rst │ ├── 07_BertDPCNN.rst │ ├── 08_BertHAN.rst │ └── index.rst │ ├── QuickStart │ └── index.rst │ ├── conf.py │ └── index.rst ├── examples ├── README.md ├── __init__.py ├── example_berttextcnn.py ├── example_textcnn.py ├── label.png ├── len.png └── preprocessing.py ├── setup.py └── tests ├── __init__.py ├── test_bertdpcnn.py ├── test_berthan.py ├── test_bertrcnn.py ├── test_berttextcnn.py ├── test_dpcnn.py ├── test_han.py ├── test_rcnn.py └── test_textcnn.py /LICENSE: -------------------------------------------------------------------------------- 1 | Apache 
License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # **DeepClassifier** 2 | DeepClassifier is a python package based on pytorch, which is easy-use and general for text classification task. You can install DeepClassifier by `pip install -U deepclassifier`。 3 | If you want to know more information about DeepClassifier, please see the [**documentation**](https://deepclassifier.readthedocs.io/en/latest/). So let's start!🤩 4 | > If you think DeepClassifier is good, please star and fork it to give me motivation to continue maintenance!🤩 And it's my pleasure that if Deepclassifier is helpful to you!🥰 5 | 6 | ## **Installation** 7 | Just like other Python packages, DeepClassifier also can be installed through pip.The command of installation is `pip install -U deepclassifier`. 8 | 9 | ## **Models** 10 | Here is a list of models that have been integrated into DeepClassifier. In the future, we will integrate more models into DeepClassifier. Welcome to join us!🤩 11 | 1. 
**TextCNN:** [Convolutional Neural Networks for Sentence Classification](https://www.aclweb.org/anthology/D14-1181.pdf) ,2014 EMNLP 12 | 2. **RCNN:** [Recurrent Convolutional Neural Networks for Text Classification](https://www.deeplearningitalia.com/wp-content/uploads/2018/03/Recurrent-Convolutional-Neural-Networks-for-Text-Classification.pdf),2015,IJCAI 13 | 3. **DPCNN:** [Deep Pyramid Convolutional Neural Networks for Text Categorization](https://ai.tencent.com/ailab/media/publications/ACL3-Brady.pdf) ,2017,ACL 14 | 4. **HAN:** [Hierarchical Attention Networks for Document Classification](https://www.aclweb.org/anthology/N16-1174.pdf), 2016,ACL 15 | 5. **BERT:** [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/pdf/1810.04805.pdf),2018, ACL 16 | 6. **BertTextCNN:** BERT+TextCNN 17 | 7. **BertRCNN:** BERT+RCNN 18 | 8. **BertDPCNN:** BERT+DPCNN 19 | 9. **BertHAN:** BERT+HAN 20 | ... 21 | 22 | ## Quick start 23 | I wiil show you that how to use DeepClassifier below.🥰 Click [**[here]**](https://github.com/codewithzichao/DeepClassifier/blob/master/examples) to display the complete code. 24 | 25 | you can define model like that(take BertTextCNN model as example):👇 26 | 27 | ```python 28 | from deepclassifier.models import BertTextCNN 29 | 30 | # parameters of model 31 | embedding_dim = 768 # if you use bert, the default is 768. 
32 | dropout_rate = 0.2 33 | num_class = 2 34 | bert_path = "/Users/codewithzichao/Desktop/bert-base-uncased/" 35 | 36 | my_model = BertTextCNN(embedding_dim=embedding_dim, 37 | dropout_rate=dropout_rate, 38 | num_class=num_class, 39 | bert_path=bert_path) 40 | 41 | optimizer = optim.Adam(my_model.parameters()) 42 | loss_fn = nn.CrossEntropyLoss() 43 | ``` 44 | After defining model, you can train/test/predict model like that:👇 45 | 46 | ```python 47 | from deepclassifier.trainers import Trainer 48 | 49 | model_name = "berttextcnn" 50 | save_path = "best.ckpt" 51 | writer = SummaryWriter("logfie/1") 52 | max_norm = 0.25 53 | eval_step_interval = 20 54 | 55 | my_trainer =Trainer(model_name=model_name,model=my_model, 56 | train_loader=train_loader,dev_loader=dev_loader, 57 | test_loader=test_loader, optimizer=optimizer, 58 | loss_fn=loss_fn,save_path=save_path, epochs=1, 59 | writer=writer, max_norm=max_norm, 60 | eval_step_interval=eval_step_interval) 61 | 62 | # training 63 | my_trainer.train() 64 | # print the best F1 value on dev set 65 | print(my_trainer.best_f1) 66 | 67 | # testing 68 | p, r, f1 = my_trainer.test() 69 | print(p, r, f1) 70 | 71 | # predict 72 | pred_data = DataLoader(pred_data, batch_size=1) 73 | pred_label = my_trainer.predict(pred_data) 74 | print(pred_label) 75 | 76 | ``` 77 | 78 | ## **Contact me** 79 | If you want any questions about DeepClassifier, welcome to submit issue or pull requests! 
And welcome to communicate with me through 2843656167@qq.com.🥳 80 | 81 | ## Citation 82 | ```tex 83 | @misc{zichao2020deepclassifier, 84 | author = {Zichao Li}, 85 | title = {DeepClassifier: use-friendly and flexiable package of NLP based text classification models}, 86 | year = {2020}, 87 | publisher = {GitHub}, 88 | journal = {GitHub Repository}, 89 | howpublished = {\url{https://github.com/codewithzichao/DeepClassifier}}, 90 | } 91 | ``` 92 | 93 | -------------------------------------------------------------------------------- /deepclassifier/__init__.py: -------------------------------------------------------------------------------- 1 | name = "deepclassifier" 2 | __version__="0.0.6" -------------------------------------------------------------------------------- /deepclassifier/layers/__init__.py: -------------------------------------------------------------------------------- 1 | from .lstm import LSTM 2 | -------------------------------------------------------------------------------- /deepclassifier/layers/lstm.py: -------------------------------------------------------------------------------- 1 | ''' 2 | source:https://github.com/songyingxin/TextClassification/blob/master/models/LSTM.py 3 | ''' 4 | 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import torch 8 | 9 | class LSTM(nn.Module): 10 | 11 | def __init__(self, input_size, hidden_size, num_layers, bidirectional, dropout, batch_first): 12 | """ 13 | Args: 14 | input_size: x 的特征维度 15 | hidden_size: 隐层的特征维度 16 | num_layers: LSTM 层数 17 | """ 18 | super(LSTM, self).__init__() 19 | 20 | self.rnn = nn.LSTM( 21 | input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bidirectional=bidirectional, 22 | dropout=dropout, batch_first=batch_first) 23 | 24 | def forward(self, x, lengths): 25 | # x: [seq_len, batch_size, input_size] 26 | # lengths: [batch_size] 27 | packed_x = nn.utils.rnn.pack_padded_sequence(x, lengths) 28 | 29 | # packed_x, packed_output: PackedSequence 对象 30 | 
# hidden: [num_layers * bidirectional, batch_size, hidden_size] 31 | # cell: [num_layers * bidirectional, batch_size, hidden_size] 32 | packed_output, (hidden, cell) = self.rnn(packed_x) 33 | 34 | # output: [real_seq_len, batch_size, hidden_size * 2] 35 | # output_lengths: [batch_size] 36 | output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output) 37 | 38 | return hidden, output 39 | 40 | 41 | class GRU(nn.Module): 42 | 43 | def __init__(self, input_size, hidden_size, num_layers, bidirectional, dropout, batch_first): 44 | """ 45 | Args: 46 | input_size: x 的特征维度 47 | hidden_size: 隐层的特征维度 48 | num_layers: LSTM 层数 49 | """ 50 | super(GRU, self).__init__() 51 | 52 | self.rnn = nn.GRU( 53 | input_size=input_size, hidden_size=hidden_size, num_layers=num_layers, bidirectional=bidirectional, 54 | dropout=dropout, batch_first=batch_first) 55 | 56 | self.init_params() 57 | 58 | def init_params(self): 59 | for i in range(self.rnn.num_layers): 60 | nn.init.orthogonal_(getattr(self.rnn, f'weight_hh_l{i}')) 61 | nn.init.kaiming_normal_(getattr(self.rnn, f'weight_ih_l{i}')) 62 | nn.init.constant_(getattr(self.rnn, f'bias_hh_l{i}'), val=0) 63 | nn.init.constant_(getattr(self.rnn, f'bias_ih_l{i}'), val=0) 64 | getattr(self.rnn, f'bias_hh_l{i}').chunk(4)[1].fill_(1) 65 | 66 | if self.rnn.bidirectional: 67 | nn.init.orthogonal_( 68 | getattr(self.rnn, f'weight_hh_l{i}_reverse')) 69 | nn.init.kaiming_normal_( 70 | getattr(self.rnn, f'weight_ih_l{i}_reverse')) 71 | nn.init.constant_( 72 | getattr(self.rnn, f'bias_hh_l{i}_reverse'), val=0) 73 | nn.init.constant_( 74 | getattr(self.rnn, f'bias_ih_l{i}_reverse'), val=0) 75 | getattr(self.rnn, f'bias_hh_l{i}_reverse').chunk(4)[1].fill_(1) 76 | 77 | def forward(self, x, lengths): 78 | # x: [seq_len, batch_size, input_size] 79 | # lengths: [batch_size] 80 | packed_x = nn.utils.rnn.pack_padded_sequence(x, lengths) 81 | 82 | # packed_x, packed_output: PackedSequence 对象 83 | # hidden: [num_layers * bidirectional, batch_size, 
hidden_size] 84 | # cell: [num_layers * bidirectional, batch_size, hidden_size] 85 | packed_output, (hidden, cell) = self.rnn(packed_x) 86 | 87 | # output: [real_seq_len, batch_size, hidden_size * 2] 88 | # output_lengths: [batch_size] 89 | output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output) 90 | 91 | return hidden, output 92 | -------------------------------------------------------------------------------- /deepclassifier/models/__init__.py: -------------------------------------------------------------------------------- 1 | from .textcnn import TextCNN 2 | from .rcnn import RCNN 3 | from .dpcnn import DPCNN 4 | from .han import HAN 5 | from .berttextcnn import BertTextCNN 6 | from .bertrcnn import BertRCNN 7 | from .bertdpcnn import BertDPCNN 8 | from .berthan import BertHAN -------------------------------------------------------------------------------- /deepclassifier/models/bertdpcnn.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | ''' 3 | 4 | Author: 5 | Zichao Li,2843656167@qq.com 6 | 7 | ''' 8 | from __future__ import print_function 9 | 10 | import torch 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | from transformers import * 14 | import numpy as np 15 | 16 | class BertDPCNN(nn.Module): 17 | def __init__(self, 18 | embedding_dim, 19 | dropout_rate, 20 | num_class, 21 | bert_path, 22 | num_blocks=3, 23 | kernel_sizes=3, 24 | num_filters=250, 25 | requires_grads=False): 26 | ''' 27 | initialization 28 | :param embedding_dim: embedding dim 29 | :param dropout_rate: dropout rate 30 | :param num_class: the number of label 31 | :param bert_path: bert path 32 | :param num_blocks: the number of block ,default:3 33 | :param kernel_sizes: kernel size 34 | :param num_filters: the number of filter 35 | :param requires_grads: whether to update gradient of Bert in training 36 | ''' 37 | super(BertDPCNN, self).__init__() 38 | 39 | self.embedding_dim = 
embedding_dim 40 | self.num_blocks = num_blocks 41 | self.kernel_sizes = kernel_sizes 42 | self.num_filters = num_filters 43 | self.dropout_rate = dropout_rate 44 | self.num_class = num_class 45 | self.bert_path=bert_path 46 | self.requires_grads=requires_grads 47 | 48 | self.bert = BertModel.from_pretrained(self.bert_path) 49 | if self.requires_grads is False: 50 | for p in self.bert.parameters(): 51 | p.requires_grads=False 52 | 53 | # text region embedding 54 | self.region_embedding = nn.Conv2d(in_channels=1, out_channels=self.num_filters, 55 | stride=1, kernel_size=(self.kernel_sizes, self.embedding_dim)) 56 | 57 | # two conv 58 | self.conv2d1 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters, 59 | stride=2, kernel_size=(self.kernel_sizes, 1), padding=0) 60 | self.conv2d2 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters, 61 | stride=2, kernel_size=(self.kernel_sizes, 1), padding=0) 62 | self.padding1 = nn.ZeroPad2d((0, 0,(self.kernel_sizes-1)//2, (self.kernel_sizes-1)-((self.kernel_sizes-1)//2))) # top bottom 63 | self.padding2 = nn.ZeroPad2d((0, 0, 0, self.kernel_sizes-2)) # bottom 64 | 65 | # one block 66 | self.block_max_pool = nn.MaxPool2d(kernel_size=(self.kernel_sizes, 1), stride=2) 67 | self.conv2d3 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters, 68 | stride=1, kernel_size=(self.kernel_sizes, 1), padding=0) 69 | self.conv2d4 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters, 70 | stride=1, kernel_size=(self.kernel_sizes, 1), padding=0) 71 | 72 | # final pool and softmax 73 | 74 | self.flatten = nn.Flatten() 75 | self.dropout=nn.Dropout(p=self.dropout_rate) 76 | self.classifier = nn.Linear(in_features=self.num_filters, out_features=self.num_class) 77 | 78 | def forward(self, input_ids, attention_mask=None): 79 | ''' 80 | forard propagation 81 | :param params: input_ids:[batch_size,max_length] 82 | :param params: attention_mask:[batch_size,max_length] 83 | :return: 
logits:[batch_size,num_class] 84 | ''' 85 | 86 | bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask) 87 | embedding = bert_output.last_hidden_state.unsqueeze(dim=1) 88 | 89 | x = self.region_embedding(embedding) 90 | 91 | x = self.padding1(x) 92 | x = torch.relu(self.conv2d1(x)) 93 | x = self.padding1(x) 94 | x = torch.relu(self.conv2d2(x)) 95 | for i in range(self.num_blocks): 96 | x = self._block(x) 97 | 98 | x = self.flatten(x) 99 | x=self.dropout(x) 100 | outputs = self.classifier(x) 101 | 102 | return outputs 103 | 104 | def _block(self, x): 105 | 106 | x = self.padding2(x) 107 | pool_x = self.block_max_pool(x) 108 | 109 | x = self.padding1(pool_x) 110 | x = F.relu(self.conv2d3(x)) 111 | x = self.padding1(x) 112 | x = F.relu(self.conv2d4(x)) 113 | 114 | return x + pool_x 115 | -------------------------------------------------------------------------------- /deepclassifier/models/berthan.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | ''' 3 | 4 | Author: 5 | Zichao Li,2843656167@qq.com 6 | 7 | ''' 8 | from __future__ import print_function 9 | 10 | import torch 11 | import torch.nn as nn 12 | import torch.nn.functional as F 13 | import numpy as np 14 | from transformers import * 15 | 16 | class BertHAN(nn.Module): 17 | def __init__(self, 18 | embedding_dim, 19 | word_hidden_size, 20 | seq_hidden_size, 21 | dropout_rate, 22 | num_class, 23 | bert_path, 24 | rnn_type="lstm", 25 | requires_grads=False): 26 | ''' 27 | initialization 28 | :param embedding_dim: embedding dim 29 | :param word_hidden_size: word hidden size 30 | :param seq_hidden_size: sequence hidden size 31 | :param dropout_rate: dropout rate 32 | :param num_class: the number of label 33 | :param bert_path: bert path 34 | :param rnn_type: rnn type. 
default:lstm 35 | :param requires_grads: whether to update gradient of Bert in training 36 | ''' 37 | super(BertHAN,self).__init__() 38 | self.embedding_dim = embedding_dim 39 | self.word_hidden_size = word_hidden_size 40 | self.seq_hidden_size = seq_hidden_size 41 | self.rnn_type = rnn_type 42 | self.dropout_rate = dropout_rate 43 | self.num_class = num_class 44 | self.bert_path=bert_path 45 | self.requires_grads=requires_grads 46 | 47 | self.bert=BertModel.from_pretrained(self.bert_path) 48 | if self.requires_grads is False: 49 | for p in self.bert.parameters(): 50 | p.requires_grads=False 51 | 52 | if self.rnn_type == "lstm": 53 | self.word_rnn = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.word_hidden_size, 54 | batch_first=True, num_layers=1, bidirectional=True) 55 | self.seq_rnn = nn.LSTM(input_size=self.word_hidden_size * 2, hidden_size=self.seq_hidden_size, 56 | batch_first=True, num_layers=1, bidirectional=True) 57 | elif self.rnn_type=="gru": 58 | self.word_rnn = nn.GRU(input_size=self.embedding_dim, hidden_size=self.word_hidden_size, 59 | batch_first=True, num_layers=1, bidirectional=True) 60 | self.seq_rnn = nn.GRU(input_size=self.word_hidden_size * 2, hidden_size=self.seq_hidden_size, 61 | batch_first=True, num_layers=1, bidirectional=True) 62 | else: 63 | raise Exception("wrong rnn type,must be one of [lstm,gru].") 64 | 65 | self.fc1=nn.Linear(in_features=self.word_hidden_size * 2, out_features=self.word_hidden_size * 2) 66 | self.U_w=nn.Parameter(torch.Tensor(self.word_hidden_size * 2, self.word_hidden_size * 2)) 67 | self.fc2=nn.Linear(in_features=self.word_hidden_size * 2, out_features=self.word_hidden_size * 2) 68 | self.U_s=nn.Parameter(torch.Tensor(self.word_hidden_size*2,self.word_hidden_size*2)) 69 | 70 | self.dropout=nn.Dropout(p=self.dropout_rate) 71 | self.classifer=nn.Linear(in_features=self.word_hidden_size*2,out_features=self.num_class) 72 | 73 | def forward(self, input_ids, attention_mask=None): 74 | ''' 75 | forard 
propagation 76 | :param params: input_ids:[batch_size,max_seq_length,max_word_length] 77 | :param params: attention_mask:[batch_size,max_seq_length,max_word_length] 78 | :return: logits:[batch_size,num_class] 79 | ''' 80 | 81 | seq_length=input_ids.size()[1] 82 | word_length=input_ids.size()[2] 83 | input_ids=input_ids.view(-1,word_length) 84 | if attention_mask is not None: 85 | attention_mask=attention_mask.view(-1,word_length) 86 | 87 | # bert encoding 88 | bert_output=self.bert(input_ids=input_ids,attention_mask=attention_mask) 89 | x=bert_output.last_hidden_state 90 | x,_=self.word_rnn(x) 91 | 92 | # char attention 93 | temp=torch.tanh(self.fc1(x)) 94 | char_score=torch.matmul(temp,self.U_w) 95 | char_weights=F.softmax(char_score,dim=1) 96 | x=torch.mul(char_weights,x) 97 | x=torch.sum(x,dim=1) 98 | 99 | x=x.view(-1,seq_length,x.shape[-1]) 100 | x,_=self.seq_rnn(x) 101 | 102 | # word attention 103 | temp=torch.tanh(x) 104 | word_score=torch.matmul(temp,self.U_s) 105 | word_weights=F.softmax(word_score,dim=1) 106 | x=torch.mul(word_weights,x) 107 | x=torch.sum(x,dim=1) 108 | 109 | x=self.dropout(x) 110 | outputs=self.classifer(x) 111 | 112 | return outputs 113 | -------------------------------------------------------------------------------- /deepclassifier/models/bertrcnn.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | ''' 3 | 4 | Author: 5 | Zichao Li,2843656167@qq.com 6 | 7 | ''' 8 | from __future__ import print_function 9 | 10 | import torch 11 | import torch.nn as nn 12 | from transformers import * 13 | import numpy as np 14 | 15 | class BertRCNN(nn.Module): 16 | def __init__(self, 17 | embedding_dim, 18 | hidden_size, 19 | dropout_rate, 20 | num_class, 21 | bert_path, 22 | rnn_type="lstm", 23 | num_layers=1, 24 | requires_grads=False): 25 | ''' 26 | initialization 27 | :param embedding_dim:embedding dim 28 | :param hidden_size: rnn hidden size 29 | :param dropout_rate: dropout rate 30 | 
class BertRCNN(nn.Module):
    """RCNN-style classifier (Lai et al., 2015) on top of a pretrained Bert encoder."""

    def __init__(self,
                 embedding_dim,
                 hidden_size,
                 dropout_rate,
                 num_class,
                 bert_path,
                 rnn_type="lstm",
                 num_layers=1,
                 requires_grads=False):
        '''
        initialization
        :param embedding_dim: embedding dim (must equal Bert's hidden size)
        :param hidden_size: rnn hidden size
        :param dropout_rate: dropout rate (stored; not applied in forward —
            NOTE(review): confirm whether dropout was intended here)
        :param num_class: the number of label
        :param bert_path: bert path
        :param rnn_type: rnn type, one of {lstm, gru}. Default: lstm
        :param num_layers: the number of rnn layer
        :param requires_grads: whether to update gradient of Bert in training stage
        '''
        super(BertRCNN, self).__init__()
        self.embedding_dim = embedding_dim
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.rnn_type = rnn_type
        self.dropout_rate = dropout_rate
        self.num_class = num_class
        self.bert_path = bert_path
        self.requires_grads = requires_grads

        self.bert = AutoModel.from_pretrained(self.bert_path)
        if self.requires_grads is False:
            for p in self.bert.parameters():
                # BUGFIX: the attribute is `requires_grad` (no trailing "s");
                # the original set a nonexistent attribute, so Bert was never frozen.
                p.requires_grad = False

        if self.rnn_type == "lstm":
            self.birnn = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.hidden_size,
                                 num_layers=self.num_layers, batch_first=True, bidirectional=True)
        elif self.rnn_type == "gru":
            self.birnn = nn.GRU(input_size=self.embedding_dim, hidden_size=self.hidden_size,
                                num_layers=self.num_layers, batch_first=True, bidirectional=True)
        else:
            raise ValueError("rnn type must be one of {lstm,gru}.")

        # BUGFIX: a bidirectional RNN outputs 2*hidden_size features from its
        # last layer regardless of num_layers; the original multiplied by
        # num_layers and crashed in forward whenever num_layers > 1.
        self.W = nn.Linear(in_features=self.embedding_dim + self.hidden_size * 2,
                           out_features=self.hidden_size * 2)

        self.global_max_pool1d = nn.AdaptiveMaxPool1d(output_size=1)
        self.classifier = nn.Linear(in_features=self.hidden_size * 2, out_features=self.num_class)

    def forward(self, input_ids, attention_mask=None):
        '''
        forward propagation
        :param input_ids: [batch_size, max_length]
        :param attention_mask: [batch_size, max_length]
        :return: logits: [batch_size, num_class]
        '''
        bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        x = bert_output.last_hidden_state

        rnn_output, _ = self.birnn(x)
        # concatenate each token representation with its bidirectional context (RCNN)
        x = torch.cat((x, rnn_output), dim=-1)
        x = torch.tanh(self.W(x))
        x = x.permute(0, 2, 1)
        x = self.global_max_pool1d(x).squeeze(dim=-1)
        outputs = self.classifier(x)

        return outputs
class BertTextCNN(nn.Module):
    """TextCNN classifier (Kim, 2014) operating on Bert token representations."""

    def __init__(self,
                 embedding_dim,
                 dropout_rate,
                 num_class,
                 bert_path,
                 num_layers=3,
                 kernel_sizes=[3, 4, 5],
                 num_filters=[100, 100, 100],
                 strides=[1, 1, 1],
                 paddings=[0, 0, 0],
                 requires_grads=False):
        '''
        initialization
        :param embedding_dim: embedding dim (must equal Bert's hidden size)
        :param dropout_rate: dropout rate
        :param num_class: the number of label
        :param bert_path: bert config path
        :param num_layers: the number of cnn layer
        :param kernel_sizes: list of conv kernel size
        :param num_filters: list of conv filters
        :param strides: list of conv strides
        :param paddings: list of conv padding
        :param requires_grads: whether to update gradient of Bert in training stage
        '''
        super(BertTextCNN, self).__init__()
        self.embedding_dim = embedding_dim
        self.num_layers = num_layers
        self.kernel_sizes = kernel_sizes
        self.num_filters = num_filters
        self.strides = strides
        self.paddings = paddings
        self.dropout_rate = dropout_rate
        self.num_class = num_class
        self.bert_path = bert_path
        self.requires_grads = requires_grads

        self.bert = AutoModel.from_pretrained(self.bert_path)
        if self.requires_grads is False:
            for p in self.bert.parameters():
                # BUGFIX: the attribute is `requires_grad` (no trailing "s");
                # the original set a nonexistent attribute, so Bert was never frozen.
                p.requires_grad = False

        if self.num_layers != len(self.kernel_sizes) or self.num_layers != len(self.num_filters):
            raise Exception("The number of num_layers and num_filters must be equal to the number of kernel_sizes!")

        # BUGFIX: plain Python lists hide submodules from nn.Module, so the conv
        # parameters were neither trained nor moved by .to(device); use ModuleList.
        self.conv1ds = nn.ModuleList()
        self.global_max_pool1ds = nn.ModuleList()
        final_hidden_size = sum(self.num_filters)
        for i in range(self.num_layers):
            self.conv1ds.append(nn.Conv1d(in_channels=self.embedding_dim,
                                          out_channels=self.num_filters[i],
                                          kernel_size=self.kernel_sizes[i],
                                          stride=self.strides[i],
                                          padding=self.paddings[i]))
            self.global_max_pool1ds.append(nn.AdaptiveMaxPool1d(output_size=1))

        self.dropout = nn.Dropout(p=self.dropout_rate)
        self.classifier = nn.Linear(in_features=final_hidden_size, out_features=self.num_class)

    def forward(self, input_ids, attention_mask=None):
        '''
        forward propagation
        :param input_ids: [batch_size, max_length]
        :param attention_mask: [batch_size, max_length]
        :return: logits: [batch_size, num_class]
        '''
        bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        x = bert_output.last_hidden_state

        # Conv1d expects [batch, channels, length]
        x = x.permute(0, 2, 1)
        cnn_pool_result = []
        for i in range(self.num_layers):
            temp = torch.relu(self.conv1ds[i](x))
            temp = self.global_max_pool1ds[i](temp).squeeze(dim=-1)
            cnn_pool_result.append(temp)

        x = torch.cat(cnn_pool_result, dim=-1)
        x = self.dropout(x)
        outputs = self.classifier(x)

        return outputs
class DPCNN(nn.Module):
    """Deep Pyramid CNN (Johnson & Zhang, 2017) for text classification."""

    def __init__(self,
                 embedding_dim,
                 dropout_rate,
                 num_class,
                 vocab_size=0,
                 seq_length=0,
                 num_blocks=3,
                 num_filters=250,
                 kernel_sizes=3,
                 embedding_matrix=None,
                 requires_grads=False):
        '''
        initialization
        ⚠️In default,the way to initialize embedding is loading pretrained embedding look-up table!
        :param embedding_dim: embedding dim
        :param dropout_rate: dropout rate
        :param num_class: the number of label
        :param vocab_size: vocabulary size (only used when embedding_matrix is None)
        :param seq_length: max length of sequence after padding
        :param num_blocks: the number of block in DPCNN model
        :param num_filters: the number of filters of conv kernel
        :param kernel_sizes: conv kernel size
        :param embedding_matrix: pretrained embedding look up table
        :param requires_grads: whether to update gradient of embedding in training stage
        '''
        super(DPCNN, self).__init__()

        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.embedding_dim = embedding_dim
        self.num_filters = num_filters
        self.dropout_rate = dropout_rate
        self.num_blocks = num_blocks
        self.num_class = num_class
        self.kernel_sizes = kernel_sizes
        self.embedding_matrix = embedding_matrix
        self.requires_grads = requires_grads

        # embedding: index 0 is reserved for padding
        if self.embedding_matrix is None:
            self.embedding = nn.Embedding(num_embeddings=self.vocab_size,
                                          embedding_dim=self.embedding_dim,
                                          padding_idx=0)
        else:
            # BUGFIX: `freeze=True` means "do NOT update"; the original passed
            # requires_grads straight through, inverting the documented semantics.
            self.embedding = nn.Embedding.from_pretrained(self.embedding_matrix,
                                                          freeze=not self.requires_grads)
            self.vocab_size = self.embedding_matrix.shape[0]

        # text region embedding: collapses the embedding axis to width 1
        self.region_embedding = nn.Conv2d(in_channels=1, out_channels=self.num_filters,
                                          stride=1, kernel_size=(self.kernel_sizes, self.embedding_dim))

        # two initial convolutions
        self.conv2d1 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters,
                                 stride=2, kernel_size=(self.kernel_sizes, 1), padding=0)
        self.conv2d2 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters,
                                 stride=2, kernel_size=(self.kernel_sizes, 1), padding=0)
        # symmetric top/bottom padding so kernel_sizes-1 rows are restored
        self.padding1 = nn.ZeroPad2d((0, 0, (self.kernel_sizes - 1) // 2,
                                      (self.kernel_sizes - 1) - ((self.kernel_sizes - 1) // 2)))
        # bottom-only padding applied before each block's max pool
        self.padding2 = nn.ZeroPad2d((0, 0, 0, self.kernel_sizes - 2))

        # one pyramid block: halve the length, then two residual convolutions
        self.block_max_pool = nn.MaxPool2d(kernel_size=(self.kernel_sizes, 1), stride=2)
        self.conv2d3 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters,
                                 stride=1, kernel_size=(self.kernel_sizes, 1), padding=0)
        self.conv2d4 = nn.Conv2d(in_channels=self.num_filters, out_channels=self.num_filters,
                                 stride=1, kernel_size=(self.kernel_sizes, 1), padding=0)

        # final flatten + dropout + linear classifier
        self.flatten = nn.Flatten()
        self.dropout = nn.Dropout(p=self.dropout_rate)
        self.classifier = nn.Linear(in_features=self.num_filters, out_features=self.num_class)

    def forward(self, inputs):
        '''
        forward propagation
        :param inputs: [batch_size, seq_length]
        :return: [batch_size, num_class]
        NOTE(review): seq_length/num_blocks must be chosen so the pyramid ends
        at spatial size 1, since classifier in_features == num_filters.
        '''
        embedding = self.embedding(inputs).unsqueeze(dim=1)
        x = self.region_embedding(embedding)

        x = self.padding1(x)
        x = torch.relu(self.conv2d1(x))
        x = self.padding1(x)
        x = torch.relu(self.conv2d2(x))
        for _ in range(self.num_blocks):
            x = self._block(x)

        x = self.flatten(x)
        x = self.dropout(x)
        outputs = self.classifier(x)

        return outputs

    def _block(self, x):
        # downsample by 2, then two convolutions with a residual connection
        x = self.padding2(x)
        pool_x = self.block_max_pool(x)

        x = self.padding1(pool_x)
        x = F.relu(self.conv2d3(x))
        x = self.padding1(x)
        x = F.relu(self.conv2d4(x))

        return x + pool_x
class HAN(nn.Module):
    """Hierarchical Attention Network (Yang et al., 2016): word-level then
    sentence-level bi-RNN encoders, each followed by an attention pooling."""

    def __init__(self,
                 embedding_dim,
                 word_hidden_size,
                 seq_hidden_size,
                 dropout_rate,
                 num_class,
                 vocab_size=0,
                 seq_length=0,
                 rnn_type="lstm",
                 embedding_matrix=None,
                 requires_grads=False):
        '''
        initialization
        ⚠️In default,the way to initialize embedding is loading pretrained embedding look-up table!
        :param embedding_dim: embedding dim
        :param word_hidden_size: word hidden size
        :param seq_hidden_size: seq hidden size (must equal word_hidden_size —
            U_s and the classifier are sized from word_hidden_size)
        :param dropout_rate: dropout rate
        :param num_class: the number of label
        :param vocab_size: vocabulary size (only used when embedding_matrix is None)
        :param seq_length: sequence length
        :param rnn_type: rnn type, which must be lstm or gru
        :param embedding_matrix: pretrained embedding lookup table, shape [vocab_size, embedding_dim]
        :param requires_grads: whether to update gradient of embedding in training stage
        '''
        super(HAN, self).__init__()
        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.embedding_dim = embedding_dim
        self.word_hidden_size = word_hidden_size
        self.seq_hidden_size = seq_hidden_size
        self.dropout_rate = dropout_rate
        self.num_class = num_class
        self.rnn_type = rnn_type
        self.embedding_matrix = embedding_matrix
        self.requires_grads = requires_grads

        if self.embedding_matrix is None:
            self.embedding = nn.Embedding(num_embeddings=self.vocab_size,
                                          embedding_dim=self.embedding_dim,
                                          padding_idx=0)
        else:
            # BUGFIX: `freeze=True` means "do NOT update"; the original passed
            # requires_grads straight through, inverting the documented semantics.
            self.embedding = nn.Embedding.from_pretrained(self.embedding_matrix,
                                                          freeze=not self.requires_grads)
            self.vocab_size = self.embedding_matrix.shape[0]

        if self.rnn_type == "lstm":
            self.word_rnn = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.word_hidden_size,
                                    batch_first=True, num_layers=1, bidirectional=True)
            self.seq_rnn = nn.LSTM(input_size=self.word_hidden_size * 2, hidden_size=self.seq_hidden_size,
                                   batch_first=True, num_layers=1, bidirectional=True)
        elif self.rnn_type == "gru":
            self.word_rnn = nn.GRU(input_size=self.embedding_dim, hidden_size=self.word_hidden_size,
                                   batch_first=True, num_layers=1, bidirectional=True)
            self.seq_rnn = nn.GRU(input_size=self.word_hidden_size * 2, hidden_size=self.seq_hidden_size,
                                  batch_first=True, num_layers=1, bidirectional=True)
        else:
            raise Exception("wrong rnn type,must be one of [lstm,gru].")

        self.fc1 = nn.Linear(in_features=self.word_hidden_size * 2, out_features=self.word_hidden_size * 2)
        self.fc2 = nn.Linear(in_features=self.word_hidden_size * 2, out_features=self.word_hidden_size * 2)
        # BUGFIX: torch.Tensor(n, m) is uninitialized memory (may contain
        # NaN/inf); initialize the attention context matrices explicitly.
        self.U_w = nn.Parameter(torch.empty(self.word_hidden_size * 2, self.word_hidden_size * 2))
        self.U_s = nn.Parameter(torch.empty(self.word_hidden_size * 2, self.word_hidden_size * 2))
        nn.init.xavier_uniform_(self.U_w)
        nn.init.xavier_uniform_(self.U_s)

        self.dropout = nn.Dropout(p=self.dropout_rate)
        self.classifer = nn.Linear(in_features=self.word_hidden_size * 2, out_features=self.num_class)

    def forward(self, inputs):
        '''
        forward propagation
        :param inputs: [batch_size, seq_length, word_length]
        :return: logits: [batch_size, num_class]
        '''
        word_length = inputs.size()[-1]
        seq_length = inputs.size()[1]
        # flatten documents so sentences are encoded independently
        inputs = inputs.view(-1, word_length)
        x = self.embedding(inputs)
        x, _ = self.word_rnn(x)

        # char attention
        temp = torch.tanh(self.fc1(x))
        char_score = torch.matmul(temp, self.U_w)
        char_weights = F.softmax(char_score, dim=1)
        x = torch.mul(char_weights, x)
        x = torch.sum(x, dim=1)

        # regroup sentence vectors per document
        x = x.view(-1, seq_length, x.shape[-1])
        x, _ = self.seq_rnn(x)

        # word attention
        # BUGFIX: the original computed torch.tanh(x) and never used self.fc2,
        # leaving fc2 dead and untrained; mirror the char-attention projection.
        temp = torch.tanh(self.fc2(x))
        word_score = torch.matmul(temp, self.U_s)
        word_weights = F.softmax(word_score, dim=1)
        x = torch.mul(word_weights, x)
        x = torch.sum(x, dim=1)

        x = self.dropout(x)
        outputs = self.classifer(x)

        return outputs
class RCNN(nn.Module):
    """Recurrent Convolutional Neural Network (Lai et al., 2015): token
    embeddings concatenated with bi-RNN context, projected, then max-pooled."""

    def __init__(self,
                 embedding_dim,
                 hidden_size,
                 dropout_rate,
                 num_class,
                 vocab_size=0,
                 seq_length=0,
                 rnn_type="lstm",
                 num_layers=1,
                 embedding_matrix=None,
                 requires_grads=False):
        '''
        initialization
        ⚠️In default,the way to initialize embedding is loading pretrained embedding look-up table!
        :param embedding_dim: embedding dim
        :param hidden_size: hidden size of rnn
        :param dropout_rate: dropout rate
        :param num_class: the number of label
        :param vocab_size: vocabulary size (only used when embedding_matrix is None)
        :param seq_length: max length of sequence after padding
        :param rnn_type: the type of rnn, which must be lstm or gru. Default: lstm.
        :param num_layers: the number of rnn layer. Default: 1.
        :param embedding_matrix: pretrained embedding look-up table, shape [vocab_size, embedding_dim]
        :param requires_grads: whether to update gradient of embedding look up table in training stage
        '''
        super(RCNN, self).__init__()

        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.embedding_dim = embedding_dim
        self.hidden_size = hidden_size
        self.num_class = num_class
        self.rnn_type = rnn_type
        self.dropout_rate = dropout_rate
        self.num_layers = num_layers
        self.embedding_matrix = embedding_matrix
        self.requires_grads = requires_grads

        if self.embedding_matrix is None:
            self.embedding = nn.Embedding(num_embeddings=self.vocab_size,
                                          embedding_dim=self.embedding_dim,
                                          padding_idx=0)
        else:
            # BUGFIX: `freeze=True` means "do NOT update"; the original passed
            # requires_grads straight through, inverting the documented semantics.
            self.embedding = nn.Embedding.from_pretrained(self.embedding_matrix,
                                                          freeze=not self.requires_grads)
            self.vocab_size = self.embedding_matrix.shape[0]

        if self.rnn_type == "lstm":
            self.birnn = nn.LSTM(input_size=self.embedding_dim, hidden_size=self.hidden_size,
                                 num_layers=self.num_layers, batch_first=True, bidirectional=True)
        elif self.rnn_type == "gru":
            self.birnn = nn.GRU(input_size=self.embedding_dim, hidden_size=self.hidden_size,
                                num_layers=self.num_layers, batch_first=True, bidirectional=True)
        else:
            # BUGFIX: the original silently left self.birnn undefined for a bad
            # rnn_type and failed later with AttributeError; fail fast instead
            # (matches BertRCNN's validation).
            raise ValueError("rnn type must be one of {lstm,gru}.")

        # BUGFIX: a bidirectional RNN outputs 2*hidden_size features from its
        # last layer regardless of num_layers; the original multiplied by
        # num_layers and crashed in forward whenever num_layers > 1.
        self.W = nn.Linear(in_features=self.embedding_dim + self.hidden_size * 2,
                           out_features=self.hidden_size * 2)

        self.global_max_pool1d = nn.AdaptiveMaxPool1d(output_size=1)
        self.dropout = nn.Dropout(p=self.dropout_rate)
        self.classifier = nn.Linear(in_features=self.hidden_size * 2, out_features=self.num_class)

    def forward(self, input_ids, input_len=None):
        '''
        forward propagation
        :param input_ids: [batch_size, seq_length]
        :param input_len: unused, kept for interface compatibility
        :return: [batch_size, num_class]
        '''
        x = self.embedding(input_ids)
        temp, _ = self.birnn(x)
        # concatenate each token embedding with its bidirectional context
        x = torch.cat((x, temp), dim=-1)
        x = torch.tanh(self.W(x))
        x = x.permute(0, 2, 1)
        x = self.global_max_pool1d(x).squeeze(dim=-1)
        x = self.dropout(x)
        outputs = self.classifier(x)

        return outputs
class TextCNN(nn.Module):
    """TextCNN classifier (Kim, 2014): parallel 1-D convolutions over word
    embeddings, global max pooling, concat, dropout, linear classifier."""

    def __init__(self,
                 embedding_dim,
                 dropout_rate,
                 num_class,
                 vocab_size=0,
                 seq_length=0,
                 num_layers=3,
                 kernel_sizes=[3, 4, 5],
                 strides=[1, 1, 1],
                 paddings=[0, 0, 0],
                 num_filters=[100, 100, 100],
                 embedding_matrix=None,
                 requires_grads=False):
        '''
        initialization
        ⚠️In default,the way to initialize embedding is loading pretrained embedding look-up table!
        :param embedding_dim: embedding dim
        :param dropout_rate: dropout rate
        :param num_class: the number of label
        :param vocab_size: vocabulary size (only used when embedding_matrix is None)
        :param seq_length: max length of sequence after padding
        :param num_layers: the number of cnn
        :param kernel_sizes: list of conv kernel size
        :param strides: list of conv strides
        :param paddings: list of padding
        :param num_filters: list of num filters
        :param embedding_matrix: pretrained embedding look-up table, shape [vocab_size, embedding_dim]
        :param requires_grads: whether to update gradient of embedding in training
        '''
        super(TextCNN, self).__init__()

        self.vocab_size = vocab_size
        self.seq_length = seq_length
        self.embedding_dim = embedding_dim
        self.num_layers = num_layers
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.num_filters = num_filters
        self.dropout_rate = dropout_rate
        self.num_class = num_class
        self.embedding_matrix = embedding_matrix
        self.requires_grads = requires_grads

        if self.num_layers != len(self.kernel_sizes) or self.num_layers != len(self.num_filters):
            raise ValueError("The number of num_layers and num_filters must be equal to the number of kernel_sizes!")

        # embedding: index 0 is reserved for padding
        if self.embedding_matrix is None:
            self.embedding = nn.Embedding(num_embeddings=self.vocab_size,
                                          embedding_dim=self.embedding_dim,
                                          padding_idx=0)
        else:
            # BUGFIX: `freeze=True` means "do NOT update"; the original passed
            # requires_grads straight through, inverting the documented semantics.
            self.embedding = nn.Embedding.from_pretrained(self.embedding_matrix,
                                                          freeze=not self.requires_grads)
            self.vocab_size = self.embedding_matrix.shape[0]

        # conv layers
        # BUGFIX: plain Python lists hide submodules from nn.Module, so the conv
        # parameters were neither trained nor moved by .to(device); use ModuleList.
        self.conv1ds = nn.ModuleList()
        self.global_max_pool1ds = nn.ModuleList()
        final_hidden_size = sum(self.num_filters)
        for i in range(self.num_layers):
            self.conv1ds.append(nn.Conv1d(in_channels=self.embedding_dim,
                                          out_channels=self.num_filters[i],
                                          kernel_size=self.kernel_sizes[i],
                                          stride=self.strides[i],
                                          padding=self.paddings[i]))
            self.global_max_pool1ds.append(nn.AdaptiveMaxPool1d(output_size=1))

        # dropout + linear classifier
        self.dropout = nn.Dropout(p=self.dropout_rate)
        self.classifier = nn.Linear(in_features=final_hidden_size, out_features=self.num_class)

    def forward(self, input_ids):
        '''
        forward propagation
        :param input_ids: [batch_size, seq_length]
        :return: [batch_size, num_class]
        '''
        x = self.embedding(input_ids)
        # Conv1d expects [batch, channels, length]
        x = x.permute(0, 2, 1)
        cnn_pool_result = []
        for i in range(self.num_layers):
            temp = torch.relu(self.conv1ds[i](x))
            temp = self.global_max_pool1ds[i](temp).squeeze(dim=-1)
            cnn_pool_result.append(temp)

        x = torch.cat(cnn_pool_result, dim=-1)
        x = self.dropout(x)
        outputs = self.classifier(x)

        return outputs
class Trainer(object):
    """Generic training / evaluation / prediction loop for DeepClassifier models."""

    # models that take only input_ids
    BASE_MODELS = ("textcnn", "rcnn", "han", "dpcnn")
    # Bert-based models that may additionally take an attention mask
    BERT_MODELS = ("berttextcnn", "bertrcnn", "berthan", "bertdpcnn")

    def __init__(self,
                 model_name,
                 model,
                 train_loader,
                 dev_loader,
                 test_loader,
                 optimizer,
                 loss_fn,
                 save_path,
                 epochs,
                 writer,
                 max_norm,
                 eval_step_interval,
                 device="cpu"):
        '''
        :param model_name: one of textcnn/rcnn/han/dpcnn or their bert variants (case-insensitive)
        :param model: the model instance to train
        :param train_loader: DataLoader yielding training batches
        :param dev_loader: DataLoader used for periodic evaluation
        :param test_loader: DataLoader used by test()
        :param optimizer: optimizer over model.parameters()
        :param loss_fn: loss taking (logits, y_true)
        :param save_path: file path where the best (dev F1) state_dict is saved
        :param epochs: number of training epochs
        :param writer: optional tensorboard-style writer (may be None)
        :param max_norm: gradient clipping norm
        :param eval_step_interval: evaluate on dev every this many global steps
        :param device: "cpu" or "cuda"
        '''
        super(Trainer, self).__init__()

        self.model_name = model_name.lower()
        self.model = model
        self.train_loader = train_loader
        self.dev_loader = dev_loader
        self.test_loader = test_loader
        self.optimizer = optimizer
        self.loss_fn = loss_fn
        self.save_path = save_path
        self.epochs = epochs
        self.writer = writer
        self.max_norm = max_norm
        self.step_interval = eval_step_interval
        self.device = torch.device(device)

        self.model.to(self.device)

    def _forward_batch(self, batch_data):
        """Run one labeled batch through the model; return (logits, y_true)."""
        if self.model_name in self.BASE_MODELS:
            input_ids, y_true = batch_data[0], batch_data[-1]
            logits = self.model(input_ids.to(self.device))
        elif self.model_name in self.BERT_MODELS:
            if len(batch_data) == 3:
                input_ids, attention_mask, y_true = batch_data[0], batch_data[1], batch_data[-1]
                logits = self.model(input_ids.to(self.device), attention_mask.to(self.device))
            else:
                input_ids, y_true = batch_data[0], batch_data[-1]
                logits = self.model(input_ids.to(self.device))
        else:
            # BUGFIX: this branch fires on an unrecognized model name; the
            # original raised a misleading "number of batch_data" message.
            raise ValueError("unknown model name: {}".format(self.model_name))

        # BUGFIX: the original tested `y_true.shape != 1` — a torch.Size
        # compared against an int, which is always True and collapses a
        # batch-of-one label tensor to a scalar; only flatten a trailing
        # singleton label dimension.
        if y_true.dim() > 1:
            y_true = y_true.squeeze(dim=-1)
        return logits, y_true

    def train(self):
        """Train the model, periodically evaluating on dev and checkpointing
        the state_dict with the best dev F1 to self.save_path."""
        self.model.train()
        self.best_f1 = 0.0
        global_steps = 1

        for epoch in range(1, self.epochs + 1):
            for idx, batch_data in enumerate(self.train_loader, start=1):
                logits, y_true = self._forward_batch(batch_data)
                loss = self.loss_fn(logits, y_true)
                if self.writer is not None:
                    self.writer.add_scalar("train/loss", loss.cpu().item(), global_step=global_steps)
                print("epoch:{epoch},step:{step},train_loss:{loss}.".format(
                    epoch=epoch, step=idx, loss=loss.cpu().item()))

                self.optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.max_norm)
                self.optimizer.step()

                global_steps += 1

                if global_steps % self.step_interval == 0:
                    p, r, f1 = self.eval()
                    if self.writer is not None:
                        self.writer.add_scalar("valid/p", p, global_steps)
                        self.writer.add_scalar("valid/r", r, global_steps)
                        self.writer.add_scalar("valid/f1", f1, global_steps)
                    print("------start evaluating model in dev data------")
                    print("epoch:{epoch},step:{idx},precision:{p},recall:{r},F1-score:{f1}".format(
                        epoch=epoch, idx=idx, p=p, r=r, f1=f1))
                    if self.best_f1 < f1:
                        self.best_f1 = f1
                        torch.save(self.model.state_dict(), f=self.save_path)

                    print("epoch:{epoch},step:{idx},best_f1:{best_f1}".format(
                        epoch=epoch, idx=idx, best_f1=self.best_f1))
                    print("------finish evaluating model in dev data------")
                    # eval() switched to eval mode; resume training mode
                    self.model.train()

        if self.writer is not None:
            self.writer.flush()
            self.writer.close()

    def _evaluate(self, data_loader):
        """Shared metric computation for eval() and test(); returns (p, r, f1)."""
        self.model.eval()
        y_preds = []
        y_trues = []

        with torch.no_grad():
            for batch_data in data_loader:
                logits, y_true = self._forward_batch(batch_data)
                y_trues.extend(y_true.cpu().numpy().tolist())
                y_preds.extend(np.argmax(logits.cpu().numpy(), axis=-1).tolist())

        y_preds = np.array(y_preds)
        y_trues = np.array(y_trues)

        p = precision_score(y_trues, y_preds, average="macro")
        r = recall_score(y_trues, y_preds, average="macro")
        f1 = f1_score(y_trues, y_preds, average="weighted")

        return p, r, f1

    def eval(self):
        """Evaluate on the dev set; returns (precision, recall, f1)."""
        return self._evaluate(self.dev_loader)

    def test(self):
        """Evaluate on the test set; returns (precision, recall, f1)."""
        return self._evaluate(self.test_loader)

    def predict(self, x):
        '''
        predict class probabilities for unlabeled batches
        :param x: iterable of batches (input_ids, or (input_ids, attention_mask) for Bert models)
        :return: numpy array of shape [num_samples, num_class]
        '''
        self.model.eval()
        y_preds = []
        with torch.no_grad():
            for batch_data in x:
                if self.model_name in self.BASE_MODELS:
                    logits = self.model(batch_data.to(self.device))
                elif self.model_name in self.BERT_MODELS:
                    if len(batch_data) == 2:
                        input_ids, attention_mask = batch_data[0], batch_data[1]
                        logits = self.model(input_ids.to(self.device), attention_mask.to(self.device))
                    else:
                        logits = self.model(batch_data.to(self.device))
                else:
                    raise ValueError("unknown model name: {}".format(self.model_name))

                prob = F.softmax(logits.cpu(), dim=-1)
                y_preds.extend(prob)

        return torch.stack(y_preds, dim=0).numpy()
19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/build/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/build/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/build/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: f9bc921cff24ced2183db2aea1cdfb41 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/build/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. DeepClassifier documentation master file, created by 2 | sphinx-quickstart on Fri Dec 11 18:43:18 2020. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to DeepClassifier's documentation! 7 | ========================================== 8 | DeepClassifier is a python package based on pytorch, which is easy-use and general for text classification task.🤩 9 | 10 | .. 
toctree:: 11 | :maxdepth: 2 12 | :caption: Contents: 13 | 14 | 15 | QuickStart/index.rst 16 | Models/index.rst 17 | -------------------------------------------------------------------------------- /docs/build/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions 
.rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/build/html/_static/css/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/css/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* This file intentionally left blank. */ 2 | -------------------------------------------------------------------------------- /docs/build/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 
46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s === 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node, addItems) { 70 | if (node.nodeType === 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && 74 | !jQuery(node.parentNode).hasClass(className) && 75 | !jQuery(node.parentNode).hasClass("nohighlight")) { 76 | var span; 77 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 78 | if (isInSVG) { 79 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 80 | } else { 81 | span = document.createElement("span"); 82 | span.className = className; 83 | } 84 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 85 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 86 | document.createTextNode(val.substr(pos + text.length)), 87 | node.nextSibling)); 88 | node.nodeValue = val.substr(0, pos); 89 | if (isInSVG) { 90 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 91 | var bbox = node.parentElement.getBBox(); 92 | rect.x.baseVal.value = bbox.x; 93 | rect.y.baseVal.value = bbox.y; 94 | rect.width.baseVal.value = bbox.width; 95 | rect.height.baseVal.value = bbox.height; 96 | rect.setAttribute('class', className); 97 | addItems.push({ 98 | "parent": node.parentNode, 99 | "target": rect}); 100 | } 101 | } 102 | } 
103 | else if (!jQuery(node).is("button, select, textarea")) { 104 | jQuery.each(node.childNodes, function() { 105 | highlight(this, addItems); 106 | }); 107 | } 108 | } 109 | var addItems = []; 110 | var result = this.each(function() { 111 | highlight(this, addItems); 112 | }); 113 | for (var i = 0; i < addItems.length; ++i) { 114 | jQuery(addItems[i].parent).before(addItems[i].target); 115 | } 116 | return result; 117 | }; 118 | 119 | /* 120 | * backward compatibility for jQuery.browser 121 | * This will be supported until firefox bug is fixed. 122 | */ 123 | if (!jQuery.browser) { 124 | jQuery.uaMatch = function(ua) { 125 | ua = ua.toLowerCase(); 126 | 127 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 128 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 129 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 130 | /(msie) ([\w.]+)/.exec(ua) || 131 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 132 | []; 133 | 134 | return { 135 | browser: match[ 1 ] || "", 136 | version: match[ 2 ] || "0" 137 | }; 138 | }; 139 | jQuery.browser = {}; 140 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 141 | } 142 | 143 | /** 144 | * Small JavaScript module for the documentation. 145 | */ 146 | var Documentation = { 147 | 148 | init : function() { 149 | this.fixFirefoxAnchorBug(); 150 | this.highlightSearchWords(); 151 | this.initIndexTable(); 152 | if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { 153 | this.initOnKeyListeners(); 154 | } 155 | }, 156 | 157 | /** 158 | * i18n support 159 | */ 160 | TRANSLATIONS : {}, 161 | PLURAL_EXPR : function(n) { return n === 1 ? 
0 : 1; }, 162 | LOCALE : 'unknown', 163 | 164 | // gettext and ngettext don't access this so that the functions 165 | // can safely bound to a different name (_ = Documentation.gettext) 166 | gettext : function(string) { 167 | var translated = Documentation.TRANSLATIONS[string]; 168 | if (typeof translated === 'undefined') 169 | return string; 170 | return (typeof translated === 'string') ? translated : translated[0]; 171 | }, 172 | 173 | ngettext : function(singular, plural, n) { 174 | var translated = Documentation.TRANSLATIONS[singular]; 175 | if (typeof translated === 'undefined') 176 | return (n == 1) ? singular : plural; 177 | return translated[Documentation.PLURALEXPR(n)]; 178 | }, 179 | 180 | addTranslations : function(catalog) { 181 | for (var key in catalog.messages) 182 | this.TRANSLATIONS[key] = catalog.messages[key]; 183 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 184 | this.LOCALE = catalog.locale; 185 | }, 186 | 187 | /** 188 | * add context elements like header anchor links 189 | */ 190 | addContextElements : function() { 191 | $('div[id] > :header:first').each(function() { 192 | $('\u00B6'). 193 | attr('href', '#' + this.id). 194 | attr('title', _('Permalink to this headline')). 195 | appendTo(this); 196 | }); 197 | $('dt[id]').each(function() { 198 | $('\u00B6'). 199 | attr('href', '#' + this.id). 200 | attr('title', _('Permalink to this definition')). 
201 | appendTo(this); 202 | }); 203 | }, 204 | 205 | /** 206 | * workaround a firefox stupidity 207 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 208 | */ 209 | fixFirefoxAnchorBug : function() { 210 | if (document.location.hash && $.browser.mozilla) 211 | window.setTimeout(function() { 212 | document.location.href += ''; 213 | }, 10); 214 | }, 215 | 216 | /** 217 | * highlight the search words provided in the url in the text 218 | */ 219 | highlightSearchWords : function() { 220 | var params = $.getQueryParameters(); 221 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; 222 | if (terms.length) { 223 | var body = $('div.body'); 224 | if (!body.length) { 225 | body = $('body'); 226 | } 227 | window.setTimeout(function() { 228 | $.each(terms, function() { 229 | body.highlightText(this.toLowerCase(), 'highlighted'); 230 | }); 231 | }, 10); 232 | $('
' + _('Hide Search Matches') + '
') 234 | .appendTo($('#searchbox')); 235 | } 236 | }, 237 | 238 | /** 239 | * init the domain index toggle buttons 240 | */ 241 | initIndexTable : function() { 242 | var togglers = $('img.toggler').click(function() { 243 | var src = $(this).attr('src'); 244 | var idnum = $(this).attr('id').substr(7); 245 | $('tr.cg-' + idnum).toggle(); 246 | if (src.substr(-9) === 'minus.png') 247 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 248 | else 249 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 250 | }).css('display', ''); 251 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 252 | togglers.click(); 253 | } 254 | }, 255 | 256 | /** 257 | * helper function to hide the search marks again 258 | */ 259 | hideSearchWords : function() { 260 | $('#searchbox .highlight-link').fadeOut(300); 261 | $('span.highlighted').removeClass('highlighted'); 262 | }, 263 | 264 | /** 265 | * make the url absolute 266 | */ 267 | makeURL : function(relativeURL) { 268 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 269 | }, 270 | 271 | /** 272 | * get the current relative url 273 | */ 274 | getCurrentURL : function() { 275 | var path = document.location.pathname; 276 | var parts = path.split(/\//); 277 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 278 | if (this === '..') 279 | parts.pop(); 280 | }); 281 | var url = parts.join('/'); 282 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 283 | }, 284 | 285 | initOnKeyListeners: function() { 286 | $(document).keydown(function(event) { 287 | var activeElementType = document.activeElement.tagName; 288 | // don't navigate when in search box, textarea, dropdown or button 289 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT' 290 | && activeElementType !== 'BUTTON' && !event.altKey && !event.ctrlKey && !event.metaKey 291 | && !event.shiftKey) { 292 | switch (event.keyCode) { 293 | case 37: // left 294 | var prevHref = 
$('link[rel="prev"]').prop('href'); 295 | if (prevHref) { 296 | window.location.href = prevHref; 297 | return false; 298 | } 299 | case 39: // right 300 | var nextHref = $('link[rel="next"]').prop('href'); 301 | if (nextHref) { 302 | window.location.href = nextHref; 303 | return false; 304 | } 305 | } 306 | } 307 | }); 308 | } 309 | }; 310 | 311 | // quick alias for translations 312 | _ = Documentation.gettext; 313 | 314 | $(document).ready(function() { 315 | Documentation.init(); 316 | }); 317 | -------------------------------------------------------------------------------- /docs/build/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0.0.1', 4 | LANGUAGE: 'None', 5 | COLLAPSE_INDEX: false, 6 | BUILDER: 'html', 7 | FILE_SUFFIX: '.html', 8 | LINK_SUFFIX: '.html', 9 | HAS_SOURCE: true, 10 | SOURCELINK_SUFFIX: '.txt', 11 | NAVIGATION_WITH_KEYS: false 12 | }; -------------------------------------------------------------------------------- /docs/build/html/_static/file.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/file.png -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/FontAwesome.otf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/FontAwesome.otf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Light.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Light.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Light.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Light.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Regular.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Regular.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Regular.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Thin.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Thin.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/Roboto-Slab-Thin.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/Roboto-Slab-Thin.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/fontawesome-webfont.woff2: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-bold-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-bold-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-bold-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-bold-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-bold.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-normal-italic.woff: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-normal-italic.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-normal-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-normal-italic.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-normal.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-normal.woff -------------------------------------------------------------------------------- /docs/build/html/_static/fonts/lato-normal.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codewithzichao/DeepClassifier/c07e8041a2aca855fed9bf69fd571f1cbd5ec032/docs/build/html/_static/fonts/lato-normal.woff2 -------------------------------------------------------------------------------- /docs/build/html/_static/js/badge_only.js: -------------------------------------------------------------------------------- 1 | !function(e){var t={};function r(n){if(t[n])return t[n].exports;var o=t[n]={i:n,l:!1,exports:{}};return e[n].call(o.exports,o,o.exports,r),o.l=!0,o.exports}r.m=e,r.c=t,r.d=function(e,t,n){r.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:n})},r.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.t=function(e,t){if(1&t&&(e=r(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return 
e;var n=Object.create(null);if(r.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)r.d(n,o,function(t){return e[t]}.bind(null,o));return n},r.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return r.d(t,"a",t),t},r.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},r.p="",r(r.s=4)}({4:function(e,t,r){}}); -------------------------------------------------------------------------------- /docs/build/html/_static/js/html5shiv-printshiv.min.js: -------------------------------------------------------------------------------- 1 | /** 2 | * @preserve HTML5 Shiv 3.7.3-pre | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed 3 | */ 4 | !function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return 
b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="DeepClassifier is a python package based on pytorch, 
which is easy-use and general for text classification task.🤩
156 |Contents:
158 |