├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── demo_lenet.py ├── demo_rnn.py └── pydeeplearn ├── __init__.py ├── core ├── __init__.py ├── layers.py ├── net.py ├── numeric.py └── solve.py ├── image ├── __init__.py ├── image.pyx └── setup.py ├── net ├── __init__.py ├── cnn.py └── rnn.py └── nlp ├── __init__.py ├── ptbtree.py └── wordvectors.py /.gitignore: -------------------------------------------------------------------------------- 1 | **/image.pyd 2 | **/image.cpp 3 | **/build/ 4 | **/*.gz 5 | **/*.zip 6 | **/*.pyc 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. 
Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 
55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. 
You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. 
But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. 
(This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. 
These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 
214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. 
If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | {description} 294 | Copyright (C) {year} {fullname} 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 
305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | {signature of Ty Coon}, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 
340 | 341 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | dai: 2 | cd pydeeplearn/image && python setup.py build_ext -i && cd ../.. 3 | 4 | clean: 5 | cd pydeeplearn/image && rm -rf build image.so image.pyd image.cpp && cd ../.. 6 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # pydeeplearn: a Python Deep Learning Library 2 | ## Introduction 3 | pydeeplearn is a simple deep learning library *written from scratch entirely in Python*. It is not meant to be a production-quality library (for that, check out Caffe, Theano, Mocha, Torch, or Deeplearning4j). I wrote this in my free time as an exercise and I am releasing the code for others to learn from. With that said, the codebase could definitely use more asserts and/or comments. 4 | 5 | The design of the layers and the modularity is mostly inspired by Mocha and Theano, and the convolution approach expands the image (im2col and col2im) similar to the approach in Caffe (Yangqing Jia) and in code by Andrej Karpathy. 
6 | 7 | * Author: Sameh Khamis (sameh@umiacs.umd.edu) 8 | * License: GPLv2 for non-commercial research purposes only 9 | * Dependencies: Numpy 1.9.2 and Cython 0.19.1 (Run 'make' for im2col and col2im) 10 | 11 | ## Features 12 | * Modular design 13 | * In-memory data, parameter, and label layers 14 | * Data augmentations: cropping, rotation, shearing, and mirroring 15 | * Operations: convolution, pooling, dropout, and fully-connected 16 | * Non-linearities: relu, tanh, and sigmoid 17 | * Losses: cross-entropy, softmax, hinge, and squared 18 | * Gradient descent updates: vanilla, momentum, Nesterov's, Adagrad, and RMSprop 19 | * Step decay: fixed, inverse, and exponential 20 | * Supports evaluation of DAG-connected networks (not just chains) 21 | * Snapshot saving and loading 22 | * Network visualization with Graphviz (export to dot files) 23 | * Gradient checking through finite differences 24 | * Numerically stable functions 25 | 26 | ## Demos 27 | * Handwritten Digit Recognition: LeNet on MNIST, accuracy = 99.04% (LeCun et al, IEEE 1998) 28 | * Feed-forward convolutional neural network (CNN) with a fixed structure. Data is augmented through random crops and random shears. 29 | * Sentiment Analysis: RNN on Movie Reviews, accuracy = 79.4% (Socher et al, EMNLP 2013) 30 | * Varying structure recursive neural network (RNN), built through parsing a sentence dependency tree. Word representations are initialized from a pre-trained WordVectors class (GloVe or word2vec). 31 | 32 | ## Bonus 33 | * PTB Reader: parses dependency trees from the Penn Treebank Project 34 | * WordVectors: parses GloVe files (Pennington et al), easy to extend to word2vec (Mikolov et al). 
Supports lower-dimensional (random) projections and querying analogies 35 | -------------------------------------------------------------------------------- /demo_lenet.py: -------------------------------------------------------------------------------- 1 | # Handwritten Digit Recognition: LeNet on MNIST 2 | # Author: Sameh Khamis (sameh@umiacs.umd.edu) 3 | # License: GPLv2 for non-commercial research purposes only 4 | 5 | import numpy as np 6 | import cPickle, gzip, urllib, os 7 | from pydeeplearn.core.layers import Data, Label, Crop, Shear, Conv, Pool, FC, Relu, Dropout, Sum, Softmax 8 | from pydeeplearn.core.solve import RMSprop, InverseDecay 9 | from pydeeplearn.net.cnn import CNN 10 | 11 | # Load the dataset files 12 | mnistfile = 'mnist.pkl.gz' 13 | if not os.path.exists(mnistfile): 14 | urllib.urlretrieve('http://deeplearning.net/data/mnist/' + mnistfile, mnistfile) 15 | 16 | f = gzip.open(mnistfile, 'rb') 17 | train_set, valid_set, test_set = cPickle.load(f) 18 | f.close() 19 | 20 | train_set = (train_set[0].reshape(-1, 28, 28, 1), train_set[1].reshape(-1, 1)) 21 | valid_set = (valid_set[0].reshape(-1, 28, 28, 1), valid_set[1].reshape(-1, 1)) 22 | test_set = (test_set[0].reshape(-1, 28, 28, 1), test_set[1].reshape(-1, 1)) 23 | 24 | # Create the CNN structure 25 | lambdaa = 1e-4 26 | data = Data(np.r_[train_set[0], valid_set[0]].mean(axis=0)) 27 | label = Label() 28 | cropped = Crop(data, cropsize=(24, 24)) 29 | sheared = Shear(cropped) 30 | c1 = Conv(sheared, nfilters=20, window=5, stride=1) 31 | p1 = Pool(c1, window=2, stride=2) 32 | c2 = Conv(p1, nfilters=50, window=5, stride=1) 33 | p2 = Pool(c2, window=2, stride=2) 34 | f1 = FC(p2, ndim=500) 35 | r3 = Relu(f1, leak=0.01) 36 | f2 = FC(r3, ndim=10) 37 | reg = Sum(c1.input[1]**2) + Sum(c2.input[1]**2) + Sum(f1.input[1]**2) + Sum(f2.input[1]**2) 38 | loss = Softmax(f2, label) 39 | obj = loss + lambdaa * reg 40 | 41 | # CNN training 42 | cnn = CNN(obj, name='mnist', update=RMSprop(), step=InverseDecay()) 43 
| cnn.train(np.r_[train_set[0], valid_set[0]], np.r_[train_set[1], valid_set[1]], epochs=15) 44 | 45 | # CNN prediction 46 | nruns = 4 47 | predicted = np.zeros((test_set[0].shape[0], 10)) 48 | for run in np.arange(nruns): 49 | predicted += cnn.predict(test_set[0]) / nruns 50 | print (np.argmax(predicted, axis=1) == test_set[1].reshape(-1)).sum() / float(test_set[1].size) 51 | -------------------------------------------------------------------------------- /demo_rnn.py: -------------------------------------------------------------------------------- 1 | # Sentiment Analysis: RNN on Movie Reviews 2 | # Author: Sameh Khamis (sameh@umiacs.umd.edu) 3 | # License: GPLv2 for non-commercial research purposes only 4 | 5 | import numpy as np 6 | import urllib, os 7 | from zipfile import ZipFile 8 | from pydeeplearn.core.solve import RMSprop, InverseDecay 9 | from pydeeplearn.nlp.ptbtree import PTBTree 10 | from pydeeplearn.nlp.wordvectors import WordVectors 11 | from pydeeplearn.net.rnn import RNN 12 | 13 | # Load the dataset files 14 | ptbfile = 'trainDevTestTrees_PTB.zip' 15 | if not os.path.exists(ptbfile): 16 | urllib.urlretrieve('http://nlp.stanford.edu/sentiment/' + ptbfile, ptbfile) 17 | 18 | f = ZipFile(ptbfile) 19 | train_trees = [PTBTree.parse(line) for line in f.read('trees/train.txt').split('\n') if len(line) > 0] 20 | dev_trees = [PTBTree.parse(line) for line in f.read('trees/dev.txt').split('\n') if len(line) > 0] 21 | test_trees = [PTBTree.parse(line) for line in f.read('trees/test.txt').split('\n') if len(line) > 0] 22 | f.close() 23 | 24 | # Load GloVe and randomly project the vectors to 25 dimensions 25 | glove = WordVectors.from_glove(d=50) 26 | glove.project(np.random.randn(50, 25) * 0.0001) 27 | 28 | # RNN training 29 | rnn = RNN(glove, lambdaa=1e-4, name='rnn', update=RMSprop(), step=InverseDecay()) 30 | rnn.train(train_trees + dev_trees, epochs=10) 31 | 32 | # RNN prediction 33 | pred, true = rnn.predict(test_trees) 34 | print (true == 
pred.argmax(axis=1)).sum() / float(true.size) 35 | -------------------------------------------------------------------------------- /pydeeplearn/__init__.py: -------------------------------------------------------------------------------- 1 | import core, net, image, nlp 2 | __all__ = ['core', 'net', 'image', 'nlp'] 3 | -------------------------------------------------------------------------------- /pydeeplearn/core/__init__.py: -------------------------------------------------------------------------------- 1 | import layers, net, solve, numeric 2 | __all__ = ['layers', 'net', 'solve', 'numeric'] 3 | -------------------------------------------------------------------------------- /pydeeplearn/core/layers.py: -------------------------------------------------------------------------------- 1 | # Neural network layers (or function nodes) 2 | # Includes an in-memory data layer, label layer, convolution, pooling, dropout, and other popular operations. Also includes cross-entropy, softmax, hinge, and squared loss layers. 3 | # Author: Sameh Khamis (sameh@umiacs.umd.edu) 4 | # License: GPLv2 for non-commercial research purposes only 5 | 6 | import numpy as np 7 | import numeric 8 | from ..image import im2col, col2im, transform, invtransform 9 | 10 | DTYPE = np.float32 11 | 12 | class Node: 13 | def __init__(self): 14 | # don't save the outputs too! 
the circular refs make python's gc fail big 15 | self._input = [] 16 | self._value = np.array([]) 17 | self._gradient = np.zeros(self._value.shape, dtype=DTYPE) 18 | 19 | def forward(self): 20 | self._gradient = np.zeros(self._value.shape, dtype=DTYPE) 21 | 22 | def backward(self): 23 | pass 24 | 25 | @property 26 | def input(self): 27 | return self._input 28 | 29 | @property 30 | def value(self): 31 | return self._value 32 | 33 | @property 34 | def gradient(self): 35 | return self._gradient 36 | 37 | def __mul__(self, other): 38 | return ScalarMul(self, other) if np.isscalar(other) else Mul(self, other) 39 | 40 | def __rmul__(self, other): 41 | return ScalarMul(self, other) if np.isscalar(other) else Mul(other, self) 42 | 43 | def __add__(self, other): 44 | return Add(self, other) 45 | 46 | def __radd__(self, other): 47 | return Add(other, self) 48 | 49 | def __pow__(self, other): 50 | return ScalarPow(self, other) 51 | 52 | def __neg__(self): 53 | return Neg(self) 54 | 55 | @property 56 | def T(self): 57 | return Trans(self) 58 | 59 | @property 60 | def shape(self): 61 | return self._value.shape 62 | 63 | @property 64 | def size(self): 65 | return self._value.size 66 | 67 | @property 68 | def dtype(self): 69 | return self._value.dtype 70 | 71 | @property 72 | def ndim(self): 73 | return self._value.ndim 74 | 75 | def __str__(self): 76 | return '%s [%s]' % (self.__class__.__name__, 'x'.join([str(s) for s in self.shape])) 77 | 78 | def __setstate__(self, dict): 79 | self.__dict__ = dict 80 | 81 | if not isinstance(self, Param): 82 | self.__dict__['_value'] = np.empty(self.__dict__['_value_shape'], dtype=DTYPE) 83 | del self.__dict__['_value_shape'] 84 | 85 | def __getstate__(self): 86 | dict = self.__dict__.copy() 87 | 88 | del dict['_gradient'] 89 | if '_mask' in dict: del dict['_mask'] 90 | if '_col' in dict: del dict['_col'] 91 | if '_temp' in dict: del dict['_temp'] 92 | 93 | if isinstance(self, Label): 94 | del dict['_result'] 95 | 96 | if not isinstance(self, 
class Op(Node):
    pass  # Base class of non-data, non-param, and non-label layers

class Data(Node):
    """Input layer: holds a minibatch and subtracts a fixed mean on forward."""
    def __init__(self, data_mean_or_shape):
        # Accepts either a precomputed mean array or a plain shape tuple
        # (in which case the mean is all zeros, i.e. no centering)
        if type(data_mean_or_shape) is tuple:
            data_mean = np.zeros(data_mean_or_shape, dtype=DTYPE)
        else:
            data_mean = data_mean_or_shape

        self._input = []
        self._mean = data_mean
        self._value = np.zeros(data_mean.shape, dtype=DTYPE)[np.newaxis]  # leading batch axis
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        # Mean-center the batch (rebinds _value) and reset the gradient buffer
        self._value = self._value - self._mean
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

class Preprocess(Op):
    pass  # Base class of data preprocessing (crop, contrast, tint, skew, etc.)

class Crop(Preprocess):
    """Data augmentation: spatial crop of an (N, H, W, C) input."""
    def __init__(self, input, cropsize):
        self._input = [input]
        self._cropsize = cropsize
        self._value = np.empty((input.shape[0], cropsize[0], cropsize[1], input.shape[3]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self, disabled=False):
        high = np.array(self._input[0].shape[1:3]) - self._cropsize
        if disabled:
            # Bug fix: the crop previously stayed random even when disabled=True,
            # so "deterministic" forward passes were not repeatable; use a
            # deterministic center crop for evaluation/gradient checking
            self._pos = high // 2
        else:
            # Bug fix: valid start offsets are 0..high inclusive, hence
            # randint(h + 1); randint(h) excluded the last valid offset and
            # raised ValueError whenever crop size equals input size (h == 0)
            self._pos = np.array([np.random.randint(h + 1) for h in high])
        self._value = self._input[0]._value[:, self._pos[0]:self._pos[0] + self._cropsize[0], self._pos[1]:self._pos[1] + self._cropsize[1], :]
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self, disabled=False):
        # Route the gradient back into the cropped window only
        self._input[0]._gradient[:, self._pos[0]:self._pos[0] + self._cropsize[0], self._pos[1]:self._pos[1] + self._cropsize[1], :] += self._gradient

class Mirror(Preprocess):
    """Data augmentation: horizontal flip with probability 0.5."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self, disabled=False):
        self._flip = not disabled and np.random.rand() > 0.5
        self._value = self._input[0]._value[:, :, ::-1, :] if self._flip else self._input[0]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self, disabled=False):
        # Un-flip the gradient if the forward pass flipped
        self._input[0]._gradient += self._gradient[:, :, ::-1, :] if self._flip else self._gradient

class Rotate(Preprocess):
    """Data augmentation: small random rotation (degrees) about the image center."""
    def __init__(self, input, minangle=-15, maxangle=15):
        self._input = [input]
        self._angles = (minangle, maxangle)
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self, disabled=False):
        # Gaussian angle with sd = range/6 so ~99.7% of draws fall in the range.
        # NOTE(review): only the range width is used, so draws are always
        # centered at 0 even for an asymmetric (minangle, maxangle) — confirm intent
        theta = np.random.randn() * ((self._angles[1] - self._angles[0]) / 6.0) if not disabled else 0  # 6 sigma
        sin_theta, cos_theta = np.sin(theta * np.pi / 180), np.cos(theta * np.pi / 180)
        self._A = np.array([cos_theta, sin_theta, -sin_theta, cos_theta], dtype=np.float32).reshape(2, 2)
        self._value = transform(self._input[0]._value, self._A)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self, disabled=False):
        # invtransform is the adjoint of transform, so it routes gradients back
        self._input[0]._gradient += invtransform(self._gradient, self._A)

class Shear(Preprocess):
    """Data augmentation: small random shear in x and y."""
    def __init__(self, input, minshearx=-0.75, maxshearx=0.75, minsheary=-0.75, maxsheary=0.75):
        self._input = [input]
        self._shearx = (minshearx, maxshearx)
        self._sheary = (minsheary, maxsheary)
        self._value = np.empty(input.shape, dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self, disabled=False):
        # Same 6-sigma convention (and the same zero-centering caveat) as Rotate
        mx = np.random.randn() * ((self._shearx[1] - self._shearx[0]) / 6.0) if not disabled else 0  # 6 sigma
        my = np.random.randn() * ((self._sheary[1] - self._sheary[0]) / 6.0) if not disabled else 0  # 6 sigma
        self._A = np.array([1, mx, my, 1], dtype=np.float32).reshape(2, 2)
        self._value = transform(self._input[0]._value, self._A)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self, disabled=False):
        self._input[0]._gradient += invtransform(self._gradient, self._A)

class Param(Node):
    """Trainable parameter node."""
    def __init__(self, val):
        self._input = []
        self._value = val.astype(DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
        self._fixed = False  # presumably lets a solver freeze this param — not used in visible code

    @staticmethod
    def zeros(shape):
        return Param(np.zeros(shape, dtype=DTYPE))

    @staticmethod
    def randn(shape, var=-1):
        # Default scale is sqrt(2 / prod(shape)) (He-style, over the full shape)
        if var < 0:
            var = np.sqrt(2.0 / np.prod(shape))
        return Param(var * np.random.randn(*shape).astype(DTYPE))

class FC(Op):
    """Fully-connected layer: flattens input to (N, -1) and applies x.W + b."""
    def __init__(self, input, ndim):
        shp = input.shape
        w = Param.randn((np.prod(shp[1:]), ndim))
        b = Param.zeros((ndim,))
        self._input = [input, w, b]
        self._value = np.empty((shp[0], ndim), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        # FC = x.dot(w) + b on the flattened batch
        data = self._input[0]._value.reshape(self._input[0].shape[0], -1)
        self._value = np.dot(data, self._input[1]._value) + self._input[2]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # dx = g.W^T (reshaped back), dW = x^T.g, db = sum over the batch
        self._input[0]._gradient += self._gradient.dot(self._input[1]._value.T).reshape(self._input[0]._gradient.shape)
        data = self._input[0]._value.reshape(self._input[0].shape[0], -1)
        self._input[1]._gradient += data.T.dot(self._gradient)
        self._input[2]._gradient += self._gradient.sum(axis=0)

class Affine(FC):
    """FC with externally supplied (shared) weight and bias params; reuses FC's forward/backward."""
    def __init__(self, input, w, b):
        shp = input.shape
        self._input = [input, w, b]
        self._value = np.empty((shp[0], b.size), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)
class Conv(Op):
    """2D convolution implemented as im2col followed by one matrix multiply."""
    def __init__(self, input, nfilters, window=5, stride=1):
        shp = input.shape
        # Filter bank is (nfilters, window, window, input channels)
        w = Param.randn((nfilters, window, window, shp[3]))
        b = Param.zeros((nfilters))
        self._input = [input, w, b]
        self._window = window
        self._nfilters = nfilters
        self._stride = stride
        # NOTE(review): output is allocated at the input's spatial size; with
        # (window - 1) / 2 padding that only holds for stride == 1 and an odd
        # window — confirm stride > 1 is actually supported here
        self._value = np.empty((shp[0], shp[1], shp[2], nfilters), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        n = self._input[0].shape[0]
        shp = self.shape

        # Reshape images to (count, channels, height, width), then apply im2col;
        # _col is cached for reuse in backward
        im = self._input[0]._value.transpose((0, 3, 1, 2))
        self._col = im2col(im, self._window, self._window, (self._window - 1) / 2, self._stride)

        # Now that all the windows are in matrix form, calculate w.dot(col) + b
        w = self._input[1]._value.reshape(self._nfilters, -1)
        b = self._input[2]._value.reshape(-1, 1)
        self._value = np.dot(w, self._col) + b

        # Reshape result from (nfilters, -1) to (count, height, width, nfilters)
        self._value = self._value.reshape(shp[3], n, shp[1], shp[2]).transpose((1, 2, 3, 0))
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Reshape gradient to (nfilters, -1) and back-propagate through the dot product
        gradient = self._gradient.transpose((3, 0, 1, 2)).reshape(self._nfilters, -1)
        self._input[1]._gradient += gradient.dot(self._col.T).reshape(self._input[1]._gradient.shape)
        self._input[2]._gradient += gradient.sum(axis=1)

        # The gradient w.r.t the images is similar, but we need to aggregate the
        # results over the windows, which col2im does by scatter-adding
        w = self._input[1]._value.reshape(self._nfilters, -1)
        shp = self._input[0].shape
        imgradient = col2im(w.T.dot(gradient), shp[0], shp[3], shp[1], shp[2], self._window, self._window, (self._window - 1) / 2, self._stride)

        # Reshape the result back to (count, height, width, channels)
        self._input[0]._gradient += imgradient.transpose((0, 2, 3, 1))

class Pool(Op):
    """Max pooling, using im2col plus an argmax mask cached for backward."""
    def __init__(self, input, window=2, stride=2):
        shp = input.shape
        self._input = [input]
        self._window = window
        self._stride = stride
        # NOTE(review): (shp - window) / stride relies on Python 2 integer division
        self._value = np.empty((shp[0], (shp[1] - window) / stride + 1, (shp[2] - window) / stride + 1, shp[3]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        n = self._input[0].shape[0]
        shp = self.shape

        # Reshape images to (count, channels, height, width), then apply im2col
        # (no padding); regroup columns as (channels, windows, window elements)
        im = self._input[0]._value.transpose((0, 3, 1, 2))
        col = im2col(im, self._window, self._window, 0, self._stride)
        col = col.reshape(self._window * self._window, im.shape[1], -1).transpose((1, 2, 0))

        # Find the maximum in every window and store its index using a boolean mask
        self._mask = col.argmax(axis=2)
        self._mask = (self._mask[:, :, np.newaxis] == np.arange(self._window * self._window))
        self._value = col[self._mask].reshape(shp[3], n, shp[1], shp[2]).transpose((1, 2, 3, 0))
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        shp = self._input[0].shape

        # The gradient is calculated using the mask indices then aggregated over the windows
        gradient = self._gradient.transpose((3, 0, 1, 2)).reshape(1, shp[3], -1)
        col = (self._mask.transpose((2, 0, 1)) * gradient).reshape(-1, gradient.shape[2])
        imgradient = col2im(col, shp[0], shp[3], shp[1], shp[2], self._window, self._window, 0, self._stride)

        # Reshape the result back to (count, height, width, channels)
        self._input[0]._gradient += imgradient.transpose((0, 2, 3, 1))

class ScalarMul(Op):
    """Multiply a node elementwise by a fixed Python scalar."""
    def __init__(self, input, scalar=1):
        self._input = [input]
        self._scalar = scalar
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = self._input[0]._value * self._scalar
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # d/dx (s * x) = s
        self._input[0]._gradient += self._gradient * self._scalar

class ScalarPow(Op):
    """Raise a node elementwise to a fixed scalar power."""
    def __init__(self, input, scalar=1):
        self._input = [input]
        self._scalar = scalar
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        # Cache x^(s - 1); it is reused by backward as d/dx x^s = s * x^(s-1)
        self._temp = self._input[0]._value**(self._scalar - 1)
        self._value = self._temp * self._input[0]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        self._input[0]._gradient += self._scalar * self._temp * self._gradient

class Max(Op):
    """Elementwise maximum of two same-shaped nodes."""
    def __init__(self, input1, input2):
        assert input1.ndim == input2.ndim
        for i in np.arange(input1.ndim):
            assert input1.shape[i] == input2.shape[i]
        self._input = [input1, input2]
        self._value = np.empty((input1.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        # Strict >, so on ties the value (and the gradient) goes to input2
        self._mask = self._input[0]._value > self._input[1]._value
        self._value = np.where(self._mask, self._input[0]._value, self._input[1]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        self._input[0]._gradient += np.where(self._mask, self._gradient, 0)
        self._input[1]._gradient += np.where(self._mask, 0, self._gradient)
class Relu(Op):
    """(Leaky) rectified linear unit: x where x > 0, else leak * x."""
    def __init__(self, input, leak=0.01):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._leak = leak
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._mask = self._input[0]._value > 0
        self._value = np.where(self._mask, self._input[0]._value, self._leak * self._input[0]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Pass gradient through where the input was positive, scale by leak elsewhere
        self._input[0]._gradient += np.where(self._mask, self._gradient, self._leak * self._gradient)

class Dropout(Op):
    """Inverted dropout: keeps units with probability prob and rescales by 1/prob."""
    def __init__(self, input, prob=0.5):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._prob = prob
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self, disabled=False):
        if disabled:
            # Deterministic (test-time) pass is the identity; no rescale
            # needed because training already divided by the keep probability
            self._value = self._input[0]._value
        else:
            shp = self._input[0].shape
            self._mask = np.random.rand(*shp).astype(DTYPE) < self._prob
            self._value = np.where(self._mask, self._input[0]._value / self._prob, 0)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self, disabled=False):
        if disabled:
            self._input[0]._gradient += self._gradient
        else:
            self._input[0]._gradient += np.where(self._mask, self._gradient / self._prob, 0)

class Dot(Op):
    """Matrix product of two 2-D nodes."""
    def __init__(self, input1, input2):
        assert input1.ndim == 2 and input2.ndim == 2
        assert input1.shape[1] == input2.shape[0]
        self._input = [input1, input2]
        # Bug fix: was input[2].shape[1], which indexed the builtin `input`
        # function and raised a TypeError as soon as a Dot node was constructed
        self._value = np.empty((input1.shape[0], input2.shape[1]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = np.dot(self._input[0]._value, self._input[1]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # d/dA (A.B) = G.B^T ; d/dB (A.B) = A^T.G
        self._input[0]._gradient += self._gradient.dot(self._input[1]._value.T)
        self._input[1]._gradient += self._input[0]._value.T.dot(self._gradient)
class Mul(Op):
    """Elementwise product of two nodes."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty((input1.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = self._input[0]._value * self._input[1]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Product rule: each side gets the gradient scaled by the other's value
        self._input[0]._gradient += self._gradient * self._input[1]._value
        self._input[1]._gradient += self._gradient * self._input[0]._value

class Add(Op):
    """Elementwise sum of two nodes."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty((input1.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = self._input[0]._value + self._input[1]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Addition routes the gradient unchanged to both inputs
        self._input[0]._gradient += self._gradient
        self._input[1]._gradient += self._gradient

class Concat(Op):
    """Concatenate two nodes along axis 1, flattening trailing axes first."""
    def __init__(self, input1, input2):
        self._input = [input1, input2]
        self._value = np.empty((input1.shape[0], input1.shape[1] + input2.shape[1]), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = np.c_[self._input[0]._value.reshape(self._input[0].shape[0], -1), self._input[1]._value.reshape(self._input[1].shape[0], -1)]
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Split the gradient at the first input's flattened width
        k = np.prod(self._input[0].shape[1:])
        self._input[0]._gradient += self._gradient[:, :k].reshape(self._input[0]._gradient.shape)
        self._input[1]._gradient += self._gradient[:, k:].reshape(self._input[1]._gradient.shape)

class Neg(Op):
    """Elementwise negation."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = -self._input[0]._value
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        self._input[0]._gradient -= self._gradient

class Abs(Op):
    """Elementwise absolute value; subgradient at 0 is 0 (np.sign(0) == 0)."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = np.abs(self._input[0]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        self._input[0]._gradient += self._gradient * np.sign(self._input[0]._value)

class Trans(Op):
    """Matrix transpose."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = self._input[0]._value.T
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # The adjoint of a transpose is a transpose
        self._input[0]._gradient += self._gradient.T

class Sigmoid(Op):
    """Logistic activation using the numerically stable helper."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = numeric.sigmoid(self._input[0]._value)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # d/dx sig(x) = sig(x) * (1 - sig(x)), reusing the cached forward value
        self._input[0]._gradient += self._value * (1 - self._value) * self._gradient

class Tanh(Op):
    """Hyperbolic tangent, built from the stable sigmoid."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((input.shape), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = 2 * numeric.sigmoid(2 * self._input[0]._value) - 1  # tanh = 2 sig(2x) - 1
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        self._input[0]._gradient += (1 - self._value * self._value) * self._gradient  # 1 - tanh^2
class Sum(Op):
    """Sum of all elements, yielding a (1,)-shaped scalar node."""
    def __init__(self, input):
        self._input = [input]
        self._value = np.empty((1,), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def forward(self):
        self._value = np.sum(self._input[0]._value).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Broadcast the scalar gradient to every input element
        self._input[0]._gradient += self._gradient

class Loss(Op):
    """Base class of objectives: input[0] are predictions, input[1] is a Label node."""
    def __init__(self, input, label):
        self._input = [input, label]
        self._value = np.empty((1,), dtype=DTYPE)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    @property
    def labels(self):
        # Ground-truth values stored on the label node
        return self._input[1]._value

    @property
    def result(self):
        return self._input[1]._result  # result from label

class Label(Node):
    """Holds ground-truth values; losses write their predictions into _result."""
    def __init__(self):
        self._input = []
        self._value = np.zeros(1, dtype=np.int32)
        self._result = np.zeros(1, dtype=np.int32)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    @property
    def result(self):
        return self._result

class Xent(Loss):
    """Sigmoid cross-entropy on raw scores, averaged over all elements."""
    def forward(self):
        # A stable calculation, xent(sigmoid(x)) = (1 - t) x + log(1 + exp(-x))
        self._input[1]._result = numeric.sigmoid(self._input[0]._value)
        labels = self._input[1]._value
        xent = (1 - labels.astype(DTYPE)) * self._input[0]._value + numeric.log1pexp(-self._input[0]._value)
        self._value = (np.sum(xent) / DTYPE(xent.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # d/dx = sigmoid(x) - t, averaged over all elements
        diff = self._input[1]._result - self._input[1]._value
        self._input[0]._gradient += self._gradient * diff / DTYPE(diff.size)

class Softmax(Loss):
    """Softmax + negative log-likelihood of the correct class, averaged over the batch."""
    def forward(self):
        self._input[1]._result = numeric.softmax(self._input[0]._value)
        labels = self._input[1]._value
        # NLL = logsumexp(x) - x[correct class], avoiding explicit probabilities
        logz = numeric.logsumexp(self._input[0]._value)
        value = logz - self._input[0]._value[np.arange(labels.shape[0]), labels.reshape(-1)]
        self._value = (np.sum(value) / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # d/dx = softmax(x) - onehot(t), averaged
        diff = self._input[1]._result - numeric.onehot(self._input[1]._value, self._input[0].shape[1])
        self._input[0]._gradient += self._gradient * diff / DTYPE(diff.size)

class Hinge(Loss):
    """Binary hinge loss; 0/1 labels are mapped to -1/+1 targets."""
    def forward(self):
        self._input[1]._result = self._input[0]._value
        labels = self._input[1]._value
        self._target = (2 * labels - 1)
        value = 1 - self._target * self._input[0]._value
        self._mask = value > 0  # only margin violations contribute
        self._value = (np.where(self._mask, value, 0).sum() / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        # Subgradient is -target on violating elements, 0 elsewhere
        self._input[0]._gradient += self._gradient * np.where(self._mask, -self._target, 0) / DTYPE(self._target.size)

class MultiHinge(Loss):
    """Multiclass hinge; penalizes only the single worst margin violator per sample."""
    def forward(self):
        self._input[1]._result = self._input[0]._value
        all = np.arange(self._input[1]._value.shape[0])  # NOTE: shadows the builtin `all`
        labels = self._input[1]._value
        correct = self._input[0]._value[all, labels.reshape(-1)]
        value = self._input[0]._value - correct.reshape(-1, 1) + 1  # y_delta = 1
        value[all, labels.reshape(-1)] = 0  # t_delta = 0
        # Keep only the largest margin violation per sample
        self._argmax = np.argmax(value, axis=1)
        value = value[all, self._argmax]
        self._mask = value > 0
        self._value = (np.where(self._mask, value, 0).sum() / DTYPE(value.size)).reshape(1)
        self._gradient = np.zeros(self._value.shape, dtype=DTYPE)

    def backward(self):
        all = np.arange(self._input[1]._value.shape[0])
        labels = self._input[1]._value
        mask = np.zeros(self._input[0].shape, dtype=DTYPE)
        # +1 on the violating class and -1 on the correct class, violating samples only
        mask[all[self._mask], self._argmax[self._mask]] = 1
        mask[all[self._mask], labels.reshape(-1)[self._mask]] = -1
        self._input[0]._gradient += self._gradient * mask / DTYPE(mask.size)
labels.reshape(-1)[self._mask]] = -1 617 | self._input[0]._gradient += self._gradient * mask / DTYPE(mask.size) 618 | 619 | class Squared(Loss): 620 | def forward(self): 621 | self._input[1]._result = self._input[0]._value 622 | self._diff = self._input[1]._value - self._input[0]._value 623 | self._value = (0.5 * np.sum(self._diff**2) / DTYPE(self._diff.size)).reshape(1) 624 | self._gradient = np.zeros(self._value.shape, dtype=DTYPE) 625 | 626 | def backward(self): 627 | self._input[0]._gradient += self._gradient * -self._diff / DTYPE(self._diff.size) 628 | -------------------------------------------------------------------------------- /pydeeplearn/core/net.py: -------------------------------------------------------------------------------- 1 | # Base class for a neural network (or any function composition) 2 | # Works with functions connected in a general DAG (not just a chain) 3 | # Also supports snapshot saving and loading, and exporting to Graphviz dot files 4 | # Author: Sameh Khamis (sameh@umiacs.umd.edu) 5 | # License: GPLv2 for non-commercial research purposes only 6 | 7 | import numpy as np 8 | from datetime import datetime 9 | import os, cPickle 10 | from solve import * 11 | from layers import * 12 | 13 | class Net: 14 | def __init__(self, objective, update=RMSprop(), step=InverseDecay(), name='net', root_dir=None): 15 | self.setup(update, step, name, root_dir) 16 | 17 | self._obj = objective 18 | self.process_objective() 19 | 20 | def setup(self, update=RMSprop(), step=InverseDecay(), name='net', root_dir=None): 21 | self._update = update 22 | self._step = step 23 | self._epoch = 0 24 | self._iter = 0 25 | self._cwd = root_dir if root_dir is not None else os.getcwd() 26 | self._name = name 27 | 28 | def process_objective(self): 29 | self._params = set() 30 | self._labels = set() 31 | self._data = set() 32 | 33 | self.toposort() 34 | 35 | self._params = list(self._params) 36 | self._data = list(self._data) 37 | self._labels = list(self._labels) 38 | 39 | 
@property 40 | def result(self): 41 | return np.concatenate([x.result for x in self._labels]) 42 | 43 | @property 44 | def groundtruth(self): 45 | return np.concatenate([x.value for x in self._labels]) 46 | 47 | @property 48 | def value(self): 49 | return self._obj.value 50 | 51 | def toposort(self): 52 | # Recursive depth-first DAG topological sort (from one sink node) 53 | visited = set() 54 | self._sorted_nodes = [] 55 | self._toposort(self._obj, visited) 56 | 57 | def _toposort(self, node, visited): 58 | visited.add(node) 59 | 60 | # Also, collect param, data, and label nodes 61 | if isinstance(node, Param): 62 | self._params.add(node) 63 | elif isinstance(node, Data): 64 | self._data.add(node) 65 | elif isinstance(node, Label): 66 | self._labels.add(node) 67 | 68 | for x in node.input: 69 | if not x in visited: 70 | self._toposort(x, visited) 71 | self._sorted_nodes.append(node) 72 | 73 | def forward(self, deterministic=False): 74 | for node in self._sorted_nodes: 75 | if isinstance(node, Dropout) or isinstance(node, Preprocess): 76 | node.forward(disabled=deterministic) 77 | else: 78 | node.forward() 79 | 80 | def backward(self, deterministic=False): 81 | self._obj._gradient[:] = 1 82 | 83 | for node in reversed(self._sorted_nodes): 84 | if isinstance(node, Dropout) or isinstance(node, Preprocess): 85 | node.backward(disabled=deterministic) 86 | else: 87 | node.backward() 88 | 89 | def print_progress(self): 90 | gnorm = np.linalg.norm(np.concatenate([p.gradient.flatten() for p in self._params])) 91 | ts = datetime.now().strftime('%b-%d %I:%M:%S %p') 92 | print '%s epoch: %d, iter: %d, cost: %.3f, gnorm: %.3f' % (ts, self._epoch + 1, self._iter + 1, self.value[0], gnorm) 93 | 94 | def to_dot(self, filename): 95 | node_index = {} 96 | for i in np.arange(len(self._sorted_nodes)): 97 | node_index[self._sorted_nodes[i]] = i 98 | 99 | f = open(filename, 'w') 100 | f.write('digraph graphname {\n') 101 | for node in self._sorted_nodes: 102 | f.write('%d 
[label="%s"];\n' % (node_index[node], str(node))) 103 | if isinstance(node, Op): 104 | f.write('%d [shape=box];\n' % (node_index[node])); 105 | for node2 in self._sorted_nodes: 106 | for node1 in node2.input: 107 | f.write('%d -> %d;\n' % (node_index[node1], node_index[node2])) 108 | f.write('}\n') 109 | f.close() 110 | 111 | @staticmethod 112 | def save(net, filename): 113 | f = open(filename, 'wb') 114 | net._iter += 1 115 | cPickle.dump(net, f) 116 | net._iter -= 1 117 | f.close() 118 | 119 | @staticmethod 120 | def load(filename): 121 | f = open(filename, 'rb') 122 | net = cPickle.load(f) 123 | net._cwd = os.path.dirname(filename) 124 | f.close() 125 | return net 126 | 127 | def get_random_indices(self, n, batchsize): 128 | extra = (-(n % batchsize)) % batchsize 129 | idx = np.random.permutation(n + extra) # randomly permute the samples 130 | idx %= n # exactly divides batchsize 131 | return idx 132 | 133 | def gradient_check(self, h=1e-5): 134 | numerical = [np.empty(param.gradient.shape, dtype=param.dtype) for param in self._params] 135 | analytic = [] 136 | 137 | for i in np.arange(len(self._params)): 138 | param = self._params[i] 139 | for d in np.arange(param.value.size): 140 | param._value.flat[d] += h # set to f(x + h) 141 | f1 = self.forward(deterministic=True) 142 | param._value.flat[d] -= 2 * h # set to f(x - h) 143 | f2 = self.forward(deterministic=True) 144 | param._value.flat[d] += h # reset 145 | numerical[i].flat[d] = (f1 - f2) / (2 * h) 146 | self.forward(deterministic=True) 147 | self.backward(deterministic=True) 148 | analytic.append(param.gradient) # analytic gradient f'(x) 149 | 150 | numerical = np.concatenate([x.flatten() for x in numerical]) 151 | analytic = np.concatenate([x.flatten() for x in analytic]) 152 | return np.abs(numerical - analytic) / (np.abs(numerical) + np.abs(analytic)) 153 | -------------------------------------------------------------------------------- /pydeeplearn/core/numeric.py: 
# Numerically stable routines, and some other helper functions
# Guarded against loss of significance and/or catastrophic cancellation
# Author: Sameh Khamis (sameh@umiacs.umd.edu)
# License: GPLv2 for non-commercial research purposes only

import numpy as np

def eps(dtype=np.float32):
    """Machine epsilon of the given floating-point dtype."""
    return np.finfo(dtype).eps

def root(x, n):
    """The positive real nth root of x, found via the roots of t^n - x."""
    candidates = np.roots([1] + [0] * (n - 1) + [-x])
    positive_real = np.real(candidates)[(np.imag(candidates) == 0) & (candidates > 0)]
    return positive_real[0]

def sigmoid(x):
    """Stable logistic function 1 / (1 + exp(-x)).

    Exponentiates only non-positive values so neither branch can overflow.
    """
    nonneg = x >= 0
    decay = np.exp(-np.abs(x))  # exp of a non-positive value, always in (0, 1]
    result = np.empty(x.shape, dtype=x.dtype)
    result[nonneg] = 1 / (1 + decay[nonneg])
    result[~nonneg] = decay[~nonneg] / (1 + decay[~nonneg])
    return result

# Switch-over point for the Taylor branches below: root(3 * eps(), 3)
_TAYLOR_CUTOFF = 0.0070981273157505465

def log1p(x):
    """log(1 + x), via a 2nd-order Taylor series when |x| is tiny."""
    tiny = np.abs(x) < _TAYLOR_CUTOFF
    result = np.empty(x.shape, dtype=x.dtype)
    t = x[tiny]
    result[tiny] = t - t * t / 2  # log(1 + x) = x - x^2 / 2 for small x
    result[~tiny] = np.log(1 + x[~tiny])
    return result

def expm1(x):
    """exp(x) - 1, via a 2nd-order Taylor series when |x| is tiny."""
    tiny = np.abs(x) < _TAYLOR_CUTOFF
    result = np.empty(x.shape, dtype=x.dtype)
    t = x[tiny]
    result[tiny] = t + t * t / 2  # exp(x) - 1 = x + x^2 / 2 for small x
    result[~tiny] = np.exp(x[~tiny]) - 1
    return result
def log1pexp(x):
    """log(1 + exp(x)), stable for large positive and negative x.

    For x >= 0 uses x + log1p(exp(-x)) so exp never overflows.
    """
    nonneg = x >= 0
    result = np.empty(x.shape, dtype=x.dtype)
    result[nonneg] = x[nonneg] + log1p(np.exp(-x[nonneg]))
    result[~nonneg] = log1p(np.exp(x[~nonneg]))
    return result

def logsumexp(x):
    """Row-wise log(sum(exp(x_i))), shifted by the row max for stability."""
    a = x.max(axis=1).reshape(-1, 1)
    return np.log(np.sum(np.exp(x - a), axis=1)) + a.reshape(-1)

def softmax(x):
    """Row-wise exp(x_c) / sum(exp(x_i)), shifted by the row max for stability."""
    result = np.exp(x - x.max(axis=1).reshape(-1, 1))
    z = result.sum(axis=1).reshape(-1, 1)
    return result / z

def onehot(labels, C):
    """One-hot encode integer labels into a (labels.size, C) matrix."""
    result = np.zeros((labels.size, C), dtype=labels.dtype)
    result[np.arange(labels.size), labels.reshape(-1)] = 1
    return result

# Stochastic gradient descent support
# Updates included are vanilla, momentum, Nesterov's, Adagrad, and RMSprop
# Also supports fixed and inverse step decays
# Author: Sameh Khamis (sameh@umiacs.umd.edu)
# License: GPLv2 for non-commercial research purposes only

import numpy as np

class ParamDict(dict):
    """dict that lazily creates a zero array shaped like a param's value."""
    def __missing__(self, param):
        self[param] = np.zeros(param.value.shape, dtype=param.dtype)
        return self[param]

class Step:
    """Constant learning-rate schedule."""
    def __init__(self, alpha0):
        self._alpha0 = alpha0

    def get(self):
        return self._alpha0

class FixedDecay(Step):
    """alpha0 * gamma^floor(t / niter): step decay every niter iterations."""
    def __init__(self, alpha0=0.01, niter=100, gamma=0.95):
        self._alpha0 = alpha0
        self._niter = niter
        self._gamma = gamma

    def get(self, t):
        return self._alpha0 * self._gamma**int(t / self._niter)

class InverseDecay(Step):
    """alpha0 / (1 + gamma * t)^degree."""
    def __init__(self, alpha0=0.01, gamma=0.01, degree=1.0):
        self._alpha0 = alpha0
        self._gamma = gamma
        self._degree = degree

    def get(self, t):
        return self._alpha0 / (1 + self._gamma * t)**self._degree

class ExponentialDecay(Step):
    """alpha0 * exp(-gamma * t)."""
    def __init__(self, alpha0=0.01, gamma=0.0005):
        self._alpha0 = alpha0
        self._gamma = gamma

    def get(self, t):
        return self._alpha0 * np.exp(-self._gamma * t)

class Update:  # Vanilla SGD (no momentum)
    def apply(self, params, rate):
        """One plain descent step: param -= rate * gradient."""
        for param in params:
            param._value += -rate * param.gradient

class NAG(Update):
    """Nesterov's accelerated gradient."""
    def __init__(self, momentum=0.9):
        self._momentum = momentum
        self._ahead_momentum = momentum
        self._updates = ParamDict()

    def apply(self, params, rate):
        for param in params:
            old_update = self._updates[param]
            # Bug fix: was self._param.gradient — NAG has no _param attribute,
            # so every apply() raised AttributeError; use the loop variable
            self._updates[param] = self._momentum * old_update - rate * param.gradient
            param._value += -self._ahead_momentum * old_update + (1 + self._ahead_momentum) * self._updates[param]

class SGD(NAG):
    """Classical momentum: NAG's rule with the look-ahead coefficient zeroed."""
    def __init__(self, momentum=0.9):
        self._momentum = momentum
        self._ahead_momentum = 0
        self._updates = ParamDict()

class RMSprop(Update):
    """Scale each step by a running RMS of recent gradients."""
    def __init__(self, cache_decay=0.99):
        self._cache_weight = cache_decay
        self._gradient_weight = 1 - cache_decay
        self._cache = ParamDict()

    def apply(self, params, rate):
        for param in params:
            self._cache[param] = self._cache_weight * self._cache[param] + self._gradient_weight * param.gradient**2
            # 1e-6 guards against division by zero for a fresh cache
            param._value += -rate * param.gradient / (np.sqrt(self._cache[param]) + 1e-6)

class Adagrad(RMSprop):
    """RMSprop with unit weights: the cache accumulates all squared gradients."""
    def __init__(self):
        self._cache_weight = 1
        self._gradient_weight = 1
        self._cache = ParamDict()
-------------------------------------------------------------------------------- 1 | from image import * 2 | __all__ = ['image'] 3 | -------------------------------------------------------------------------------- /pydeeplearn/image/image.pyx: -------------------------------------------------------------------------------- 1 | # Cythonized functions: im2col, col2im, transform, invtransform 2 | # Started from Andrej Karpathy's code, made faster by avoiding np.pad 3 | # Author: Sameh Khamis (sameh@umiacs.umd.edu) 4 | # License: GPLv2 for non-commercial research purposes only 5 | 6 | import numpy as np 7 | cimport numpy as np 8 | cimport cython 9 | 10 | DTYPE = np.float32 11 | ctypedef np.float32_t DTYPE_t 12 | 13 | @cython.boundscheck(False) 14 | @cython.wraparound(False) 15 | def im2col(np.ndarray[DTYPE_t, ndim=4] im, int filterH, int filterW, int padding, int stride): 16 | cdef int N = im.shape[0], C = im.shape[1], H = im.shape[2], W = im.shape[3] 17 | cdef np.ndarray[DTYPE_t, ndim=4] im_padded = np.empty((N, C, H + 2 * padding, W + 2 * padding), dtype=DTYPE) 18 | if padding > 0: 19 | im_padded[:, :, padding:-padding, padding:-padding] = im 20 | im_padded[:, :, :padding, :] = 0 21 | im_padded[:, :, -padding:, :] = 0 22 | im_padded[:, :, :, :padding] = 0 23 | im_padded[:, :, :, -padding:] = 0 24 | else: 25 | im_padded[:] = im 26 | 27 | cdef int newH = (H + 2 * padding - filterH) / stride + 1 28 | cdef int newW = (W + 2 * padding - filterW) / stride + 1 29 | 30 | cdef np.ndarray[DTYPE_t, ndim=2] col = np.empty((C * filterH * filterW, N * newH * newW), dtype=DTYPE) 31 | cdef int i, hj, wj, ci, hi, wi, c, r 32 | 33 | for hi in range(filterH): 34 | for wi in range(filterW): 35 | for ci in range(C): 36 | r = hi * filterW * C + wi * C + ci 37 | for i in range(N): 38 | for hj in range(newH): 39 | for wj in range(newW): 40 | c = i * newH * newH + hj * newW + wj 41 | col[r, c] = im_padded[i, ci, stride * hj + hi, stride * wj + wi] 42 | return col 43 | 44 | 
@cython.boundscheck(False)
@cython.wraparound(False)
def col2im(np.ndarray[DTYPE_t, ndim=2] col, int N, int C, int H, int W,
           int filterH, int filterW, int padding, int stride):
    """Inverse of im2col: scatter-add columns back into an (N, C, H, W) image.

    Overlapping receptive fields accumulate (+=), which is what the
    convolution backward pass requires.
    """
    cdef int newH = (H + 2 * padding - filterH) / stride + 1
    cdef int newW = (W + 2 * padding - filterW) / stride + 1

    cdef np.ndarray[DTYPE_t, ndim=4] im_padded = np.zeros((N, C, H + 2 * padding, W + 2 * padding), dtype=DTYPE)
    cdef int i, hj, wj, ci, hi, wi, c, r

    for ci in range(C):
        for hi in range(filterH):
            for wi in range(filterW):
                r = hi * filterW * C + wi * C + ci
                for i in range(N):
                    for hj in range(newH):
                        for wj in range(newW):
                            # BUG FIX: column index used `i * newH * newH`; the
                            # per-image stride is newH * newW (must mirror the
                            # fixed indexing in im2col, and read out of bounds
                            # when newH > newW with boundscheck disabled).
                            c = i * newH * newW + hj * newW + wj
                            im_padded[i, ci, stride * hj + hi, stride * wj + wi] += col[r, c]

    if padding > 0:
        # Strip the zero padding before returning
        return im_padded[:, :, padding:-padding, padding:-padding]
    return im_padded

@cython.boundscheck(False)
@cython.wraparound(False)
def transform(np.ndarray[DTYPE_t, ndim=4] im, np.ndarray[DTYPE_t, ndim=2] A):
    """Apply the 2x2 affine map A about the image center with bilinear sampling.

    Input/output layout is (N, H, W, C); source coordinates are clamped to
    the image borders.
    """
    cdef int N = im.shape[0], H = im.shape[1], W = im.shape[2], C = im.shape[3]

    cdef np.ndarray[DTYPE_t, ndim=4] transformed = np.empty((N, H, W, C), dtype=DTYPE)
    cdef int i, hj, wj, hk, wk, ci, hi, wi
    cdef DTYPE_t hin, win, alphaw, alphah

    for hi in range(H):
        for wi in range(W):
            # Map the centered output coordinate back to the source image
            hin, win = hi - H / 2.0, wi - W / 2.0
            alphah = A[0, 0] * hin + A[0, 1] * win + H / 2.0
            alphaw = A[1, 0] * hin + A[1, 1] * win + W / 2.0
            # Clamp the source position into the image
            alphah = 0 if alphah < 0 else H - 1 if alphah >= H else alphah
            alphaw = 0 if alphaw < 0 else W - 1 if alphaw >= W else alphaw

            hj, wj = int(alphah), int(alphaw)
            hk, wk = hj + 1, wj + 1
            hk = 0 if hk < 0 else H - 1 if hk >= H else hk
            wk = 0 if wk < 0 else W - 1 if wk >= W else wk

            # Bilinear weights: alpha is the weight of the lower-index neighbor
            alphah, alphaw = 1 - (alphah - hj), 1 - (alphaw - wj)

            for i in range(N):
                for ci in range(C):
                    transformed[i, hi, wi, ci] = alphah * alphaw * im[i, hj, wj, ci] +\
                        alphah * (1 - alphaw) * im[i, hj, wk, ci] +\
                        (1 - alphah) * alphaw * im[i, hk, wj, ci] +\
                        (1 - alphah) * (1 - alphaw) * im[i, hk, wk, ci]
    return transformed

@cython.boundscheck(False)
@cython.wraparound(False)
def invtransform(np.ndarray[DTYPE_t, ndim=4] transformed, np.ndarray[DTYPE_t, ndim=2] A):
    """Adjoint of transform: scatter-add each transformed pixel back through
    the same bilinear weights (used for gradient backpropagation).

    Out-of-range source positions are skipped rather than clamped, so the
    output starts at zero and only valid mappings contribute.
    """
    cdef int N = transformed.shape[0], H = transformed.shape[1], W = transformed.shape[2], C = transformed.shape[3]

    cdef np.ndarray[DTYPE_t, ndim=4] im = np.zeros((N, H, W, C), dtype=DTYPE)
    cdef int i, hj, wj, hk, wk, ci, hi, wi
    cdef DTYPE_t hin, win, alphaw, alphah

    for hi in range(H):
        for wi in range(W):
            hin, win = hi - H / 2.0, wi - W / 2.0
            alphah = A[0, 0] * hin + A[0, 1] * win + H / 2.0
            alphaw = A[1, 0] * hin + A[1, 1] * win + W / 2.0

            if alphah >= 0 and alphah < H and alphaw >= 0 and alphaw < W:
                hj, wj = int(alphah), int(alphaw)
                hk, wk = hj + 1, wj + 1
                hk = 0 if hk < 0 else H - 1 if hk >= H else hk
                wk = 0 if wk < 0 else W - 1 if wk >= W else wk

                alphah, alphaw = 1 - (alphah - hj), 1 - (alphaw - wj)

                for i in range(N):
                    for ci in range(C):
                        im[i, hj, wj, ci] += alphah * alphaw * transformed[i, hi, wi, ci]
                        im[i, hj, wk, ci] += alphah * (1 - alphaw) * transformed[i, hi, wi, ci]
                        im[i, hk, wj, ci] += (1 - alphah) * alphaw * transformed[i, hi, wi, ci]
                        im[i, hk, wk, ci] += (1 - alphah) * (1 - alphaw) * transformed[i, hi, wi, ci]
    return im
def set_input(self, data, labels=None):
    """Bind one data batch (and optionally labels) to the network's input nodes.

    A 3-D array is promoted to a singleton batch. When labels are omitted,
    a zero int32 label vector sized from the data node's shape is used so
    the graph stays evaluable.
    """
    # CNNs assumed to have one data node and one label node for now
    batch = data if data.ndim == 4 else data[np.newaxis]
    self._data[0]._value = batch
    if labels is None:
        labels = np.zeros(self._data[0].shape[0], dtype=np.int32)
    self._labels[0]._value = labels

def predict(self, data, batchsize=100):
    """Run a deterministic forward pass over `data` in batches and return
    the concatenated predictions."""
    if data.ndim < 4:
        data = data[np.newaxis]

    predicted = []
    for start in np.arange(0, data.shape[0], batchsize):
        # Bind this batch (dummy labels), evaluate, and collect the result
        self.set_input(data[start:start + batchsize])
        self.forward(deterministic=True)
        predicted.append(self.result)
    return np.concatenate(predicted)
def __init__(self, wordvecs, lambdaa=1e-3, update=None, step=None, name='net', root_dir=None):
    """Recursive net over parse trees.

    wordvecs -- WordVectors object used to initialize word representations
    lambdaa  -- L2 regularization weight on the composition/score matrices
    update   -- solver (defaults to a fresh RMSprop per instance)
    step     -- learning-rate schedule (defaults to a fresh InverseDecay)

    BUG FIX: the defaults were `update=RMSprop(), step=InverseDecay()`,
    which Python evaluates once at definition time — every RNN constructed
    with default arguments shared a single solver (and its gradient cache).
    None-sentinels give each instance its own solver/schedule.
    """
    if update is None:
        update = RMSprop()
    if step is None:
        step = InverseDecay()
    self.setup(update, step, name, root_dir)

    # The wordvecs object is used to initialize word representations to glove/word2vec
    d = wordvecs._d
    self._wordvecs = wordvecs
    # Composition matrix starts near [0.5*I; 0.5*I] (averaging children) plus small noise
    self._w = Param(np.r_[np.eye(d), np.eye(d)] * 0.5 + np.random.randn(2 * d, d) * 0.01)
    self._b = Param.zeros((d,))
    self._wscore = Param.randn((d, 5))   # 5-way sentiment scores
    self._bscore = Param.zeros((5,))
    self._words = {}                     # lazily-created per-word Params
    self._lambdaa = lambdaa

def _get_param(self, word):
    """Return the trainable Param for a word (case-insensitive), creating it
    from the pretrained vector on first use."""
    key = word.lower()
    if key not in self._words:
        wordvec = self._wordvecs.get_vector(key).reshape(1, -1)
        self._words[key] = Param(wordvec)
    return self._words[key]
def _parse(self, tree):
    """Build the objective expression for one tree.

    First flattens the tree so children are visited before parents, then
    composes leaf word vectors upward with Tanh(Affine(Concat(...))) and
    sums a Softmax classification loss at every node.
    """
    pending = [tree]   # pre-order stack
    ordered = []       # post-order stack
    while pending:
        current = pending.pop()
        if current.left is not None:  # and current.right is not None
            pending.append(current.left)
            pending.append(current.right)
        ordered.append(current)

    node_expr = {}
    objective = None

    while ordered:
        current = ordered.pop()
        if current.left is not None:  # and current.right is not None
            # Internal node: compose the two child representations
            node_expr[current] = Tanh(Affine(Concat(node_expr[current.left], node_expr[current.right]), self._w, self._b))
        else:
            # Leaf: look up (or create) the word's Param
            node_expr[current] = self._get_param(current.value)

        # Every node contributes a supervised softmax term
        target = Label()
        target._value = np.array([int(current.label)])
        term = Softmax(Affine(node_expr[current], self._wscore, self._bscore), target)
        objective = term if objective is None else objective + term

    return objective

def predict(self, trees):
    """Evaluate each tree and return (predictions, ground-truth labels),
    both concatenated across trees."""
    predicted = []
    actual = []

    for tree in trees:
        # Rebuild the objective for this tree, then evaluate it
        self.parse(tree)
        self.forward()
        predicted.append(self.result)
        actual.append(self.groundtruth)

    return (np.concatenate(predicted), np.concatenate(actual))
class PTBTree:
    """Binary parse-tree node in the Penn Treebank sentiment format.

    A node is either a leaf (left/right are None, value is the word) or an
    internal node whose value is the space-joined phrase of its subtree.
    `size` counts all nodes in the subtree.
    """
    def __init__(self, value, label, left, right, size):
        self._value = value
        self._label = label
        self._left = left
        self._right = right
        self._size = size

    def _shifted_str(self, level=0):
        # Render this subtree one node per line, indented by depth
        s = ('%02d: ' % level) + '_' * (level * 4)
        if self._left is not None: # and self._right is not None
            s += ' : %s\n' % (self._label)
            s += self._left._shifted_str(level + 1)
            s += self._right._shifted_str(level + 1)
        else:
            s += '%s : %s\n' % (self._value, self._label)
        return s

    def __str__(self):
        return self._shifted_str(0)

    def get_words(self, l=None):
        """Return the leaf words of this subtree in left-to-right order.

        BUG FIX: the accumulator default was `l=[]` — a mutable default is
        created once and shared across every call (and every instance), so
        repeated calls kept appending to the same list. A None sentinel
        allocates a fresh list per top-level call.
        """
        if l is None:
            l = []
        if self._left is not None: # and self._right is not None
            self._left.get_words(l)
            self._right.get_words(l)
        else:
            l.append(self._value)
        return l

    @property
    def left(self):
        return self._left

    @property
    def right(self):
        return self._right

    @property
    def value(self):
        return self._value

    @property
    def label(self):
        return self._label

    @property
    def size(self):
        return self._size

    @staticmethod
    def parse(s):
        """Parse a PTB s-expression like "(3 (2 hello) (4 world))" into a
        PTBTree; returns None for an empty string."""
        if len(s) == 0:
            return None
        return PTBTree._parse(s)[1]

    @staticmethod
    def _parse(s, i=0):
        # Recursive-descent parse starting at index i; returns (next_index, node)
        j = s.find(' ', i)
        label = s[i + 1:j] # skip '('
        j += 1
        if s[j] == '(':
            k, left = PTBTree._parse(s, j)
            k, right = PTBTree._parse(s, k + 1) # skip ' '
            value = left._value + ' ' + right._value
            size = left._size + right._size + 1
        else:
            k = s.find(')', j)
            value = s[j:k]
            left = right = None
            size = 1

        k += 1 # skip ')'
        return k, PTBTree(value, label, left, right, size)
@staticmethod
def from_word2vec(d):
    raise Exception("not implemented yet!")

def project(self, W):
    """Project the vocabulary through W (old_d x new_d) in place, refreshing
    the cached norms and the unknown-word vector."""
    assert W.shape[0] == self._d
    projected = self._wordmatrix.dot(W.astype(np.float32))
    self._d = W.shape[1]
    self._wordmatrix = projected
    self._wordnorms = np.linalg.norm(self._wordmatrix, axis=1)
    self._unknown = np.mean(self._wordmatrix, axis=0)

def get_vector(self, word):
    """Return the vector for `word`, or the mean (unknown) vector if absent."""
    if word in self._worddict:
        return self._wordmatrix[self._worddict[word]]
    return self._unknown

def get_analogy(self, worda, wordb, wordc, k=10):
    """Answer "worda is to wordb as wordc is to ?" by cosine similarity.

    Returns the top-k (word, similarity) pairs, best first.
    """
    query = self.get_vector(wordb) - self.get_vector(worda) + self.get_vector(wordc)
    cossim = self._wordmatrix.dot(query) / self._wordnorms / np.linalg.norm(query)
    best = np.argsort(cossim)[-k:][::-1]
    return [(self._reversedict[i], cossim[i]) for i in best]