├── .coveragerc ├── .gitignore ├── .pylintrc ├── AUTHORS ├── CHANGES.md ├── CONTRIBUTING ├── CONTRIBUTORS ├── LICENSE.txt ├── README.rst ├── examples └── README.md ├── g3doc ├── api_docs │ └── python │ │ ├── estimators.md │ │ ├── index.md │ │ ├── io.md │ │ ├── models.md │ │ ├── ops.array_ops.md │ │ ├── ops.md │ │ ├── preprocessing.md │ │ └── trainer.md ├── get_started │ └── index.md ├── how_to │ └── index.md ├── images │ ├── text_classification_rnn_graph.png │ └── text_classification_rnn_loss.png └── index.md ├── scripts ├── docs │ ├── docs.py │ ├── gen_docs.sh │ └── gen_docs_combined.py ├── run_tests.sh └── travis_install.sh ├── setup.cfg ├── setup.py └── skflow └── __init__.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | 4 | [report] 5 | omit = 6 | */tests/* 7 | */ops/batch_norm_ops.py 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # vi temp files 2 | .*.swp 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | testenv/ 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *,cover 49 | tmp_saver3/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | 58 | # Sphinx documentation 59 | docs/_build/ 60 | 61 | # PyBuilder 62 | target/ 63 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | ignore=tests 4 | 5 | disable=invalid-name,wildcard-import,too-many-arguments,attribute-defined-outside-init,no-member,too-many-instance-attributes,too-few-public-methods,import-error,super-on-old-class,fixme,protected-access,locally-disabled,cyclic-import,too-many-locals,duplicate-code 6 | 7 | dummy-variables-rgx=(unused|)_.* 8 | 9 | reports=no 10 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | # This is the official list of Scikit Flow authors for copyright purposes. 2 | # This file is distinct from the CONTRIBUTORS files. 3 | # See the latter for an explanation. 4 | 5 | # Names should be added to this file as: 6 | # Name or Organization 7 | # The email address is not required for organizations. 8 | # 9 | # If you have contributed a few PRs, feel free to send a PR adding yourself here. 10 | 11 | Google Inc. 12 | Yuan Tang terrytangyuan@gmail.com 13 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | Scikit Flow Change Log 2 | ================= 3 | 4 | ## Development Version (master branch) 5 | * Performance and session related configurations, e.g. 
`num_cores`, `gpu_memory_fraction`, can now be wrapped in a ConfigAddon object and then passed into estimator. Example is available. 6 | * Added Monitor support mimicking scikit-learn that allows various monitoring tasks, e.g. loss for a validation set. 7 | * Prediction for multi-class classification in estimator is more memory efficient for large number of classes. 8 | * Various bug fixes: #108, #114, #109 9 | 10 | ## v0.1.0 (Feb 13th, 2016) 11 | 12 | * Initial release 13 | 14 | -------------------------------------------------------------------------------- /CONTRIBUTING: -------------------------------------------------------------------------------- 1 | Want to contribute? Great! First, read this page (including the small print at the end). 2 | 3 | ### Before you contribute 4 | Before we can use your code, you must sign the 5 | [Google Individual Contributor License Agreement] 6 | (https://cla.developers.google.com/about/google-individual) 7 | (CLA), which you can do online. The CLA is necessary mainly because you own the 8 | copyright to your changes, even after your contribution becomes part of our 9 | codebase, so we need your permission to use and distribute your code. We also 10 | need to be sure of various other things—for instance that you'll tell us if you 11 | know that your code infringes on other people's patents. You don't have to sign 12 | the CLA until after you've submitted your code for review and a member has 13 | approved it, but you must do it before we can put your code into our codebase. 14 | Before you start working on a larger contribution, you should get in touch with 15 | us first through the issue tracker with your idea so that we can help out and 16 | possibly guide you. Coordinating up front makes it much easier to avoid 17 | frustration later on. 18 | 19 | ### Code reviews 20 | All submissions, including submissions by project members, require review. We 21 | use Github pull requests for this purpose. 
22 | 23 | ### The small print 24 | Contributions made by corporations are covered by a different agreement than 25 | the one above, the 26 | [Software Grant and Corporate Contributor License Agreement] 27 | (https://cla.developers.google.com/about/google-corporate). 28 | 29 | -------------------------------------------------------------------------------- /CONTRIBUTORS: -------------------------------------------------------------------------------- 1 | # People who have agreed to one of the CLAs and can contribute patches. 2 | # The AUTHORS file lists the copyright holders; this file 3 | # lists people. For example, Google employees are listed here 4 | # but not in AUTHORS, because Google holds the copyright. 5 | # 6 | # https://developers.google.com/open-source/cla/individual 7 | # https://developers.google.com/open-source/cla/corporate 8 | # 9 | # Names should be added to this file as: 10 | # Name 11 | # 12 | # If you have contributed a few PRs, feel free to send a PR adding yourself here. 13 | 14 | Illia Polosukhin illia.polosukhin@gmail.com 15 | Yuan Tang terrytangyuan@gmail.com 16 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright 2015-present The Scikit Flow Authors. All rights reserved. 2 | 3 | Apache License 4 | Version 2.0, January 2004 5 | http://www.apache.org/licenses/ 6 | 7 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 8 | 9 | 1. Definitions. 10 | 11 | "License" shall mean the terms and conditions for use, reproduction, 12 | and distribution as defined by Sections 1 through 9 of this document. 13 | 14 | "Licensor" shall mean the copyright owner or entity authorized by 15 | the copyright owner that is granting the License. 
16 | 17 | "Legal Entity" shall mean the union of the acting entity and all 18 | other entities that control, are controlled by, or are under common 19 | control with that entity. For the purposes of this definition, 20 | "control" means (i) the power, direct or indirect, to cause the 21 | direction or management of such entity, whether by contract or 22 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 23 | outstanding shares, or (iii) beneficial ownership of such entity. 24 | 25 | "You" (or "Your") shall mean an individual or Legal Entity 26 | exercising permissions granted by this License. 27 | 28 | "Source" form shall mean the preferred form for making modifications, 29 | including but not limited to software source code, documentation 30 | source, and configuration files. 31 | 32 | "Object" form shall mean any form resulting from mechanical 33 | transformation or translation of a Source form, including but 34 | not limited to compiled object code, generated documentation, 35 | and conversions to other media types. 36 | 37 | "Work" shall mean the work of authorship, whether in Source or 38 | Object form, made available under the License, as indicated by a 39 | copyright notice that is included in or attached to the work 40 | (an example is provided in the Appendix below). 41 | 42 | "Derivative Works" shall mean any work, whether in Source or Object 43 | form, that is based on (or derived from) the Work and for which the 44 | editorial revisions, annotations, elaborations, or other modifications 45 | represent, as a whole, an original work of authorship. For the purposes 46 | of this License, Derivative Works shall not include works that remain 47 | separable from, or merely link (or bind by name) to the interfaces of, 48 | the Work and Derivative Works thereof. 
49 | 50 | "Contribution" shall mean any work of authorship, including 51 | the original version of the Work and any modifications or additions 52 | to that Work or Derivative Works thereof, that is intentionally 53 | submitted to Licensor for inclusion in the Work by the copyright owner 54 | or by an individual or Legal Entity authorized to submit on behalf of 55 | the copyright owner. For the purposes of this definition, "submitted" 56 | means any form of electronic, verbal, or written communication sent 57 | to the Licensor or its representatives, including but not limited to 58 | communication on electronic mailing lists, source code control systems, 59 | and issue tracking systems that are managed by, or on behalf of, the 60 | Licensor for the purpose of discussing and improving the Work, but 61 | excluding communication that is conspicuously marked or otherwise 62 | designated in writing by the copyright owner as "Not a Contribution." 63 | 64 | "Contributor" shall mean Licensor and any individual or Legal Entity 65 | on behalf of whom a Contribution has been received by Licensor and 66 | subsequently incorporated within the Work. 67 | 68 | 2. Grant of Copyright License. Subject to the terms and conditions of 69 | this License, each Contributor hereby grants to You a perpetual, 70 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 71 | copyright license to reproduce, prepare Derivative Works of, 72 | publicly display, publicly perform, sublicense, and distribute the 73 | Work and such Derivative Works in Source or Object form. 74 | 75 | 3. Grant of Patent License. 
Subject to the terms and conditions of 76 | this License, each Contributor hereby grants to You a perpetual, 77 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 78 | (except as stated in this section) patent license to make, have made, 79 | use, offer to sell, sell, import, and otherwise transfer the Work, 80 | where such license applies only to those patent claims licensable 81 | by such Contributor that are necessarily infringed by their 82 | Contribution(s) alone or by combination of their Contribution(s) 83 | with the Work to which such Contribution(s) was submitted. If You 84 | institute patent litigation against any entity (including a 85 | cross-claim or counterclaim in a lawsuit) alleging that the Work 86 | or a Contribution incorporated within the Work constitutes direct 87 | or contributory patent infringement, then any patent licenses 88 | granted to You under this License for that Work shall terminate 89 | as of the date such litigation is filed. 90 | 91 | 4. Redistribution. 
You may reproduce and distribute copies of the 92 | Work or Derivative Works thereof in any medium, with or without 93 | modifications, and in Source or Object form, provided that You 94 | meet the following conditions: 95 | 96 | (a) You must give any other recipients of the Work or 97 | Derivative Works a copy of this License; and 98 | 99 | (b) You must cause any modified files to carry prominent notices 100 | stating that You changed the files; and 101 | 102 | (c) You must retain, in the Source form of any Derivative Works 103 | that You distribute, all copyright, patent, trademark, and 104 | attribution notices from the Source form of the Work, 105 | excluding those notices that do not pertain to any part of 106 | the Derivative Works; and 107 | 108 | (d) If the Work includes a "NOTICE" text file as part of its 109 | distribution, then any Derivative Works that You distribute must 110 | include a readable copy of the attribution notices contained 111 | within such NOTICE file, excluding those notices that do not 112 | pertain to any part of the Derivative Works, in at least one 113 | of the following places: within a NOTICE text file distributed 114 | as part of the Derivative Works; within the Source form or 115 | documentation, if provided along with the Derivative Works; or, 116 | within a display generated by the Derivative Works, if and 117 | wherever such third-party notices normally appear. The contents 118 | of the NOTICE file are for informational purposes only and 119 | do not modify the License. You may add Your own attribution 120 | notices within Derivative Works that You distribute, alongside 121 | or as an addendum to the NOTICE text from the Work, provided 122 | that such additional attribution notices cannot be construed 123 | as modifying the License. 
124 | 125 | You may add Your own copyright statement to Your modifications and 126 | may provide additional or different license terms and conditions 127 | for use, reproduction, or distribution of Your modifications, or 128 | for any such Derivative Works as a whole, provided Your use, 129 | reproduction, and distribution of the Work otherwise complies with 130 | the conditions stated in this License. 131 | 132 | 5. Submission of Contributions. Unless You explicitly state otherwise, 133 | any Contribution intentionally submitted for inclusion in the Work 134 | by You to the Licensor shall be under the terms and conditions of 135 | this License, without any additional terms or conditions. 136 | Notwithstanding the above, nothing herein shall supersede or modify 137 | the terms of any separate license agreement you may have executed 138 | with Licensor regarding such Contributions. 139 | 140 | 6. Trademarks. This License does not grant permission to use the trade 141 | names, trademarks, service marks, or product names of the Licensor, 142 | except as required for reasonable and customary use in describing the 143 | origin of the Work and reproducing the content of the NOTICE file. 144 | 145 | 7. Disclaimer of Warranty. Unless required by applicable law or 146 | agreed to in writing, Licensor provides the Work (and each 147 | Contributor provides its Contributions) on an "AS IS" BASIS, 148 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 149 | implied, including, without limitation, any warranties or conditions 150 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 151 | PARTICULAR PURPOSE. You are solely responsible for determining the 152 | appropriateness of using or redistributing the Work and assume any 153 | risks associated with Your exercise of permissions under this License. 154 | 155 | 8. Limitation of Liability. 
In no event and under no legal theory, 156 | whether in tort (including negligence), contract, or otherwise, 157 | unless required by applicable law (such as deliberate and grossly 158 | negligent acts) or agreed to in writing, shall any Contributor be 159 | liable to You for damages, including any direct, indirect, special, 160 | incidental, or consequential damages of any character arising as a 161 | result of this License or out of the use or inability to use the 162 | Work (including but not limited to damages for loss of goodwill, 163 | work stoppage, computer failure or malfunction, or any and all 164 | other commercial damages or losses), even if such Contributor 165 | has been advised of the possibility of such damages. 166 | 167 | 9. Accepting Warranty or Additional Liability. While redistributing 168 | the Work or Derivative Works thereof, You may choose to offer, 169 | and charge a fee for, acceptance of support, warranty, indemnity, 170 | or other liability obligations and/or rights consistent with this 171 | License. However, in accepting such obligations, You may act only 172 | on Your own behalf and on Your sole responsibility, not on behalf 173 | of any other Contributor, and only if You agree to indemnify, 174 | defend, and hold each Contributor harmless for any liability 175 | incurred by, or claims asserted against, such Contributor by reason 176 | of your accepting any such warranty or additional liability. 177 | 178 | END OF TERMS AND CONDITIONS 179 | 180 | APPENDIX: How to apply the Apache License to your work. 181 | 182 | To apply the Apache License to your work, attach the following 183 | boilerplate notice, with the fields enclosed by brackets "[]" 184 | replaced with your own identifying information. (Don't include 185 | the brackets!) The text should be enclosed in the appropriate 186 | comment syntax for the file format. 
We also recommend that a 187 | file or class name and description of purpose be included on the 188 | same "printed page" as the copyright notice for easier 189 | identification within third-party archives. 190 | 191 | Copyright 2015-present, Scikit Flow Authors. 192 | 193 | Licensed under the Apache License, Version 2.0 (the "License"); 194 | you may not use this file except in compliance with the License. 195 | You may obtain a copy of the License at 196 | 197 | http://www.apache.org/licenses/LICENSE-2.0 198 | 199 | Unless required by applicable law or agreed to in writing, software 200 | distributed under the License is distributed on an "AS IS" BASIS, 201 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 202 | See the License for the specific language governing permissions and 203 | limitations under the License. 204 | 205 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | SkFlow has been moved to Tensorflow. 2 | ==================================== 3 | 4 | SkFlow has been moved to http://github.com/tensorflow/tensorflow into contrib folder specifically located `here `__. 5 | The development will continue there. Please submit any issues and pull requests to Tensorflow repository instead. 6 | 7 | This repository will ramp down, including after next Tensorflow release we will wind down code here. 8 | Please see instructions on most recent installation `here `__. 
9 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples has been moved to Tensorflow repo 2 | 3 | You can find all the latest examples in [Tensorflow repo](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/learn) 4 | 5 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/estimators.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # Estimators 6 | [TOC] 7 | 8 | Scikit Flow Estimators. 9 | 10 | ## Other Functions and Classes 11 | - - - 12 | 13 | ### `class skflow.TensorFlowClassifier` {#TensorFlowClassifier} 14 | 15 | TensorFlow Linear Classifier model. 16 | - - - 17 | 18 | #### `skflow.TensorFlowClassifier.__init__(n_classes, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, class_weight=None, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowClassifier.__init__} 19 | 20 | 21 | 22 | 23 | - - - 24 | 25 | #### `skflow.TensorFlowClassifier.bias_` {#TensorFlowClassifier.bias_} 26 | 27 | Returns weights of the linear classifier. 28 | 29 | 30 | - - - 31 | 32 | #### `skflow.TensorFlowClassifier.fit(X, y, monitor=None, logdir=None)` {#TensorFlowClassifier.fit} 33 | 34 | Builds a neural network model given provided `model_fn` and training 35 | data X and y. 36 | 37 | Note: called first time constructs the graph and initializers 38 | variables. Consecutives times it will continue training the same model. 39 | This logic follows partial_fit() interface in scikit-learn. 40 | 41 | To restart learning, create new estimator. 42 | 43 | ##### Args: 44 | 45 | 46 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 47 | iterator that returns arrays of features. 
The training input 48 | samples for fitting the model. 49 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 50 | iterator that returns array of targets. The training target values 51 | (class labels in classification, real numbers in regression). 52 | * `monitor`: Monitor object to print training progress and invoke early stopping 53 | * `logdir`: the directory to save the log file that can be used for 54 | optional visualization. 55 | 56 | ##### Returns: 57 | 58 | Returns self. 59 | 60 | 61 | - - - 62 | 63 | #### `skflow.TensorFlowClassifier.get_params(deep=True)` {#TensorFlowClassifier.get_params} 64 | 65 | Get parameters for this estimator. 66 | 67 | Parameters 68 | ---------- 69 | deep: boolean, optional 70 | If True, will return the parameters for this estimator and 71 | contained subobjects that are estimators. 72 | 73 | Returns 74 | ------- 75 | params : mapping of string to any 76 | Parameter names mapped to their values. 77 | 78 | 79 | - - - 80 | 81 | #### `skflow.TensorFlowClassifier.get_tensor(name)` {#TensorFlowClassifier.get_tensor} 82 | 83 | Returns tensor by name. 84 | 85 | ##### Args: 86 | 87 | 88 | * `name`: string, name of the tensor. 89 | 90 | ##### Returns: 91 | 92 | Tensor. 93 | 94 | 95 | - - - 96 | 97 | #### `skflow.TensorFlowClassifier.get_tensor_value(name)` {#TensorFlowClassifier.get_tensor_value} 98 | 99 | Returns value of the tensor give by name. 100 | 101 | ##### Args: 102 | 103 | 104 | * `name`: string, name of the tensor. 105 | 106 | ##### Returns: 107 | 108 | Numpy array - value of the tensor. 109 | 110 | 111 | - - - 112 | 113 | #### `skflow.TensorFlowClassifier.partial_fit(X, y)` {#TensorFlowClassifier.partial_fit} 114 | 115 | Incremental fit on a batch of samples. 116 | 117 | This method is expected to be called several times consecutively 118 | on different or the same chunks of the dataset. This either can 119 | implement iterative training or out-of-core/online training. 
120 | 121 | This is especially useful when the whole dataset is too big to 122 | fit in memory at the same time. Or when model is taking long time 123 | to converge, and you want to split up training into subparts. 124 | 125 | ##### Args: 126 | 127 | 128 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 129 | iterator that returns arrays of features. The training input 130 | samples for fitting the model. 131 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 132 | iterator that returns array of targets. The training target values 133 | (class label in classification, real numbers in regression). 134 | 135 | ##### Returns: 136 | 137 | Returns self. 138 | 139 | 140 | - - - 141 | 142 | #### `skflow.TensorFlowClassifier.predict(X, axis=1, batch_size=-1)` {#TensorFlowClassifier.predict} 143 | 144 | Predict class or regression for X. 145 | 146 | For a classification model, the predicted class for each sample in X is 147 | returned. For a regression model, the predicted value based on X is 148 | returned. 149 | 150 | ##### Args: 151 | 152 | 153 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 154 | * `axis`: Which axis to argmax for classification. 155 | By default axis 1 (next after batch) is used. 156 | Use 2 for sequence predictions. 157 | * `batch_size`: If test set is too big, use batch size to split 158 | it into mini batches. By default full dataset is used. 159 | 160 | ##### Returns: 161 | 162 | 163 | * `y`: array of shape [n_samples]. The predicted classes or predicted 164 | value. 165 | 166 | 167 | - - - 168 | 169 | #### `skflow.TensorFlowClassifier.predict_proba(X, batch_size=-1)` {#TensorFlowClassifier.predict_proba} 170 | 171 | Predict class probability of the input samples X. 172 | 173 | ##### Args: 174 | 175 | 176 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 177 | * `batch_size`: If test set is too big, use batch size to split 178 | it into mini batches. 
By default full dataset is used. 179 | 180 | ##### Returns: 181 | 182 | 183 | * `y`: array of shape [n_samples, n_classes]. The predicted 184 | probabilities for each class. 185 | 186 | 187 | - - - 188 | 189 | #### `skflow.TensorFlowClassifier.restore(cls, path, config_addon=None)` {#TensorFlowClassifier.restore} 190 | 191 | Restores model from given path. 192 | 193 | ##### Args: 194 | 195 | 196 | * `path`: Path to the checkpoints and other model information. 197 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 198 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 199 | 200 | ##### Returns: 201 | 202 | Estimator, object of the subclass of TensorFlowEstimator. 203 | 204 | 205 | - - - 206 | 207 | #### `skflow.TensorFlowClassifier.save(path)` {#TensorFlowClassifier.save} 208 | 209 | Saves checkpoints and graph to given path. 210 | 211 | ##### Args: 212 | 213 | 214 | * `path`: Folder to save model to. 215 | 216 | 217 | - - - 218 | 219 | #### `skflow.TensorFlowClassifier.score(X, y, sample_weight=None)` {#TensorFlowClassifier.score} 220 | 221 | Returns the mean accuracy on the given test data and labels. 222 | 223 | In multi-label classification, this is the subset accuracy 224 | which is a harsh metric since you require for each sample that 225 | each label set be correctly predicted. 226 | 227 | Parameters 228 | ---------- 229 | X : array-like, shape = (n_samples, n_features) 230 | Test samples. 231 | 232 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 233 | True labels for X. 234 | 235 | sample_weight : array-like, shape = [n_samples], optional 236 | Sample weights. 237 | 238 | Returns 239 | ------- 240 | score : float 241 | Mean accuracy of self.predict(X) wrt. y. 242 | 243 | 244 | - - - 245 | 246 | #### `skflow.TensorFlowClassifier.set_params(**params)` {#TensorFlowClassifier.set_params} 247 | 248 | Set the parameters of this estimator. 
249 | 250 | The method works on simple estimators as well as on nested objects 251 | (such as pipelines). The former have parameters of the form 252 | ``__`` so that it's possible to update each 253 | component of a nested object. 254 | 255 | Returns 256 | ------- 257 | self 258 | 259 | 260 | - - - 261 | 262 | #### `skflow.TensorFlowClassifier.weights_` {#TensorFlowClassifier.weights_} 263 | 264 | Returns weights of the linear classifier. 265 | 266 | 267 | 268 | - - - 269 | 270 | ### `class skflow.TensorFlowDNNClassifier` {#TensorFlowDNNClassifier} 271 | 272 | TensorFlow DNN Classifier model. 273 | 274 | Parameters: 275 | hidden_units: List of hidden units per layer. 276 | n_classes: Number of classes in the target. 277 | tf_master: TensorFlow master. Empty string is default for local. 278 | batch_size: Mini batch size. 279 | steps: Number of steps to run over data. 280 | optimizer: Optimizer name (or class), for example "SGD", "Adam", 281 | "Adagrad". 282 | learning_rate: If this is a constant float value, no decay function is used. 283 | Instead, a customized decay function can be passed that accepts 284 | global_step as parameter and returns a Tensor. 285 | e.g. exponential decay function: 286 | def exp_decay(global_step): 287 | return tf.train.exponential_decay( 288 | learning_rate=0.1, global_step, 289 | decay_steps=2, decay_rate=0.001) 290 | class_weight: None or list of n_classes floats. Weight associated with 291 | classes for loss computation. If not given, all classes are supposed to have 292 | weight one. 293 | tf_random_seed: Random seed for TensorFlow initializers. 294 | Setting this value allows consistency between reruns. 295 | continue_training: when continue_training is True, once initialized 296 | model will be continually trained on every call of fit. 297 | config_addon: ConfigAddon object that controls the configurations of the session, 298 | e.g. num_cores, gpu_memory_fraction, etc. 
299 | max_to_keep: The maximum number of recent checkpoint files to keep. 300 | As new files are created, older files are deleted. 301 | If None or 0, all checkpoint files are kept. 302 | Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 303 | keep_checkpoint_every_n_hours: Number of hours between each checkpoint 304 | to be saved. The default value of 10,000 hours effectively disables the feature. 305 | - - - 306 | 307 | #### `skflow.TensorFlowDNNClassifier.__init__(hidden_units, n_classes, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, class_weight=None, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowDNNClassifier.__init__} 308 | 309 | 310 | 311 | 312 | - - - 313 | 314 | #### `skflow.TensorFlowDNNClassifier.bias_` {#TensorFlowDNNClassifier.bias_} 315 | 316 | Returns bias of the DNN's bias layers. 317 | 318 | 319 | - - - 320 | 321 | #### `skflow.TensorFlowDNNClassifier.fit(X, y, monitor=None, logdir=None)` {#TensorFlowDNNClassifier.fit} 322 | 323 | Builds a neural network model given provided `model_fn` and training 324 | data X and y. 325 | 326 | Note: called first time constructs the graph and initializers 327 | variables. Consecutives times it will continue training the same model. 328 | This logic follows partial_fit() interface in scikit-learn. 329 | 330 | To restart learning, create new estimator. 331 | 332 | ##### Args: 333 | 334 | 335 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 336 | iterator that returns arrays of features. The training input 337 | samples for fitting the model. 338 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 339 | iterator that returns array of targets. The training target values 340 | (class labels in classification, real numbers in regression). 
341 | * `monitor`: Monitor object to print training progress and invoke early stopping 342 | * `logdir`: the directory to save the log file that can be used for 343 | optional visualization. 344 | 345 | ##### Returns: 346 | 347 | Returns self. 348 | 349 | 350 | - - - 351 | 352 | #### `skflow.TensorFlowDNNClassifier.get_params(deep=True)` {#TensorFlowDNNClassifier.get_params} 353 | 354 | Get parameters for this estimator. 355 | 356 | Parameters 357 | ---------- 358 | deep: boolean, optional 359 | If True, will return the parameters for this estimator and 360 | contained subobjects that are estimators. 361 | 362 | Returns 363 | ------- 364 | params : mapping of string to any 365 | Parameter names mapped to their values. 366 | 367 | 368 | - - - 369 | 370 | #### `skflow.TensorFlowDNNClassifier.get_tensor(name)` {#TensorFlowDNNClassifier.get_tensor} 371 | 372 | Returns tensor by name. 373 | 374 | ##### Args: 375 | 376 | 377 | * `name`: string, name of the tensor. 378 | 379 | ##### Returns: 380 | 381 | Tensor. 382 | 383 | 384 | - - - 385 | 386 | #### `skflow.TensorFlowDNNClassifier.get_tensor_value(name)` {#TensorFlowDNNClassifier.get_tensor_value} 387 | 388 | Returns value of the tensor give by name. 389 | 390 | ##### Args: 391 | 392 | 393 | * `name`: string, name of the tensor. 394 | 395 | ##### Returns: 396 | 397 | Numpy array - value of the tensor. 398 | 399 | 400 | - - - 401 | 402 | #### `skflow.TensorFlowDNNClassifier.partial_fit(X, y)` {#TensorFlowDNNClassifier.partial_fit} 403 | 404 | Incremental fit on a batch of samples. 405 | 406 | This method is expected to be called several times consecutively 407 | on different or the same chunks of the dataset. This either can 408 | implement iterative training or out-of-core/online training. 409 | 410 | This is especially useful when the whole dataset is too big to 411 | fit in memory at the same time. Or when model is taking long time 412 | to converge, and you want to split up training into subparts. 
413 | 414 | ##### Args: 415 | 416 | 417 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 418 | iterator that returns arrays of features. The training input 419 | samples for fitting the model. 420 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 421 | iterator that returns array of targets. The training target values 422 | (class label in classification, real numbers in regression). 423 | 424 | ##### Returns: 425 | 426 | Returns self. 427 | 428 | 429 | - - - 430 | 431 | #### `skflow.TensorFlowDNNClassifier.predict(X, axis=1, batch_size=-1)` {#TensorFlowDNNClassifier.predict} 432 | 433 | Predict class or regression for X. 434 | 435 | For a classification model, the predicted class for each sample in X is 436 | returned. For a regression model, the predicted value based on X is 437 | returned. 438 | 439 | ##### Args: 440 | 441 | 442 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 443 | * `axis`: Which axis to argmax for classification. 444 | By default axis 1 (next after batch) is used. 445 | Use 2 for sequence predictions. 446 | * `batch_size`: If test set is too big, use batch size to split 447 | it into mini batches. By default full dataset is used. 448 | 449 | ##### Returns: 450 | 451 | 452 | * `y`: array of shape [n_samples]. The predicted classes or predicted 453 | value. 454 | 455 | 456 | - - - 457 | 458 | #### `skflow.TensorFlowDNNClassifier.predict_proba(X, batch_size=-1)` {#TensorFlowDNNClassifier.predict_proba} 459 | 460 | Predict class probability of the input samples X. 461 | 462 | ##### Args: 463 | 464 | 465 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 466 | * `batch_size`: If test set is too big, use batch size to split 467 | it into mini batches. By default full dataset is used. 468 | 469 | ##### Returns: 470 | 471 | 472 | * `y`: array of shape [n_samples, n_classes]. The predicted 473 | probabilities for each class. 
474 | 475 | 476 | - - - 477 | 478 | #### `skflow.TensorFlowDNNClassifier.restore(cls, path, config_addon=None)` {#TensorFlowDNNClassifier.restore} 479 | 480 | Restores model from given path. 481 | 482 | ##### Args: 483 | 484 | 485 | * `path`: Path to the checkpoints and other model information. 486 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 487 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 488 | 489 | ##### Returns: 490 | 491 | Estimator, object of the subclass of TensorFlowEstimator. 492 | 493 | 494 | - - - 495 | 496 | #### `skflow.TensorFlowDNNClassifier.save(path)` {#TensorFlowDNNClassifier.save} 497 | 498 | Saves checkpoints and graph to given path. 499 | 500 | ##### Args: 501 | 502 | 503 | * `path`: Folder to save model to. 504 | 505 | 506 | - - - 507 | 508 | #### `skflow.TensorFlowDNNClassifier.score(X, y, sample_weight=None)` {#TensorFlowDNNClassifier.score} 509 | 510 | Returns the mean accuracy on the given test data and labels. 511 | 512 | In multi-label classification, this is the subset accuracy 513 | which is a harsh metric since you require for each sample that 514 | each label set be correctly predicted. 515 | 516 | Parameters 517 | ---------- 518 | X : array-like, shape = (n_samples, n_features) 519 | Test samples. 520 | 521 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 522 | True labels for X. 523 | 524 | sample_weight : array-like, shape = [n_samples], optional 525 | Sample weights. 526 | 527 | Returns 528 | ------- 529 | score : float 530 | Mean accuracy of self.predict(X) wrt. y. 531 | 532 | 533 | - - - 534 | 535 | #### `skflow.TensorFlowDNNClassifier.set_params(**params)` {#TensorFlowDNNClassifier.set_params} 536 | 537 | Set the parameters of this estimator. 538 | 539 | The method works on simple estimators as well as on nested objects 540 | (such as pipelines). 
The former have parameters of the form 541 | ``__`` so that it's possible to update each 542 | component of a nested object. 543 | 544 | Returns 545 | ------- 546 | self 547 | 548 | 549 | - - - 550 | 551 | #### `skflow.TensorFlowDNNClassifier.weights_` {#TensorFlowDNNClassifier.weights_} 552 | 553 | Returns weights of the DNN weight layers. 554 | 555 | 556 | 557 | - - - 558 | 559 | ### `class skflow.TensorFlowDNNRegressor` {#TensorFlowDNNRegressor} 560 | 561 | TensorFlow DNN Regressor model. 562 | 563 | Parameters: 564 | hidden_units: List of hidden units per layer. 565 | tf_master: TensorFlow master. Empty string is default for local. 566 | batch_size: Mini batch size. 567 | steps: Number of steps to run over data. 568 | optimizer: Optimizer name (or class), for example "SGD", "Adam", 569 | "Adagrad". 570 | learning_rate: If this is constant float value, no decay function is used. 571 | Instead, a customized decay function can be passed that accepts 572 | global_step as parameter and returns a Tensor. 573 | e.g. exponential decay function: 574 | def exp_decay(global_step): 575 | return tf.train.exponential_decay( 576 | learning_rate=0.1, global_step, 577 | decay_steps=2, decay_rate=0.001) 578 | tf_random_seed: Random seed for TensorFlow initializers. 579 | Setting this value, allows consistency between reruns. 580 | continue_training: when continue_training is True, once initialized 581 | model will be continuely trained on every call of fit. 582 | config_addon: ConfigAddon object that controls the configurations of the session, 583 | e.g. num_cores, gpu_memory_fraction, etc. 584 | verbose: Controls the verbosity, possible values: 585 | 0: the algorithm and debug information is muted. 586 | 1: trainer prints the progress. 587 | 2: log device placement is printed. 588 | max_to_keep: The maximum number of recent checkpoint files to keep. 589 | As new files are created, older files are deleted. 590 | If None or 0, all checkpoint files are kept. 
591 | Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 592 | keep_checkpoint_every_n_hours: Number of hours between each checkpoint 593 | to be saved. The default value of 10,000 hours effectively disables the feature. 594 | - - - 595 | 596 | #### `skflow.TensorFlowDNNRegressor.__init__(hidden_units, n_classes=0, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowDNNRegressor.__init__} 597 | 598 | 599 | 600 | 601 | - - - 602 | 603 | #### `skflow.TensorFlowDNNRegressor.bias_` {#TensorFlowDNNRegressor.bias_} 604 | 605 | Returns bias of the DNN's bias layers. 606 | 607 | 608 | - - - 609 | 610 | #### `skflow.TensorFlowDNNRegressor.fit(X, y, monitor=None, logdir=None)` {#TensorFlowDNNRegressor.fit} 611 | 612 | Builds a neural network model given provided `model_fn` and training 613 | data X and y. 614 | 615 | Note: called first time constructs the graph and initializers 616 | variables. Consecutives times it will continue training the same model. 617 | This logic follows partial_fit() interface in scikit-learn. 618 | 619 | To restart learning, create new estimator. 620 | 621 | ##### Args: 622 | 623 | 624 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 625 | iterator that returns arrays of features. The training input 626 | samples for fitting the model. 627 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 628 | iterator that returns array of targets. The training target values 629 | (class labels in classification, real numbers in regression). 630 | * `monitor`: Monitor object to print training progress and invoke early stopping 631 | * `logdir`: the directory to save the log file that can be used for 632 | optional visualization. 633 | 634 | ##### Returns: 635 | 636 | Returns self. 
637 | 638 | 639 | - - - 640 | 641 | #### `skflow.TensorFlowDNNRegressor.get_params(deep=True)` {#TensorFlowDNNRegressor.get_params} 642 | 643 | Get parameters for this estimator. 644 | 645 | Parameters 646 | ---------- 647 | deep: boolean, optional 648 | If True, will return the parameters for this estimator and 649 | contained subobjects that are estimators. 650 | 651 | Returns 652 | ------- 653 | params : mapping of string to any 654 | Parameter names mapped to their values. 655 | 656 | 657 | - - - 658 | 659 | #### `skflow.TensorFlowDNNRegressor.get_tensor(name)` {#TensorFlowDNNRegressor.get_tensor} 660 | 661 | Returns tensor by name. 662 | 663 | ##### Args: 664 | 665 | 666 | * `name`: string, name of the tensor. 667 | 668 | ##### Returns: 669 | 670 | Tensor. 671 | 672 | 673 | - - - 674 | 675 | #### `skflow.TensorFlowDNNRegressor.get_tensor_value(name)` {#TensorFlowDNNRegressor.get_tensor_value} 676 | 677 | Returns value of the tensor give by name. 678 | 679 | ##### Args: 680 | 681 | 682 | * `name`: string, name of the tensor. 683 | 684 | ##### Returns: 685 | 686 | Numpy array - value of the tensor. 687 | 688 | 689 | - - - 690 | 691 | #### `skflow.TensorFlowDNNRegressor.partial_fit(X, y)` {#TensorFlowDNNRegressor.partial_fit} 692 | 693 | Incremental fit on a batch of samples. 694 | 695 | This method is expected to be called several times consecutively 696 | on different or the same chunks of the dataset. This either can 697 | implement iterative training or out-of-core/online training. 698 | 699 | This is especially useful when the whole dataset is too big to 700 | fit in memory at the same time. Or when model is taking long time 701 | to converge, and you want to split up training into subparts. 702 | 703 | ##### Args: 704 | 705 | 706 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 707 | iterator that returns arrays of features. The training input 708 | samples for fitting the model. 
709 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 710 | iterator that returns array of targets. The training target values 711 | (class label in classification, real numbers in regression). 712 | 713 | ##### Returns: 714 | 715 | Returns self. 716 | 717 | 718 | - - - 719 | 720 | #### `skflow.TensorFlowDNNRegressor.predict(X, axis=1, batch_size=-1)` {#TensorFlowDNNRegressor.predict} 721 | 722 | Predict class or regression for X. 723 | 724 | For a classification model, the predicted class for each sample in X is 725 | returned. For a regression model, the predicted value based on X is 726 | returned. 727 | 728 | ##### Args: 729 | 730 | 731 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 732 | * `axis`: Which axis to argmax for classification. 733 | By default axis 1 (next after batch) is used. 734 | Use 2 for sequence predictions. 735 | * `batch_size`: If test set is too big, use batch size to split 736 | it into mini batches. By default full dataset is used. 737 | 738 | ##### Returns: 739 | 740 | 741 | * `y`: array of shape [n_samples]. The predicted classes or predicted 742 | value. 743 | 744 | 745 | - - - 746 | 747 | #### `skflow.TensorFlowDNNRegressor.predict_proba(X, batch_size=-1)` {#TensorFlowDNNRegressor.predict_proba} 748 | 749 | Predict class probability of the input samples X. 750 | 751 | ##### Args: 752 | 753 | 754 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 755 | * `batch_size`: If test set is too big, use batch size to split 756 | it into mini batches. By default full dataset is used. 757 | 758 | ##### Returns: 759 | 760 | 761 | * `y`: array of shape [n_samples, n_classes]. The predicted 762 | probabilities for each class. 763 | 764 | 765 | - - - 766 | 767 | #### `skflow.TensorFlowDNNRegressor.restore(cls, path, config_addon=None)` {#TensorFlowDNNRegressor.restore} 768 | 769 | Restores model from give path. 
770 | 771 | ##### Args: 772 | 773 | 774 | * `path`: Path to the checkpoints and other model information. 775 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 776 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 777 | 778 | ##### Returns: 779 | 780 | Estimator, object of the subclass of TensorFlowEstimator. 781 | 782 | 783 | - - - 784 | 785 | #### `skflow.TensorFlowDNNRegressor.save(path)` {#TensorFlowDNNRegressor.save} 786 | 787 | Saves checkpoints and graph to given path. 788 | 789 | ##### Args: 790 | 791 | 792 | * `path`: Folder to save model to. 793 | 794 | 795 | - - - 796 | 797 | #### `skflow.TensorFlowDNNRegressor.score(X, y, sample_weight=None)` {#TensorFlowDNNRegressor.score} 798 | 799 | Returns the coefficient of determination R^2 of the prediction. 800 | 801 | The coefficient R^2 is defined as (1 - u/v), where u is the regression 802 | sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual 803 | sum of squares ((y_true - y_true.mean()) ** 2).sum(). 804 | Best possible score is 1.0 and it can be negative (because the 805 | model can be arbitrarily worse). A constant model that always 806 | predicts the expected value of y, disregarding the input features, 807 | would get a R^2 score of 0.0. 808 | 809 | Parameters 810 | ---------- 811 | X : array-like, shape = (n_samples, n_features) 812 | Test samples. 813 | 814 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 815 | True values for X. 816 | 817 | sample_weight : array-like, shape = [n_samples], optional 818 | Sample weights. 819 | 820 | Returns 821 | ------- 822 | score : float 823 | R^2 of self.predict(X) wrt. y. 824 | 825 | 826 | - - - 827 | 828 | #### `skflow.TensorFlowDNNRegressor.set_params(**params)` {#TensorFlowDNNRegressor.set_params} 829 | 830 | Set the parameters of this estimator. 831 | 832 | The method works on simple estimators as well as on nested objects 833 | (such as pipelines). 
The former have parameters of the form 834 | ``__`` so that it's possible to update each 835 | component of a nested object. 836 | 837 | Returns 838 | ------- 839 | self 840 | 841 | 842 | - - - 843 | 844 | #### `skflow.TensorFlowDNNRegressor.weights_` {#TensorFlowDNNRegressor.weights_} 845 | 846 | Returns weights of the DNN weight layers. 847 | 848 | 849 | 850 | - - - 851 | 852 | ### `class skflow.TensorFlowEstimator` {#TensorFlowEstimator} 853 | 854 | Base class for all TensorFlow estimators. 855 | 856 | Parameters: 857 | model_fn: Model function, that takes input X, y tensors and outputs 858 | prediction and loss tensors. 859 | n_classes: Number of classes in the target. 860 | tf_master: TensorFlow master. Empty string is default for local. 861 | batch_size: Mini batch size. 862 | steps: Number of steps to run over data. 863 | optimizer: Optimizer name (or class), for example "SGD", "Adam", 864 | "Adagrad". 865 | learning_rate: If this is constant float value, no decay function is used. 866 | Instead, a customized decay function can be passed that accepts 867 | global_step as parameter and returns a Tensor. 868 | e.g. exponential decay function: 869 | def exp_decay(global_step): 870 | return tf.train.exponential_decay( 871 | learning_rate=0.1, global_step, 872 | decay_steps=2, decay_rate=0.001) 873 | class_weight: None or list of n_classes floats. Weight associated with 874 | classes for loss computation. If not given, all classes are suppose to have 875 | weight one. 876 | tf_random_seed: Random seed for TensorFlow initializers. 877 | Setting this value, allows consistency between reruns. 878 | continue_training: when continue_training is True, once initialized 879 | model will be continuely trained on every call of fit. 880 | config_addon: ConfigAddon object that controls the configurations of the session, 881 | e.g. num_cores, gpu_memory_fraction, etc. 
882 | verbose: Controls the verbosity, possible values: 883 | 0: the algorithm and debug information is muted. 884 | 1: trainer prints the progress. 885 | 2: log device placement is printed. 886 | max_to_keep: The maximum number of recent checkpoint files to keep. 887 | As new files are created, older files are deleted. 888 | If None or 0, all checkpoint files are kept. 889 | Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 890 | keep_checkpoint_every_n_hours: Number of hours between each checkpoint 891 | to be saved. The default value of 10,000 hours effectively disables the feature. 892 | - - - 893 | 894 | #### `skflow.TensorFlowEstimator.__init__(model_fn, n_classes, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, class_weight=None, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowEstimator.__init__} 895 | 896 | 897 | 898 | 899 | - - - 900 | 901 | #### `skflow.TensorFlowEstimator.fit(X, y, monitor=None, logdir=None)` {#TensorFlowEstimator.fit} 902 | 903 | Builds a neural network model given provided `model_fn` and training 904 | data X and y. 905 | 906 | Note: called first time constructs the graph and initializers 907 | variables. Consecutives times it will continue training the same model. 908 | This logic follows partial_fit() interface in scikit-learn. 909 | 910 | To restart learning, create new estimator. 911 | 912 | ##### Args: 913 | 914 | 915 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 916 | iterator that returns arrays of features. The training input 917 | samples for fitting the model. 918 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 919 | iterator that returns array of targets. The training target values 920 | (class labels in classification, real numbers in regression). 
921 | * `monitor`: Monitor object to print training progress and invoke early stopping 922 | * `logdir`: the directory to save the log file that can be used for 923 | optional visualization. 924 | 925 | ##### Returns: 926 | 927 | Returns self. 928 | 929 | 930 | - - - 931 | 932 | #### `skflow.TensorFlowEstimator.get_params(deep=True)` {#TensorFlowEstimator.get_params} 933 | 934 | Get parameters for this estimator. 935 | 936 | Parameters 937 | ---------- 938 | deep: boolean, optional 939 | If True, will return the parameters for this estimator and 940 | contained subobjects that are estimators. 941 | 942 | Returns 943 | ------- 944 | params : mapping of string to any 945 | Parameter names mapped to their values. 946 | 947 | 948 | - - - 949 | 950 | #### `skflow.TensorFlowEstimator.get_tensor(name)` {#TensorFlowEstimator.get_tensor} 951 | 952 | Returns tensor by name. 953 | 954 | ##### Args: 955 | 956 | 957 | * `name`: string, name of the tensor. 958 | 959 | ##### Returns: 960 | 961 | Tensor. 962 | 963 | 964 | - - - 965 | 966 | #### `skflow.TensorFlowEstimator.get_tensor_value(name)` {#TensorFlowEstimator.get_tensor_value} 967 | 968 | Returns value of the tensor give by name. 969 | 970 | ##### Args: 971 | 972 | 973 | * `name`: string, name of the tensor. 974 | 975 | ##### Returns: 976 | 977 | Numpy array - value of the tensor. 978 | 979 | 980 | - - - 981 | 982 | #### `skflow.TensorFlowEstimator.partial_fit(X, y)` {#TensorFlowEstimator.partial_fit} 983 | 984 | Incremental fit on a batch of samples. 985 | 986 | This method is expected to be called several times consecutively 987 | on different or the same chunks of the dataset. This either can 988 | implement iterative training or out-of-core/online training. 989 | 990 | This is especially useful when the whole dataset is too big to 991 | fit in memory at the same time. Or when model is taking long time 992 | to converge, and you want to split up training into subparts. 
993 | 994 | ##### Args: 995 | 996 | 997 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 998 | iterator that returns arrays of features. The training input 999 | samples for fitting the model. 1000 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1001 | iterator that returns array of targets. The training target values 1002 | (class label in classification, real numbers in regression). 1003 | 1004 | ##### Returns: 1005 | 1006 | Returns self. 1007 | 1008 | 1009 | - - - 1010 | 1011 | #### `skflow.TensorFlowEstimator.predict(X, axis=1, batch_size=-1)` {#TensorFlowEstimator.predict} 1012 | 1013 | Predict class or regression for X. 1014 | 1015 | For a classification model, the predicted class for each sample in X is 1016 | returned. For a regression model, the predicted value based on X is 1017 | returned. 1018 | 1019 | ##### Args: 1020 | 1021 | 1022 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1023 | * `axis`: Which axis to argmax for classification. 1024 | By default axis 1 (next after batch) is used. 1025 | Use 2 for sequence predictions. 1026 | * `batch_size`: If test set is too big, use batch size to split 1027 | it into mini batches. By default full dataset is used. 1028 | 1029 | ##### Returns: 1030 | 1031 | 1032 | * `y`: array of shape [n_samples]. The predicted classes or predicted 1033 | value. 1034 | 1035 | 1036 | - - - 1037 | 1038 | #### `skflow.TensorFlowEstimator.predict_proba(X, batch_size=-1)` {#TensorFlowEstimator.predict_proba} 1039 | 1040 | Predict class probability of the input samples X. 1041 | 1042 | ##### Args: 1043 | 1044 | 1045 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1046 | * `batch_size`: If test set is too big, use batch size to split 1047 | it into mini batches. By default full dataset is used. 1048 | 1049 | ##### Returns: 1050 | 1051 | 1052 | * `y`: array of shape [n_samples, n_classes]. The predicted 1053 | probabilities for each class. 
1054 | 1055 | 1056 | - - - 1057 | 1058 | #### `skflow.TensorFlowEstimator.restore(cls, path, config_addon=None)` {#TensorFlowEstimator.restore} 1059 | 1060 | Restores model from given path. 1061 | 1062 | ##### Args: 1063 | 1064 | 1065 | * `path`: Path to the checkpoints and other model information. 1066 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 1067 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 1068 | 1069 | ##### Returns: 1070 | 1071 | Estimator, object of the subclass of TensorFlowEstimator. 1072 | 1073 | 1074 | - - - 1075 | 1076 | #### `skflow.TensorFlowEstimator.save(path)` {#TensorFlowEstimator.save} 1077 | 1078 | Saves checkpoints and graph to given path. 1079 | 1080 | ##### Args: 1081 | 1082 | 1083 | * `path`: Folder to save model to. 1084 | 1085 | 1086 | - - - 1087 | 1088 | #### `skflow.TensorFlowEstimator.set_params(**params)` {#TensorFlowEstimator.set_params} 1089 | 1090 | Set the parameters of this estimator. 1091 | 1092 | The method works on simple estimators as well as on nested objects 1093 | (such as pipelines). The former have parameters of the form 1094 | ``__`` so that it's possible to update each 1095 | component of a nested object. 1096 | 1097 | Returns 1098 | ------- 1099 | self 1100 | 1101 | 1102 | 1103 | - - - 1104 | 1105 | ### `class skflow.TensorFlowLinearClassifier` {#TensorFlowLinearClassifier} 1106 | 1107 | TensorFlow Linear Classifier model. 
1108 | - - - 1109 | 1110 | #### `skflow.TensorFlowLinearClassifier.__init__(n_classes, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, class_weight=None, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowLinearClassifier.__init__} 1111 | 1112 | 1113 | 1114 | 1115 | - - - 1116 | 1117 | #### `skflow.TensorFlowLinearClassifier.bias_` {#TensorFlowLinearClassifier.bias_} 1118 | 1119 | Returns bias of the linear classifier. 1120 | 1121 | 1122 | - - - 1123 | 1124 | #### `skflow.TensorFlowLinearClassifier.fit(X, y, monitor=None, logdir=None)` {#TensorFlowLinearClassifier.fit} 1125 | 1126 | Builds a neural network model given provided `model_fn` and training 1127 | data X and y. 1128 | 1129 | Note: called first time constructs the graph and initializes 1130 | variables. Consecutive times it will continue training the same model. 1131 | This logic follows partial_fit() interface in scikit-learn. 1132 | 1133 | To restart learning, create new estimator. 1134 | 1135 | ##### Args: 1136 | 1137 | 1138 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1139 | iterator that returns arrays of features. The training input 1140 | samples for fitting the model. 1141 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1142 | iterator that returns array of targets. The training target values 1143 | (class labels in classification, real numbers in regression). 1144 | * `monitor`: Monitor object to print training progress and invoke early stopping 1145 | * `logdir`: the directory to save the log file that can be used for 1146 | optional visualization. 1147 | 1148 | ##### Returns: 1149 | 1150 | Returns self. 1151 | 1152 | 1153 | - - - 1154 | 1155 | #### `skflow.TensorFlowLinearClassifier.get_params(deep=True)` {#TensorFlowLinearClassifier.get_params} 1156 | 1157 | Get parameters for this estimator. 
1158 | 1159 | Parameters 1160 | ---------- 1161 | deep: boolean, optional 1162 | If True, will return the parameters for this estimator and 1163 | contained subobjects that are estimators. 1164 | 1165 | Returns 1166 | ------- 1167 | params : mapping of string to any 1168 | Parameter names mapped to their values. 1169 | 1170 | 1171 | - - - 1172 | 1173 | #### `skflow.TensorFlowLinearClassifier.get_tensor(name)` {#TensorFlowLinearClassifier.get_tensor} 1174 | 1175 | Returns tensor by name. 1176 | 1177 | ##### Args: 1178 | 1179 | 1180 | * `name`: string, name of the tensor. 1181 | 1182 | ##### Returns: 1183 | 1184 | Tensor. 1185 | 1186 | 1187 | - - - 1188 | 1189 | #### `skflow.TensorFlowLinearClassifier.get_tensor_value(name)` {#TensorFlowLinearClassifier.get_tensor_value} 1190 | 1191 | Returns value of the tensor give by name. 1192 | 1193 | ##### Args: 1194 | 1195 | 1196 | * `name`: string, name of the tensor. 1197 | 1198 | ##### Returns: 1199 | 1200 | Numpy array - value of the tensor. 1201 | 1202 | 1203 | - - - 1204 | 1205 | #### `skflow.TensorFlowLinearClassifier.partial_fit(X, y)` {#TensorFlowLinearClassifier.partial_fit} 1206 | 1207 | Incremental fit on a batch of samples. 1208 | 1209 | This method is expected to be called several times consecutively 1210 | on different or the same chunks of the dataset. This either can 1211 | implement iterative training or out-of-core/online training. 1212 | 1213 | This is especially useful when the whole dataset is too big to 1214 | fit in memory at the same time. Or when model is taking long time 1215 | to converge, and you want to split up training into subparts. 1216 | 1217 | ##### Args: 1218 | 1219 | 1220 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1221 | iterator that returns arrays of features. The training input 1222 | samples for fitting the model. 1223 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1224 | iterator that returns array of targets. 
The training target values 1225 | (class label in classification, real numbers in regression). 1226 | 1227 | ##### Returns: 1228 | 1229 | Returns self. 1230 | 1231 | 1232 | - - - 1233 | 1234 | #### `skflow.TensorFlowLinearClassifier.predict(X, axis=1, batch_size=-1)` {#TensorFlowLinearClassifier.predict} 1235 | 1236 | Predict class or regression for X. 1237 | 1238 | For a classification model, the predicted class for each sample in X is 1239 | returned. For a regression model, the predicted value based on X is 1240 | returned. 1241 | 1242 | ##### Args: 1243 | 1244 | 1245 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1246 | * `axis`: Which axis to argmax for classification. 1247 | By default axis 1 (next after batch) is used. 1248 | Use 2 for sequence predictions. 1249 | * `batch_size`: If test set is too big, use batch size to split 1250 | it into mini batches. By default full dataset is used. 1251 | 1252 | ##### Returns: 1253 | 1254 | 1255 | * `y`: array of shape [n_samples]. The predicted classes or predicted 1256 | value. 1257 | 1258 | 1259 | - - - 1260 | 1261 | #### `skflow.TensorFlowLinearClassifier.predict_proba(X, batch_size=-1)` {#TensorFlowLinearClassifier.predict_proba} 1262 | 1263 | Predict class probability of the input samples X. 1264 | 1265 | ##### Args: 1266 | 1267 | 1268 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1269 | * `batch_size`: If test set is too big, use batch size to split 1270 | it into mini batches. By default full dataset is used. 1271 | 1272 | ##### Returns: 1273 | 1274 | 1275 | * `y`: array of shape [n_samples, n_classes]. The predicted 1276 | probabilities for each class. 1277 | 1278 | 1279 | - - - 1280 | 1281 | #### `skflow.TensorFlowLinearClassifier.restore(cls, path, config_addon=None)` {#TensorFlowLinearClassifier.restore} 1282 | 1283 | Restores model from give path. 1284 | 1285 | ##### Args: 1286 | 1287 | 1288 | * `path`: Path to the checkpoints and other model information. 
1289 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 1290 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 1291 | 1292 | ##### Returns: 1293 | 1294 | Estimator, object of the subclass of TensorFlowEstimator. 1295 | 1296 | 1297 | - - - 1298 | 1299 | #### `skflow.TensorFlowLinearClassifier.save(path)` {#TensorFlowLinearClassifier.save} 1300 | 1301 | Saves checkpoints and graph to given path. 1302 | 1303 | ##### Args: 1304 | 1305 | 1306 | * `path`: Folder to save model to. 1307 | 1308 | 1309 | - - - 1310 | 1311 | #### `skflow.TensorFlowLinearClassifier.score(X, y, sample_weight=None)` {#TensorFlowLinearClassifier.score} 1312 | 1313 | Returns the mean accuracy on the given test data and labels. 1314 | 1315 | In multi-label classification, this is the subset accuracy 1316 | which is a harsh metric since you require for each sample that 1317 | each label set be correctly predicted. 1318 | 1319 | Parameters 1320 | ---------- 1321 | X : array-like, shape = (n_samples, n_features) 1322 | Test samples. 1323 | 1324 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 1325 | True labels for X. 1326 | 1327 | sample_weight : array-like, shape = [n_samples], optional 1328 | Sample weights. 1329 | 1330 | Returns 1331 | ------- 1332 | score : float 1333 | Mean accuracy of self.predict(X) wrt. y. 1334 | 1335 | 1336 | - - - 1337 | 1338 | #### `skflow.TensorFlowLinearClassifier.set_params(**params)` {#TensorFlowLinearClassifier.set_params} 1339 | 1340 | Set the parameters of this estimator. 1341 | 1342 | The method works on simple estimators as well as on nested objects 1343 | (such as pipelines). The former have parameters of the form 1344 | ``__`` so that it's possible to update each 1345 | component of a nested object. 
1346 | 1347 | Returns 1348 | ------- 1349 | self 1350 | 1351 | 1352 | - - - 1353 | 1354 | #### `skflow.TensorFlowLinearClassifier.weights_` {#TensorFlowLinearClassifier.weights_} 1355 | 1356 | Returns weights of the linear classifier. 1357 | 1358 | 1359 | 1360 | - - - 1361 | 1362 | ### `class skflow.TensorFlowLinearRegressor` {#TensorFlowLinearRegressor} 1363 | 1364 | TensorFlow Linear Regression model. 1365 | - - - 1366 | 1367 | #### `skflow.TensorFlowLinearRegressor.__init__(n_classes=0, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowLinearRegressor.__init__} 1368 | 1369 | 1370 | 1371 | 1372 | - - - 1373 | 1374 | #### `skflow.TensorFlowLinearRegressor.bias_` {#TensorFlowLinearRegressor.bias_} 1375 | 1376 | Returns bias of the linear regression. 1377 | 1378 | 1379 | - - - 1380 | 1381 | #### `skflow.TensorFlowLinearRegressor.fit(X, y, monitor=None, logdir=None)` {#TensorFlowLinearRegressor.fit} 1382 | 1383 | Builds a neural network model given provided `model_fn` and training 1384 | data X and y. 1385 | 1386 | Note: called first time constructs the graph and initializers 1387 | variables. Consecutives times it will continue training the same model. 1388 | This logic follows partial_fit() interface in scikit-learn. 1389 | 1390 | To restart learning, create new estimator. 1391 | 1392 | ##### Args: 1393 | 1394 | 1395 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1396 | iterator that returns arrays of features. The training input 1397 | samples for fitting the model. 1398 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1399 | iterator that returns array of targets. The training target values 1400 | (class labels in classification, real numbers in regression). 
1401 | * `monitor`: Monitor object to print training progress and invoke early stopping 1402 | * `logdir`: the directory to save the log file that can be used for 1403 | optional visualization. 1404 | 1405 | ##### Returns: 1406 | 1407 | Returns self. 1408 | 1409 | 1410 | - - - 1411 | 1412 | #### `skflow.TensorFlowLinearRegressor.get_params(deep=True)` {#TensorFlowLinearRegressor.get_params} 1413 | 1414 | Get parameters for this estimator. 1415 | 1416 | Parameters 1417 | ---------- 1418 | deep: boolean, optional 1419 | If True, will return the parameters for this estimator and 1420 | contained subobjects that are estimators. 1421 | 1422 | Returns 1423 | ------- 1424 | params : mapping of string to any 1425 | Parameter names mapped to their values. 1426 | 1427 | 1428 | - - - 1429 | 1430 | #### `skflow.TensorFlowLinearRegressor.get_tensor(name)` {#TensorFlowLinearRegressor.get_tensor} 1431 | 1432 | Returns tensor by name. 1433 | 1434 | ##### Args: 1435 | 1436 | 1437 | * `name`: string, name of the tensor. 1438 | 1439 | ##### Returns: 1440 | 1441 | Tensor. 1442 | 1443 | 1444 | - - - 1445 | 1446 | #### `skflow.TensorFlowLinearRegressor.get_tensor_value(name)` {#TensorFlowLinearRegressor.get_tensor_value} 1447 | 1448 | Returns value of the tensor give by name. 1449 | 1450 | ##### Args: 1451 | 1452 | 1453 | * `name`: string, name of the tensor. 1454 | 1455 | ##### Returns: 1456 | 1457 | Numpy array - value of the tensor. 1458 | 1459 | 1460 | - - - 1461 | 1462 | #### `skflow.TensorFlowLinearRegressor.partial_fit(X, y)` {#TensorFlowLinearRegressor.partial_fit} 1463 | 1464 | Incremental fit on a batch of samples. 1465 | 1466 | This method is expected to be called several times consecutively 1467 | on different or the same chunks of the dataset. This either can 1468 | implement iterative training or out-of-core/online training. 1469 | 1470 | This is especially useful when the whole dataset is too big to 1471 | fit in memory at the same time. 
Or when model is taking long time 1472 | to converge, and you want to split up training into subparts. 1473 | 1474 | ##### Args: 1475 | 1476 | 1477 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1478 | iterator that returns arrays of features. The training input 1479 | samples for fitting the model. 1480 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1481 | iterator that returns array of targets. The training target values 1482 | (class label in classification, real numbers in regression). 1483 | 1484 | ##### Returns: 1485 | 1486 | Returns self. 1487 | 1488 | 1489 | - - - 1490 | 1491 | #### `skflow.TensorFlowLinearRegressor.predict(X, axis=1, batch_size=-1)` {#TensorFlowLinearRegressor.predict} 1492 | 1493 | Predict class or regression for X. 1494 | 1495 | For a classification model, the predicted class for each sample in X is 1496 | returned. For a regression model, the predicted value based on X is 1497 | returned. 1498 | 1499 | ##### Args: 1500 | 1501 | 1502 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1503 | * `axis`: Which axis to argmax for classification. 1504 | By default axis 1 (next after batch) is used. 1505 | Use 2 for sequence predictions. 1506 | * `batch_size`: If test set is too big, use batch size to split 1507 | it into mini batches. By default full dataset is used. 1508 | 1509 | ##### Returns: 1510 | 1511 | 1512 | * `y`: array of shape [n_samples]. The predicted classes or predicted 1513 | value. 1514 | 1515 | 1516 | - - - 1517 | 1518 | #### `skflow.TensorFlowLinearRegressor.predict_proba(X, batch_size=-1)` {#TensorFlowLinearRegressor.predict_proba} 1519 | 1520 | Predict class probability of the input samples X. 1521 | 1522 | ##### Args: 1523 | 1524 | 1525 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1526 | * `batch_size`: If test set is too big, use batch size to split 1527 | it into mini batches. By default full dataset is used. 
1528 | 1529 | ##### Returns: 1530 | 1531 | 1532 | * `y`: array of shape [n_samples, n_classes]. The predicted 1533 | probabilities for each class. 1534 | 1535 | 1536 | - - - 1537 | 1538 | #### `skflow.TensorFlowLinearRegressor.restore(cls, path, config_addon=None)` {#TensorFlowLinearRegressor.restore} 1539 | 1540 | Restores model from given path. 1541 | 1542 | ##### Args: 1543 | 1544 | 1545 | * `path`: Path to the checkpoints and other model information. 1546 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 1547 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 1548 | 1549 | ##### Returns: 1550 | 1551 | Estimator, object of the subclass of TensorFlowEstimator. 1552 | 1553 | 1554 | - - - 1555 | 1556 | #### `skflow.TensorFlowLinearRegressor.save(path)` {#TensorFlowLinearRegressor.save} 1557 | 1558 | Saves checkpoints and graph to given path. 1559 | 1560 | ##### Args: 1561 | 1562 | 1563 | * `path`: Folder to save model to. 1564 | 1565 | 1566 | - - - 1567 | 1568 | #### `skflow.TensorFlowLinearRegressor.score(X, y, sample_weight=None)` {#TensorFlowLinearRegressor.score} 1569 | 1570 | Returns the coefficient of determination R^2 of the prediction. 1571 | 1572 | The coefficient R^2 is defined as (1 - u/v), where u is the residual 1573 | sum of squares ((y_true - y_pred) ** 2).sum() and v is the total 1574 | sum of squares ((y_true - y_true.mean()) ** 2).sum(). 1575 | Best possible score is 1.0 and it can be negative (because the 1576 | model can be arbitrarily worse). A constant model that always 1577 | predicts the expected value of y, disregarding the input features, 1578 | would get an R^2 score of 0.0. 1579 | 1580 | Parameters 1581 | ---------- 1582 | X : array-like, shape = (n_samples, n_features) 1583 | Test samples. 1584 | 1585 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 1586 | True values for X.
1587 | 1588 | sample_weight : array-like, shape = [n_samples], optional 1589 | Sample weights. 1590 | 1591 | Returns 1592 | ------- 1593 | score : float 1594 | R^2 of self.predict(X) wrt. y. 1595 | 1596 | 1597 | - - - 1598 | 1599 | #### `skflow.TensorFlowLinearRegressor.set_params(**params)` {#TensorFlowLinearRegressor.set_params} 1600 | 1601 | Set the parameters of this estimator. 1602 | 1603 | The method works on simple estimators as well as on nested objects 1604 | (such as pipelines). The former have parameters of the form 1605 | ``__`` so that it's possible to update each 1606 | component of a nested object. 1607 | 1608 | Returns 1609 | ------- 1610 | self 1611 | 1612 | 1613 | - - - 1614 | 1615 | #### `skflow.TensorFlowLinearRegressor.weights_` {#TensorFlowLinearRegressor.weights_} 1616 | 1617 | Returns weights of the linear regression. 1618 | 1619 | 1620 | 1621 | - - - 1622 | 1623 | ### `class skflow.TensorFlowRNNClassifier` {#TensorFlowRNNClassifier} 1624 | 1625 | TensorFlow RNN Classifier model. 1626 | 1627 | Parameters: 1628 | rnn_size: The size for rnn cell, e.g. size of your word embeddings. 1629 | cell_type: The type of rnn cell, including rnn, gru, and lstm. 1630 | num_layers: The number of layers of the rnn model. 1631 | input_op_fn: Function that will transform the input tensor, such as 1632 | creating word embeddings, byte list, etc. This takes 1633 | an argument X for input and returns transformed X. 1634 | bidirectional: boolean, Whether this is a bidirectional rnn. 1635 | sequence_length: If sequence_length is provided, dynamic calculation is performed. 1636 | This saves computational time when unrolling past max sequence length. 1637 | initial_state: An initial state for the RNN. This must be a tensor of appropriate type 1638 | and shape [batch_size x cell.state_size]. 1639 | n_classes: Number of classes in the target. 1640 | tf_master: TensorFlow master. Empty string is default for local. 1641 | batch_size: Mini batch size. 
1642 | steps: Number of steps to run over data. 1643 | optimizer: Optimizer name (or class), for example "SGD", "Adam", 1644 | "Adagrad". 1645 | learning_rate: If this is constant float value, no decay function is used. 1646 | Instead, a customized decay function can be passed that accepts 1647 | global_step as parameter and returns a Tensor. 1648 | e.g. exponential decay function: 1649 | def exp_decay(global_step): 1650 | return tf.train.exponential_decay( 1651 | learning_rate=0.1, global_step, 1652 | decay_steps=2, decay_rate=0.001) 1653 | class_weight: None or list of n_classes floats. Weight associated with 1654 | classes for loss computation. If not given, all classes are supposed to have 1655 | weight one. 1656 | tf_random_seed: Random seed for TensorFlow initializers. 1657 | Setting this value, allows consistency between reruns. 1658 | continue_training: when continue_training is True, once initialized 1659 | model will be continually trained on every call of fit. 1660 | num_cores: Number of cores to be used. (default: 4) 1661 | max_to_keep: The maximum number of recent checkpoint files to keep. 1662 | As new files are created, older files are deleted. 1663 | If None or 0, all checkpoint files are kept. 1664 | Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 1665 | keep_checkpoint_every_n_hours: Number of hours between each checkpoint 1666 | to be saved. The default value of 10,000 hours effectively disables the feature.
1667 | - - - 1668 | 1669 | #### `skflow.TensorFlowRNNClassifier.__init__(rnn_size, n_classes, cell_type='gru', num_layers=1, input_op_fn=null_input_op_fn, initial_state=None, bidirectional=False, sequence_length=None, tf_master='', batch_size=32, steps=50, optimizer='SGD', learning_rate=0.1, class_weight=None, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowRNNClassifier.__init__} 1670 | 1671 | 1672 | 1673 | 1674 | - - - 1675 | 1676 | #### `skflow.TensorFlowRNNClassifier.bias_` {#TensorFlowRNNClassifier.bias_} 1677 | 1678 | Returns bias of the rnn layer. 1679 | 1680 | 1681 | - - - 1682 | 1683 | #### `skflow.TensorFlowRNNClassifier.fit(X, y, monitor=None, logdir=None)` {#TensorFlowRNNClassifier.fit} 1684 | 1685 | Builds a neural network model given provided `model_fn` and training 1686 | data X and y. 1687 | 1688 | Note: called first time constructs the graph and initializers 1689 | variables. Consecutives times it will continue training the same model. 1690 | This logic follows partial_fit() interface in scikit-learn. 1691 | 1692 | To restart learning, create new estimator. 1693 | 1694 | ##### Args: 1695 | 1696 | 1697 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1698 | iterator that returns arrays of features. The training input 1699 | samples for fitting the model. 1700 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1701 | iterator that returns array of targets. The training target values 1702 | (class labels in classification, real numbers in regression). 1703 | * `monitor`: Monitor object to print training progress and invoke early stopping 1704 | * `logdir`: the directory to save the log file that can be used for 1705 | optional visualization. 1706 | 1707 | ##### Returns: 1708 | 1709 | Returns self. 
1710 | 1711 | 1712 | - - - 1713 | 1714 | #### `skflow.TensorFlowRNNClassifier.get_params(deep=True)` {#TensorFlowRNNClassifier.get_params} 1715 | 1716 | Get parameters for this estimator. 1717 | 1718 | Parameters 1719 | ---------- 1720 | deep: boolean, optional 1721 | If True, will return the parameters for this estimator and 1722 | contained subobjects that are estimators. 1723 | 1724 | Returns 1725 | ------- 1726 | params : mapping of string to any 1727 | Parameter names mapped to their values. 1728 | 1729 | 1730 | - - - 1731 | 1732 | #### `skflow.TensorFlowRNNClassifier.get_tensor(name)` {#TensorFlowRNNClassifier.get_tensor} 1733 | 1734 | Returns tensor by name. 1735 | 1736 | ##### Args: 1737 | 1738 | 1739 | * `name`: string, name of the tensor. 1740 | 1741 | ##### Returns: 1742 | 1743 | Tensor. 1744 | 1745 | 1746 | - - - 1747 | 1748 | #### `skflow.TensorFlowRNNClassifier.get_tensor_value(name)` {#TensorFlowRNNClassifier.get_tensor_value} 1749 | 1750 | Returns value of the tensor give by name. 1751 | 1752 | ##### Args: 1753 | 1754 | 1755 | * `name`: string, name of the tensor. 1756 | 1757 | ##### Returns: 1758 | 1759 | Numpy array - value of the tensor. 1760 | 1761 | 1762 | - - - 1763 | 1764 | #### `skflow.TensorFlowRNNClassifier.partial_fit(X, y)` {#TensorFlowRNNClassifier.partial_fit} 1765 | 1766 | Incremental fit on a batch of samples. 1767 | 1768 | This method is expected to be called several times consecutively 1769 | on different or the same chunks of the dataset. This either can 1770 | implement iterative training or out-of-core/online training. 1771 | 1772 | This is especially useful when the whole dataset is too big to 1773 | fit in memory at the same time. Or when model is taking long time 1774 | to converge, and you want to split up training into subparts. 1775 | 1776 | ##### Args: 1777 | 1778 | 1779 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1780 | iterator that returns arrays of features. 
The training input 1781 | samples for fitting the model. 1782 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1783 | iterator that returns array of targets. The training target values 1784 | (class label in classification, real numbers in regression). 1785 | 1786 | ##### Returns: 1787 | 1788 | Returns self. 1789 | 1790 | 1791 | - - - 1792 | 1793 | #### `skflow.TensorFlowRNNClassifier.predict(X, axis=1, batch_size=-1)` {#TensorFlowRNNClassifier.predict} 1794 | 1795 | Predict class or regression for X. 1796 | 1797 | For a classification model, the predicted class for each sample in X is 1798 | returned. For a regression model, the predicted value based on X is 1799 | returned. 1800 | 1801 | ##### Args: 1802 | 1803 | 1804 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1805 | * `axis`: Which axis to argmax for classification. 1806 | By default axis 1 (next after batch) is used. 1807 | Use 2 for sequence predictions. 1808 | * `batch_size`: If test set is too big, use batch size to split 1809 | it into mini batches. By default full dataset is used. 1810 | 1811 | ##### Returns: 1812 | 1813 | 1814 | * `y`: array of shape [n_samples]. The predicted classes or predicted 1815 | value. 1816 | 1817 | 1818 | - - - 1819 | 1820 | #### `skflow.TensorFlowRNNClassifier.predict_proba(X, batch_size=-1)` {#TensorFlowRNNClassifier.predict_proba} 1821 | 1822 | Predict class probability of the input samples X. 1823 | 1824 | ##### Args: 1825 | 1826 | 1827 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 1828 | * `batch_size`: If test set is too big, use batch size to split 1829 | it into mini batches. By default full dataset is used. 1830 | 1831 | ##### Returns: 1832 | 1833 | 1834 | * `y`: array of shape [n_samples, n_classes]. The predicted 1835 | probabilities for each class. 
1836 | 1837 | 1838 | - - - 1839 | 1840 | #### `skflow.TensorFlowRNNClassifier.restore(cls, path, config_addon=None)` {#TensorFlowRNNClassifier.restore} 1841 | 1842 | Restores model from given path. 1843 | 1844 | ##### Args: 1845 | 1846 | 1847 | * `path`: Path to the checkpoints and other model information. 1848 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 1849 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 1850 | 1851 | ##### Returns: 1852 | 1853 | Estimator, object of the subclass of TensorFlowEstimator. 1854 | 1855 | 1856 | - - - 1857 | 1858 | #### `skflow.TensorFlowRNNClassifier.save(path)` {#TensorFlowRNNClassifier.save} 1859 | 1860 | Saves checkpoints and graph to given path. 1861 | 1862 | ##### Args: 1863 | 1864 | 1865 | * `path`: Folder to save model to. 1866 | 1867 | 1868 | - - - 1869 | 1870 | #### `skflow.TensorFlowRNNClassifier.score(X, y, sample_weight=None)` {#TensorFlowRNNClassifier.score} 1871 | 1872 | Returns the mean accuracy on the given test data and labels. 1873 | 1874 | In multi-label classification, this is the subset accuracy 1875 | which is a harsh metric since you require for each sample that 1876 | each label set be correctly predicted. 1877 | 1878 | Parameters 1879 | ---------- 1880 | X : array-like, shape = (n_samples, n_features) 1881 | Test samples. 1882 | 1883 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 1884 | True labels for X. 1885 | 1886 | sample_weight : array-like, shape = [n_samples], optional 1887 | Sample weights. 1888 | 1889 | Returns 1890 | ------- 1891 | score : float 1892 | Mean accuracy of self.predict(X) wrt. y. 1893 | 1894 | 1895 | - - - 1896 | 1897 | #### `skflow.TensorFlowRNNClassifier.set_params(**params)` {#TensorFlowRNNClassifier.set_params} 1898 | 1899 | Set the parameters of this estimator. 1900 | 1901 | The method works on simple estimators as well as on nested objects 1902 | (such as pipelines).
The former have parameters of the form 1903 | ``__`` so that it's possible to update each 1904 | component of a nested object. 1905 | 1906 | Returns 1907 | ------- 1908 | self 1909 | 1910 | 1911 | - - - 1912 | 1913 | #### `skflow.TensorFlowRNNClassifier.weights_` {#TensorFlowRNNClassifier.weights_} 1914 | 1915 | Returns weights of the rnn layer. 1916 | 1917 | 1918 | 1919 | - - - 1920 | 1921 | ### `class skflow.TensorFlowRNNRegressor` {#TensorFlowRNNRegressor} 1922 | 1923 | TensorFlow RNN Regressor model. 1924 | 1925 | Parameters: 1926 | rnn_size: The size for rnn cell, e.g. size of your word embeddings. 1927 | cell_type: The type of rnn cell, including rnn, gru, and lstm. 1928 | num_layers: The number of layers of the rnn model. 1929 | input_op_fn: Function that will transform the input tensor, such as 1930 | creating word embeddings, byte list, etc. This takes 1931 | an argument X for input and returns transformed X. 1932 | bidirectional: boolean, Whether this is a bidirectional rnn. 1933 | sequence_length: If sequence_length is provided, dynamic calculation is performed. 1934 | This saves computational time when unrolling past max sequence length. 1935 | initial_state: An initial state for the RNN. This must be a tensor of appropriate type 1936 | and shape [batch_size x cell.state_size]. 1937 | tf_master: TensorFlow master. Empty string is default for local. 1938 | batch_size: Mini batch size. 1939 | steps: Number of steps to run over data. 1940 | optimizer: Optimizer name (or class), for example "SGD", "Adam", 1941 | "Adagrad". 1942 | learning_rate: If this is constant float value, no decay function is used. 1943 | Instead, a customized decay function can be passed that accepts 1944 | global_step as parameter and returns a Tensor. 1945 | e.g. 
exponential decay function: 1946 | def exp_decay(global_step): 1947 | return tf.train.exponential_decay( 1948 | learning_rate=0.1, global_step, 1949 | decay_steps=2, decay_rate=0.001) 1950 | tf_random_seed: Random seed for TensorFlow initializers. 1951 | Setting this value, allows consistency between reruns. 1952 | continue_training: when continue_training is True, once initialized 1953 | model will be continually trained on every call of fit. 1954 | num_cores: Number of cores to be used. (default: 4) 1955 | verbose: Controls the verbosity, possible values: 1956 | 0: the algorithm and debug information is muted. 1957 | 1: trainer prints the progress. 1958 | 2: log device placement is printed. 1959 | max_to_keep: The maximum number of recent checkpoint files to keep. 1960 | As new files are created, older files are deleted. 1961 | If None or 0, all checkpoint files are kept. 1962 | Defaults to 5 (that is, the 5 most recent checkpoint files are kept.) 1963 | keep_checkpoint_every_n_hours: Number of hours between each checkpoint 1964 | to be saved. The default value of 10,000 hours effectively disables the feature. 1965 | - - - 1966 | 1967 | #### `skflow.TensorFlowRNNRegressor.__init__(rnn_size, cell_type='gru', num_layers=1, input_op_fn=null_input_op_fn, initial_state=None, bidirectional=False, sequence_length=None, n_classes=0, tf_master='', batch_size=32, steps=50, optimizer='SGD', learning_rate=0.1, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowRNNRegressor.__init__} 1968 | 1969 | 1970 | 1971 | 1972 | - - - 1973 | 1974 | #### `skflow.TensorFlowRNNRegressor.bias_` {#TensorFlowRNNRegressor.bias_} 1975 | 1976 | Returns bias of the rnn layer.
1977 | 1978 | 1979 | - - - 1980 | 1981 | #### `skflow.TensorFlowRNNRegressor.fit(X, y, monitor=None, logdir=None)` {#TensorFlowRNNRegressor.fit} 1982 | 1983 | Builds a neural network model given provided `model_fn` and training 1984 | data X and y. 1985 | 1986 | Note: called first time constructs the graph and initializers 1987 | variables. Consecutives times it will continue training the same model. 1988 | This logic follows partial_fit() interface in scikit-learn. 1989 | 1990 | To restart learning, create new estimator. 1991 | 1992 | ##### Args: 1993 | 1994 | 1995 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 1996 | iterator that returns arrays of features. The training input 1997 | samples for fitting the model. 1998 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 1999 | iterator that returns array of targets. The training target values 2000 | (class labels in classification, real numbers in regression). 2001 | * `monitor`: Monitor object to print training progress and invoke early stopping 2002 | * `logdir`: the directory to save the log file that can be used for 2003 | optional visualization. 2004 | 2005 | ##### Returns: 2006 | 2007 | Returns self. 2008 | 2009 | 2010 | - - - 2011 | 2012 | #### `skflow.TensorFlowRNNRegressor.get_params(deep=True)` {#TensorFlowRNNRegressor.get_params} 2013 | 2014 | Get parameters for this estimator. 2015 | 2016 | Parameters 2017 | ---------- 2018 | deep: boolean, optional 2019 | If True, will return the parameters for this estimator and 2020 | contained subobjects that are estimators. 2021 | 2022 | Returns 2023 | ------- 2024 | params : mapping of string to any 2025 | Parameter names mapped to their values. 2026 | 2027 | 2028 | - - - 2029 | 2030 | #### `skflow.TensorFlowRNNRegressor.get_tensor(name)` {#TensorFlowRNNRegressor.get_tensor} 2031 | 2032 | Returns tensor by name. 2033 | 2034 | ##### Args: 2035 | 2036 | 2037 | * `name`: string, name of the tensor. 
2038 | 2039 | ##### Returns: 2040 | 2041 | Tensor. 2042 | 2043 | 2044 | - - - 2045 | 2046 | #### `skflow.TensorFlowRNNRegressor.get_tensor_value(name)` {#TensorFlowRNNRegressor.get_tensor_value} 2047 | 2048 | Returns value of the tensor give by name. 2049 | 2050 | ##### Args: 2051 | 2052 | 2053 | * `name`: string, name of the tensor. 2054 | 2055 | ##### Returns: 2056 | 2057 | Numpy array - value of the tensor. 2058 | 2059 | 2060 | - - - 2061 | 2062 | #### `skflow.TensorFlowRNNRegressor.partial_fit(X, y)` {#TensorFlowRNNRegressor.partial_fit} 2063 | 2064 | Incremental fit on a batch of samples. 2065 | 2066 | This method is expected to be called several times consecutively 2067 | on different or the same chunks of the dataset. This either can 2068 | implement iterative training or out-of-core/online training. 2069 | 2070 | This is especially useful when the whole dataset is too big to 2071 | fit in memory at the same time. Or when model is taking long time 2072 | to converge, and you want to split up training into subparts. 2073 | 2074 | ##### Args: 2075 | 2076 | 2077 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 2078 | iterator that returns arrays of features. The training input 2079 | samples for fitting the model. 2080 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 2081 | iterator that returns array of targets. The training target values 2082 | (class label in classification, real numbers in regression). 2083 | 2084 | ##### Returns: 2085 | 2086 | Returns self. 2087 | 2088 | 2089 | - - - 2090 | 2091 | #### `skflow.TensorFlowRNNRegressor.predict(X, axis=1, batch_size=-1)` {#TensorFlowRNNRegressor.predict} 2092 | 2093 | Predict class or regression for X. 2094 | 2095 | For a classification model, the predicted class for each sample in X is 2096 | returned. For a regression model, the predicted value based on X is 2097 | returned. 
2098 | 2099 | ##### Args: 2100 | 2101 | 2102 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 2103 | * `axis`: Which axis to argmax for classification. 2104 | By default axis 1 (next after batch) is used. 2105 | Use 2 for sequence predictions. 2106 | * `batch_size`: If test set is too big, use batch size to split 2107 | it into mini batches. By default full dataset is used. 2108 | 2109 | ##### Returns: 2110 | 2111 | 2112 | * `y`: array of shape [n_samples]. The predicted classes or predicted 2113 | value. 2114 | 2115 | 2116 | - - - 2117 | 2118 | #### `skflow.TensorFlowRNNRegressor.predict_proba(X, batch_size=-1)` {#TensorFlowRNNRegressor.predict_proba} 2119 | 2120 | Predict class probability of the input samples X. 2121 | 2122 | ##### Args: 2123 | 2124 | 2125 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 2126 | * `batch_size`: If test set is too big, use batch size to split 2127 | it into mini batches. By default full dataset is used. 2128 | 2129 | ##### Returns: 2130 | 2131 | 2132 | * `y`: array of shape [n_samples, n_classes]. The predicted 2133 | probabilities for each class. 2134 | 2135 | 2136 | - - - 2137 | 2138 | #### `skflow.TensorFlowRNNRegressor.restore(cls, path, config_addon=None)` {#TensorFlowRNNRegressor.restore} 2139 | 2140 | Restores model from given path. 2141 | 2142 | ##### Args: 2143 | 2144 | 2145 | * `path`: Path to the checkpoints and other model information. 2146 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 2147 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 2148 | 2149 | ##### Returns: 2150 | 2151 | Estimator, object of the subclass of TensorFlowEstimator. 2152 | 2153 | 2154 | - - - 2155 | 2156 | #### `skflow.TensorFlowRNNRegressor.save(path)` {#TensorFlowRNNRegressor.save} 2157 | 2158 | Saves checkpoints and graph to given path. 2159 | 2160 | ##### Args: 2161 | 2162 | 2163 | * `path`: Folder to save model to.
2164 | 2165 | 2166 | - - - 2167 | 2168 | #### `skflow.TensorFlowRNNRegressor.score(X, y, sample_weight=None)` {#TensorFlowRNNRegressor.score} 2169 | 2170 | Returns the coefficient of determination R^2 of the prediction. 2171 | 2172 | The coefficient R^2 is defined as (1 - u/v), where u is the regression 2173 | sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual 2174 | sum of squares ((y_true - y_true.mean()) ** 2).sum(). 2175 | Best possible score is 1.0 and it can be negative (because the 2176 | model can be arbitrarily worse). A constant model that always 2177 | predicts the expected value of y, disregarding the input features, 2178 | would get a R^2 score of 0.0. 2179 | 2180 | Parameters 2181 | ---------- 2182 | X : array-like, shape = (n_samples, n_features) 2183 | Test samples. 2184 | 2185 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 2186 | True values for X. 2187 | 2188 | sample_weight : array-like, shape = [n_samples], optional 2189 | Sample weights. 2190 | 2191 | Returns 2192 | ------- 2193 | score : float 2194 | R^2 of self.predict(X) wrt. y. 2195 | 2196 | 2197 | - - - 2198 | 2199 | #### `skflow.TensorFlowRNNRegressor.set_params(**params)` {#TensorFlowRNNRegressor.set_params} 2200 | 2201 | Set the parameters of this estimator. 2202 | 2203 | The method works on simple estimators as well as on nested objects 2204 | (such as pipelines). The former have parameters of the form 2205 | ``__`` so that it's possible to update each 2206 | component of a nested object. 2207 | 2208 | Returns 2209 | ------- 2210 | self 2211 | 2212 | 2213 | - - - 2214 | 2215 | #### `skflow.TensorFlowRNNRegressor.weights_` {#TensorFlowRNNRegressor.weights_} 2216 | 2217 | Returns weights of the rnn layer. 2218 | 2219 | 2220 | 2221 | - - - 2222 | 2223 | ### `class skflow.TensorFlowRegressor` {#TensorFlowRegressor} 2224 | 2225 | TensorFlow Linear Regression model. 
2226 | - - - 2227 | 2228 | #### `skflow.TensorFlowRegressor.__init__(n_classes=0, tf_master='', batch_size=32, steps=200, optimizer='SGD', learning_rate=0.1, tf_random_seed=42, continue_training=False, config_addon=None, verbose=1, max_to_keep=5, keep_checkpoint_every_n_hours=10000)` {#TensorFlowRegressor.__init__} 2229 | 2230 | 2231 | 2232 | 2233 | - - - 2234 | 2235 | #### `skflow.TensorFlowRegressor.bias_` {#TensorFlowRegressor.bias_} 2236 | 2237 | Returns bias of the linear regression. 2238 | 2239 | 2240 | - - - 2241 | 2242 | #### `skflow.TensorFlowRegressor.fit(X, y, monitor=None, logdir=None)` {#TensorFlowRegressor.fit} 2243 | 2244 | Builds a neural network model given provided `model_fn` and training 2245 | data X and y. 2246 | 2247 | Note: called first time constructs the graph and initializers 2248 | variables. Consecutives times it will continue training the same model. 2249 | This logic follows partial_fit() interface in scikit-learn. 2250 | 2251 | To restart learning, create new estimator. 2252 | 2253 | ##### Args: 2254 | 2255 | 2256 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 2257 | iterator that returns arrays of features. The training input 2258 | samples for fitting the model. 2259 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 2260 | iterator that returns array of targets. The training target values 2261 | (class labels in classification, real numbers in regression). 2262 | * `monitor`: Monitor object to print training progress and invoke early stopping 2263 | * `logdir`: the directory to save the log file that can be used for 2264 | optional visualization. 2265 | 2266 | ##### Returns: 2267 | 2268 | Returns self. 2269 | 2270 | 2271 | - - - 2272 | 2273 | #### `skflow.TensorFlowRegressor.get_params(deep=True)` {#TensorFlowRegressor.get_params} 2274 | 2275 | Get parameters for this estimator. 
2276 | 2277 | Parameters 2278 | ---------- 2279 | deep: boolean, optional 2280 | If True, will return the parameters for this estimator and 2281 | contained subobjects that are estimators. 2282 | 2283 | Returns 2284 | ------- 2285 | params : mapping of string to any 2286 | Parameter names mapped to their values. 2287 | 2288 | 2289 | - - - 2290 | 2291 | #### `skflow.TensorFlowRegressor.get_tensor(name)` {#TensorFlowRegressor.get_tensor} 2292 | 2293 | Returns tensor by name. 2294 | 2295 | ##### Args: 2296 | 2297 | 2298 | * `name`: string, name of the tensor. 2299 | 2300 | ##### Returns: 2301 | 2302 | Tensor. 2303 | 2304 | 2305 | - - - 2306 | 2307 | #### `skflow.TensorFlowRegressor.get_tensor_value(name)` {#TensorFlowRegressor.get_tensor_value} 2308 | 2309 | Returns value of the tensor give by name. 2310 | 2311 | ##### Args: 2312 | 2313 | 2314 | * `name`: string, name of the tensor. 2315 | 2316 | ##### Returns: 2317 | 2318 | Numpy array - value of the tensor. 2319 | 2320 | 2321 | - - - 2322 | 2323 | #### `skflow.TensorFlowRegressor.partial_fit(X, y)` {#TensorFlowRegressor.partial_fit} 2324 | 2325 | Incremental fit on a batch of samples. 2326 | 2327 | This method is expected to be called several times consecutively 2328 | on different or the same chunks of the dataset. This either can 2329 | implement iterative training or out-of-core/online training. 2330 | 2331 | This is especially useful when the whole dataset is too big to 2332 | fit in memory at the same time. Or when model is taking long time 2333 | to converge, and you want to split up training into subparts. 2334 | 2335 | ##### Args: 2336 | 2337 | 2338 | * `X`: matrix or tensor of shape [n_samples, n_features...]. Can be 2339 | iterator that returns arrays of features. The training input 2340 | samples for fitting the model. 2341 | * `y`: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be 2342 | iterator that returns array of targets. 
The training target values 2343 | (class label in classification, real numbers in regression). 2344 | 2345 | ##### Returns: 2346 | 2347 | Returns self. 2348 | 2349 | 2350 | - - - 2351 | 2352 | #### `skflow.TensorFlowRegressor.predict(X, axis=1, batch_size=-1)` {#TensorFlowRegressor.predict} 2353 | 2354 | Predict class or regression for X. 2355 | 2356 | For a classification model, the predicted class for each sample in X is 2357 | returned. For a regression model, the predicted value based on X is 2358 | returned. 2359 | 2360 | ##### Args: 2361 | 2362 | 2363 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 2364 | * `axis`: Which axis to argmax for classification. 2365 | By default axis 1 (next after batch) is used. 2366 | Use 2 for sequence predictions. 2367 | * `batch_size`: If test set is too big, use batch size to split 2368 | it into mini batches. By default full dataset is used. 2369 | 2370 | ##### Returns: 2371 | 2372 | 2373 | * `y`: array of shape [n_samples]. The predicted classes or predicted 2374 | value. 2375 | 2376 | 2377 | - - - 2378 | 2379 | #### `skflow.TensorFlowRegressor.predict_proba(X, batch_size=-1)` {#TensorFlowRegressor.predict_proba} 2380 | 2381 | Predict class probability of the input samples X. 2382 | 2383 | ##### Args: 2384 | 2385 | 2386 | * `X`: array-like matrix, [n_samples, n_features...] or iterator. 2387 | * `batch_size`: If test set is too big, use batch size to split 2388 | it into mini batches. By default full dataset is used. 2389 | 2390 | ##### Returns: 2391 | 2392 | 2393 | * `y`: array of shape [n_samples, n_classes]. The predicted 2394 | probabilities for each class. 2395 | 2396 | 2397 | - - - 2398 | 2399 | #### `skflow.TensorFlowRegressor.restore(cls, path, config_addon=None)` {#TensorFlowRegressor.restore} 2400 | 2401 | Restores model from give path. 2402 | 2403 | ##### Args: 2404 | 2405 | 2406 | * `path`: Path to the checkpoints and other model information. 
2407 | * `config_addon`: ConfigAddon object that controls the configurations of the session, 2408 | e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured. 2409 | 2410 | ##### Returns: 2411 | 2412 | Estiamator, object of the subclass of TensorFlowEstimator. 2413 | 2414 | 2415 | - - - 2416 | 2417 | #### `skflow.TensorFlowRegressor.save(path)` {#TensorFlowRegressor.save} 2418 | 2419 | Saves checkpoints and graph to given path. 2420 | 2421 | ##### Args: 2422 | 2423 | 2424 | * `path`: Folder to save model to. 2425 | 2426 | 2427 | - - - 2428 | 2429 | #### `skflow.TensorFlowRegressor.score(X, y, sample_weight=None)` {#TensorFlowRegressor.score} 2430 | 2431 | Returns the coefficient of determination R^2 of the prediction. 2432 | 2433 | The coefficient R^2 is defined as (1 - u/v), where u is the regression 2434 | sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual 2435 | sum of squares ((y_true - y_true.mean()) ** 2).sum(). 2436 | Best possible score is 1.0 and it can be negative (because the 2437 | model can be arbitrarily worse). A constant model that always 2438 | predicts the expected value of y, disregarding the input features, 2439 | would get a R^2 score of 0.0. 2440 | 2441 | Parameters 2442 | ---------- 2443 | X : array-like, shape = (n_samples, n_features) 2444 | Test samples. 2445 | 2446 | y : array-like, shape = (n_samples) or (n_samples, n_outputs) 2447 | True values for X. 2448 | 2449 | sample_weight : array-like, shape = [n_samples], optional 2450 | Sample weights. 2451 | 2452 | Returns 2453 | ------- 2454 | score : float 2455 | R^2 of self.predict(X) wrt. y. 2456 | 2457 | 2458 | - - - 2459 | 2460 | #### `skflow.TensorFlowRegressor.set_params(**params)` {#TensorFlowRegressor.set_params} 2461 | 2462 | Set the parameters of this estimator. 2463 | 2464 | The method works on simple estimators as well as on nested objects 2465 | (such as pipelines). 
The former have parameters of the form 2466 | ``__`` so that it's possible to update each 2467 | component of a nested object. 2468 | 2469 | Returns 2470 | ------- 2471 | self 2472 | 2473 | 2474 | - - - 2475 | 2476 | #### `skflow.TensorFlowRegressor.weights_` {#TensorFlowRegressor.weights_} 2477 | 2478 | Returns weights of the linear regression. 2479 | 2480 | 2481 | 2482 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # TensorFlow Python reference documentation 6 | 7 | * **[Estimators](../../api_docs/python/estimators)**: 8 | * [`TensorFlowClassifier`](../../api_docs/python/estimators#TensorFlowClassifier) 9 | * [`TensorFlowDNNClassifier`](../../api_docs/python/estimators#TensorFlowDNNClassifier) 10 | * [`TensorFlowDNNRegressor`](../../api_docs/python/estimators#TensorFlowDNNRegressor) 11 | * [`TensorFlowEstimator`](../../api_docs/python/estimators#TensorFlowEstimator) 12 | * [`TensorFlowLinearClassifier`](../../api_docs/python/estimators#TensorFlowLinearClassifier) 13 | * [`TensorFlowLinearRegressor`](../../api_docs/python/estimators#TensorFlowLinearRegressor) 14 | * [`TensorFlowRegressor`](../../api_docs/python/estimators#TensorFlowRegressor) 15 | * [`TensorFlowRNNClassifier`](../../api_docs/python/estimators#TensorFlowRNNClassifier) 16 | * [`TensorFlowRNNRegressor`](../../api_docs/python/estimators#TensorFlowRNNRegressor) 17 | 18 | * **[IO](../../api_docs/python/io)**: 19 | * [`extract_dask_data`](../../api_docs/python/io#extract_dask_data) 20 | * [`extract_dask_labels`](../../api_docs/python/io#extract_dask_labels) 21 | * [`extract_pandas_data`](../../api_docs/python/io#extract_pandas_data) 22 | * [`extract_pandas_labels`](../../api_docs/python/io#extract_pandas_labels) 23 | * [`extract_pandas_matrix`](../../api_docs/python/io#extract_pandas_matrix) 24 | 25 | * 
**[Trainer](../../api_docs/python/trainer)**: 26 | * [`TensorFlowTrainer`](../../api_docs/python/trainer#TensorFlowTrainer) 27 | 28 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/io.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # IO 6 | [TOC] 7 | 8 | Tools to allow different io formats. 9 | 10 | ## Other Functions and Classes 11 | - - - 12 | 13 | ### `skflow.extract_dask_data(data)` {#extract_dask_data} 14 | 15 | Extract data from dask.Series or dask.DataFrame for predictors 16 | 17 | 18 | - - - 19 | 20 | ### `skflow.extract_dask_labels(labels)` {#extract_dask_labels} 21 | 22 | Extract data from dask.Series for labels 23 | 24 | 25 | - - - 26 | 27 | ### `skflow.extract_pandas_data(data)` {#extract_pandas_data} 28 | 29 | Extract data from pandas.DataFrame for predictors 30 | 31 | 32 | - - - 33 | 34 | ### `skflow.extract_pandas_labels(labels)` {#extract_pandas_labels} 35 | 36 | Extract data from pandas.DataFrame for labels 37 | 38 | 39 | - - - 40 | 41 | ### `skflow.extract_pandas_matrix(data)` {#extract_pandas_matrix} 42 | 43 | Extracts numpy matrix from pandas DataFrame. 44 | 45 | 46 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/models.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # Models 6 | [TOC] 7 | 8 | Various high level TF models. 9 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/ops.array_ops.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Tensor Transformations 4 | 5 | Note: Functions taking `Tensor` arguments can also take anything accepted by 6 | [`tf.convert_to_tensor`](framework.md#convert_to_tensor). 7 | 8 | [TOC] 9 | 10 | TensorFlow ops for array / tensor manipulation. 
11 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/ops.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # Tensor Transformations 6 | 7 | Note: Functions taking `Tensor` arguments can also take anything accepted by 8 | [`tf.convert_to_tensor`](framework.md#convert_to_tensor). 9 | 10 | [TOC] 11 | 12 | Main Scikit Flow module. 13 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/preprocessing.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # Preprocessing 6 | [TOC] 7 | 8 | Preprocessing tools useful for building models. 9 | -------------------------------------------------------------------------------- /g3doc/api_docs/python/trainer.md: -------------------------------------------------------------------------------- 1 | --- 2 | --- 3 | 4 | 5 | # Trainer 6 | [TOC] 7 | 8 | Generic trainer for TensorFlow models. 9 | 10 | ## Other Functions and Classes 11 | - - - 12 | 13 | ### `class skflow.TensorFlowTrainer` {#TensorFlowTrainer} 14 | 15 | General trainer class. 16 | 17 | Attributes: 18 | model: Model object. 19 | gradients: Gradients tensor. 20 | - - - 21 | 22 | #### `skflow.TensorFlowTrainer.__init__(loss, global_step, optimizer, learning_rate, clip_gradients=5.0)` {#TensorFlowTrainer.__init__} 23 | 24 | Build a trainer part of graph. 25 | 26 | ##### Args: 27 | 28 | 29 | * `loss`: Tensor that evaluates to model's loss. 30 | * `global_step`: Tensor with global step of the model. 31 | * `optimizer`: Name of the optimizer class (SGD, Adam, Adagrad) or class. 32 | * `learning_rate`: If this is constant float value, no decay function is used. 33 | Instead, a customized decay function can be passed that accepts 34 | global_step as parameter and returns a Tensor. 35 | e.g. 
exponential decay function: 36 | def exp_decay(global_step): 37 | return tf.train.exponential_decay( 38 | learning_rate=0.1, global_step=global_step, 39 | decay_steps=2, decay_rate=0.001) 40 | 41 | ##### Raises: 42 | 43 | 44 | * `ValueError`: if learning_rate is not a float or a callable. 45 | 46 | 47 | - - - 48 | 49 | #### `skflow.TensorFlowTrainer.initialize(sess)` {#TensorFlowTrainer.initialize} 50 | 51 | Initalizes all variables. 52 | 53 | ##### Args: 54 | 55 | 56 | * `sess`: Session object. 57 | 58 | ##### Returns: 59 | 60 | Values of initializers. 61 | 62 | 63 | - - - 64 | 65 | #### `skflow.TensorFlowTrainer.train(sess, feed_dict_fn, steps, monitor, summary_writer=None, summaries=None, feed_params_fn=None)` {#TensorFlowTrainer.train} 66 | 67 | Trains a model for given number of steps, given feed_dict function. 68 | 69 | ##### Args: 70 | 71 | 72 | * `sess`: Session object. 73 | * `feed_dict_fn`: Function that will return a feed dictionary. 74 | * `summary_writer`: SummaryWriter object to use for writing summaries. 75 | * `steps`: Number of steps to run. 76 | * `monitor`: Monitor object to track training progress and induce early stopping 77 | * `summaries`: Joined object of all summaries that should be ran. 78 | 79 | ##### Returns: 80 | 81 | List of losses for each step. 82 | 83 | 84 | 85 | -------------------------------------------------------------------------------- /g3doc/get_started/index.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | Below are few simple examples of the API. For more examples, please see `examples `__. 4 | 5 | ## General tips 6 | 7 | - It's useful to re-scale dataset before passing to estimator to 0 mean and unit standard deviation. Stochastic Gradient Descent doesn't always do the right thing when variable are very different scale. 8 | 9 | - Categorical variables should be managed before passing input to the estimator. 
10 | 11 | ## Linear Classifier 12 | 13 | Simple linear classification: 14 | 15 | .. code:: python 16 | 17 | import skflow 18 | from sklearn import datasets, metrics 19 | 20 | iris = datasets.load_iris() 21 | classifier = skflow.TensorFlowLinearClassifier(n_classes=3) 22 | classifier.fit(iris.data, iris.target) 23 | score = metrics.accuracy_score(iris.target, classifier.predict(iris.data)) 24 | print("Accuracy: %f" % score) 25 | 26 | ## Linear Regressor 27 | 28 | Simple linear regression: 29 | 30 | .. code:: python 31 | 32 | import skflow 33 | from sklearn import datasets, metrics, preprocessing 34 | 35 | boston = datasets.load_boston() 36 | X = preprocessing.StandardScaler().fit_transform(boston.data) 37 | regressor = skflow.TensorFlowLinearRegressor() 38 | regressor.fit(X, boston.target) 39 | score = metrics.mean_squared_error(regressor.predict(X), boston.target) 40 | print ("MSE: %f" % score) 41 | 42 | ## Deep Neural Network 43 | 44 | Example of 3 layer network with 10, 20 and 10 hidden units respectively: 45 | 46 | .. code:: python 47 | 48 | import skflow 49 | from sklearn import datasets, metrics 50 | 51 | iris = datasets.load_iris() 52 | classifier = skflow.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3) 53 | classifier.fit(iris.data, iris.target) 54 | score = metrics.accuracy_score(iris.target, classifier.predict(iris.data)) 55 | print("Accuracy: %f" % score) 56 | 57 | ## Custom model 58 | 59 | Example of how to pass a custom model to the TensorFlowEstimator: 60 | 61 | .. 
code:: python 62 | 63 | import skflow 64 | from sklearn import datasets, metrics 65 | 66 | iris = datasets.load_iris() 67 | 68 | def my_model(X, y): 69 | """This is DNN with 10, 20, 10 hidden layers, and dropout of 0.5 probability.""" 70 | layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5) 71 | return skflow.models.logistic_regression(layers, y) 72 | 73 | classifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3) 74 | classifier.fit(iris.data, iris.target) 75 | score = metrics.accuracy_score(iris.target, classifier.predict(iris.data)) 76 | print("Accuracy: %f" % score) 77 | 78 | ## Saving / Restoring models 79 | 80 | Each estimator has a ``save`` method which takes folder path where all model information will be saved. For restoring you can just call ``skflow.TensorFlowEstimator.restore(path)`` and it will return object of your class. 81 | 82 | Some example code: 83 | 84 | .. code:: python 85 | 86 | import skflow 87 | 88 | classifier = skflow.TensorFlowLinearRegression() 89 | classifier.fit(...) 90 | classifier.save('/tmp/tf_examples/my_model_1/') 91 | 92 | new_classifier = TensorFlowEstimator.restore('/tmp/tf_examples/my_model_2') 93 | new_classifier.predict(...) 94 | 95 | ## Summaries 96 | 97 | To get nice visualizations and summaries you can use ``logdir`` parameter on ``fit``. It will start writing summaries for ``loss`` and histograms for variables in your model. You can also add custom summaries in your custom model function by calling ``tf.summary`` and passing Tensors to report. 98 | 99 | .. code:: python 100 | 101 | classifier = skflow.TensorFlowLinearRegression() 102 | classifier.fit(X, y, logdir='/tmp/tf_examples/my_model_1/') 103 | 104 | Then run next command in command line: 105 | 106 | .. code:: bash 107 | 108 | tensorboard --logdir=/tmp/tf_examples/my_model_1 109 | 110 | and follow reported url. 
111 | 112 | Graph visualization: |Text classification RNN Graph| 113 | 114 | Loss visualization: |Text classification RNN Loss| 115 | 116 | # More examples 117 | 118 | See examples folder for: 119 | 120 | - Easy way to handle categorical variables - words are just an example of categorical variable. 121 | - Text Classification - see examples for RNN, CNN on word and characters. 122 | - Language modeling and text sequence to sequence. 123 | - Images (CNNs) - see example for digit recognition. 124 | - More & deeper - different examples showing DNNs and CNNs 125 | 126 | -------------------------------------------------------------------------------- /g3doc/how_to/index.md: -------------------------------------------------------------------------------- 1 | # How to 2 | 3 | ## Re-generate API documentation 4 | 5 | To regenerate API documentation, run this commands from main git folder: 6 | 7 | cd scripts/docs 8 | ./gen_docs.sh 9 | 10 | Then review and commit changes. 11 | 12 | -------------------------------------------------------------------------------- /g3doc/images/text_classification_rnn_graph.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensorflow/skflow/947dc93b6f6185b9dfcd9d9ab7280f0aa0efcbf5/g3doc/images/text_classification_rnn_graph.png -------------------------------------------------------------------------------- /g3doc/images/text_classification_rnn_loss.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensorflow/skflow/947dc93b6f6185b9dfcd9d9ab7280f0aa0efcbf5/g3doc/images/text_classification_rnn_loss.png -------------------------------------------------------------------------------- /g3doc/index.md: -------------------------------------------------------------------------------- 1 | # Scikit Flow 2 | 3 | This is a simplified interface for TensorFlow, to get people started on predictive analytics and data 
mining. 4 | 5 | Library covers variety of needs from linear models to *Deep Learning* applications like text and image understanding. 6 | 7 | ## Why *TensorFlow*? 8 | 9 | - TensorFlow provides a good backbone for building different shapes of machine learning applications. 10 | - It will continue to evolve both in the distributed direction and as general pipelinining machinery. 11 | 12 | ## Why *Scikit Flow*? 13 | 14 | - To smooth the transition from the Scikit Learn world of one-liner machine learning into the more open world of building different shapes of ML models. You can start by using fit/predict and slide into TensorFlow APIs as you are getting comfortable. 15 | - To provide a set of reference models that would be easy to integrate with existing code. 16 | 17 | # Installation 18 | 19 | ## Dependencies 20 | 21 | - Python: 2.7, 3.4+ 22 | - Scikit learn: 0.16, 0.17, 0.18+ 23 | - Tensorflow: 0.7+ 24 | 25 | First, you need to make sure you have `TensorFlow `__ and `Scikit Learn `__ installed. 26 | 27 | Run the following to install the stable version from PyPI: 28 | 29 | pip install skflow 30 | 31 | Or run the following to install from the development version from Github: 32 | 33 | pip install git+git://github.com/tensorflow/skflow.git 34 | 35 | ## Tutorial 36 | 37 | - `Introduction to Scikit Flow and why you want to start learning 38 | TensorFlow `__ 39 | - `DNNs, custom model and Digit recognition 40 | examples `__ 41 | - `Categorical variables: One hot vs Distributed 42 | representation `__ 43 | - More coming soon. 44 | 45 | ## Community 46 | 47 | - Twitter `#skflow `__. 48 | - StackOverflow with `skflow tag `__ for questions and struggles. 49 | - Github `issues `__ for technical discussions and feature requests. 50 | - `Gitter channel `__ for non-trivial discussions. 
51 | 52 | ## Table of Contents 53 | 54 | 55 | -------------------------------------------------------------------------------- /scripts/docs/docs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | """Updates generated docs from Python doc comments. 16 | 17 | Updates the documentation files. 18 | """ 19 | 20 | from __future__ import absolute_import 21 | from __future__ import division 22 | from __future__ import print_function 23 | 24 | import inspect 25 | import os 26 | import re 27 | import sys 28 | 29 | 30 | _arg_re = re.compile(" *([*]{0,2}[a-zA-Z][a-zA-Z0-9_]*):") 31 | _section_re = re.compile("([A-Z][a-zA-Z ]*):$") 32 | _always_drop_symbol_re = re.compile("_[_a-zA-Z0-9]") 33 | _anchor_re = re.compile(r"^[\w.]+$") 34 | _member_mark = "@@" 35 | 36 | 37 | class Document(object): 38 | """Base class for an automatically generated document.""" 39 | 40 | def write_markdown_to_file(self, f): 41 | """Writes a Markdown-formatted version of this document to file `f`. 42 | 43 | Args: 44 | f: The output file. 
45 | """ 46 | raise NotImplementedError("Document.WriteToFile") 47 | 48 | 49 | class Index(Document): 50 | """An automatically generated index for a collection of documents.""" 51 | 52 | def __init__(self, module_to_name, members, filename_to_library_map, 53 | path_prefix): 54 | """Creates a new Index. 55 | 56 | Args: 57 | module_to_name: Dictionary mapping modules to short names. 58 | members: Dictionary mapping member name to (fullname, member). 59 | filename_to_library_map: A list of (filename, Library) pairs. The order 60 | corresponds to the order in which the libraries appear in the index. 61 | path_prefix: Prefix to add to links in the index. 62 | """ 63 | self._module_to_name = module_to_name 64 | self._members = members 65 | self._filename_to_library_map = filename_to_library_map 66 | self._path_prefix = path_prefix 67 | 68 | def write_markdown_to_file(self, f): 69 | """Writes this index to file `f`. 70 | 71 | The output is formatted as an unordered list. Each list element 72 | contains the title of the library, followed by a list of symbols 73 | in that library hyperlinked to the corresponding anchor in that 74 | library. 75 | 76 | Args: 77 | f: The output file. 78 | """ 79 | print("---", file=f) 80 | print("---", file=f) 81 | print("", file=f) 82 | print("", file=f) 83 | print("# TensorFlow Python reference documentation", file=f) 84 | print("", file=f) 85 | fullname_f = lambda name: self._members[name][0] 86 | anchor_f = lambda name: _get_anchor(self._module_to_name, fullname_f(name)) 87 | 88 | for filename, library in self._filename_to_library_map: 89 | sorted_names = sorted(library.mentioned, key=lambda x: (str.lower(x), x)) 90 | member_names = [n for n in sorted_names if n in self._members] 91 | # TODO: This is a hack that should be removed as soon as the website code 92 | # allows it. 
93 | full_filename = self._path_prefix + filename 94 | links = ["[`%s`](%s#%s)" % (name, full_filename[:-3], anchor_f(name)) 95 | for name in member_names] 96 | if links: 97 | print("* **[%s](%s)**:" % (library.title, full_filename[:-3]), file=f) 98 | for link in links: 99 | print(" * %s" % link, file=f) 100 | print("", file=f) 101 | 102 | 103 | def collect_members(module_to_name): 104 | """Collect all symbols from a list of modules. 105 | 106 | Args: 107 | module_to_name: Dictionary mapping modules to short names. 108 | 109 | Returns: 110 | Dictionary mapping name to (fullname, member) pairs. 111 | """ 112 | members = {} 113 | for module, module_name in module_to_name.items(): 114 | all_names = getattr(module, "__all__", None) 115 | for name, member in inspect.getmembers(module): 116 | if ((inspect.isfunction(member) or inspect.isclass(member)) and 117 | not _always_drop_symbol_re.match(name) and 118 | (all_names is None or name in all_names)): 119 | fullname = '%s.%s' % (module_name, name) 120 | if name in members: 121 | other_fullname, other_member = members[name] 122 | if member is not other_member: 123 | raise RuntimeError("Short name collision between %s and %s" % 124 | (fullname, other_fullname)) 125 | if len(fullname) == len(other_fullname): 126 | raise RuntimeError("Can't decide whether to use %s or %s for %s: " 127 | "both full names have length %d" % 128 | (fullname, other_fullname, name, len(fullname))) 129 | if len(fullname) > len(other_fullname): 130 | continue # Use the shorter full name 131 | members[name] = fullname, member 132 | return members 133 | 134 | 135 | def _get_anchor(module_to_name, fullname): 136 | """Turn a full member name into an anchor. 137 | 138 | Args: 139 | module_to_name: Dictionary mapping modules to short names. 140 | fullname: Fully qualified name of symbol. 141 | 142 | Returns: 143 | HTML anchor string. The longest module name prefix of fullname is 144 | removed to make the anchor. 
145 | 146 | Raises: 147 | ValueError: If fullname uses characters invalid in an anchor. 148 | """ 149 | if not _anchor_re.match(fullname): 150 | raise ValueError("'%s' is not a valid anchor" % fullname) 151 | anchor = fullname 152 | for module_name in module_to_name.values(): 153 | if fullname.startswith(module_name + "."): 154 | rest = fullname[len(module_name)+1:] 155 | # Use this prefix iff it is longer than any found before 156 | if len(anchor) > len(rest): 157 | anchor = rest 158 | return anchor 159 | 160 | 161 | class Library(Document): 162 | """An automatically generated document for a set of functions and classes.""" 163 | 164 | def __init__(self, 165 | title, 166 | module, 167 | module_to_name, 168 | members, 169 | documented, 170 | exclude_symbols=(), 171 | prefix=None): 172 | """Creates a new Library. 173 | 174 | Args: 175 | title: A human-readable title for the library. 176 | module: Module to pull high level docstring from (for table of contents, 177 | list of Ops to document, etc.). 178 | module_to_name: Dictionary mapping modules to short names. 179 | members: Dictionary mapping member name to (fullname, member). 180 | documented: Set of documented names to update. 181 | exclude_symbols: A list of specific symbols to exclude. 182 | prefix: A string to include at the beginning of the page. 
183 | """ 184 | self._title = title 185 | self._module = module 186 | self._module_to_name = module_to_name 187 | self._members = dict(members) # Copy since we mutate it below 188 | self._exclude_symbols = frozenset(exclude_symbols) 189 | documented.update(exclude_symbols) 190 | self._documented = documented 191 | self._mentioned = set() 192 | self._prefix = prefix or "" 193 | 194 | @property 195 | def title(self): 196 | """The human-readable title for this library.""" 197 | return self._title 198 | 199 | @property 200 | def mentioned(self): 201 | """Set of names mentioned in this library.""" 202 | return self._mentioned 203 | 204 | @property 205 | def exclude_symbols(self): 206 | """Set of excluded symbols.""" 207 | return self._exclude_symbols 208 | 209 | def _should_include_member(self, name, member): 210 | """Returns True if this member should be included in the document.""" 211 | # Always exclude symbols matching _always_drop_symbol_re. 212 | if _always_drop_symbol_re.match(name): 213 | return False 214 | # Finally, exclude any specifically-excluded symbols. 215 | if name in self._exclude_symbols: 216 | return False 217 | return True 218 | 219 | def get_imported_modules(self, module): 220 | """Returns the list of modules imported from `module`.""" 221 | for name, member in inspect.getmembers(module): 222 | if inspect.ismodule(member): 223 | yield name, member 224 | 225 | def get_class_members(self, cls_name, cls): 226 | """Returns the list of class members to document in `cls`. 227 | 228 | This function filters the class member to ONLY return those 229 | defined by the class. It drops the inherited ones. 230 | 231 | Args: 232 | cls_name: Qualified name of `cls`. 233 | cls: An inspect object of type 'class'. 234 | 235 | Yields: 236 | name, member tuples. 237 | """ 238 | for name, member in inspect.getmembers(cls): 239 | # Only show methods and properties presently. In Python 3, 240 | # methods register as isfunction. 
241 | is_method = inspect.ismethod(member) or inspect.isfunction(member) 242 | if not (is_method or isinstance(member, property)): 243 | continue 244 | if ((is_method and member.__name__ == "__init__") 245 | or self._should_include_member(name, member)): 246 | yield name, ("%s.%s" % (cls_name, name), member) 247 | 248 | def _generate_signature_for_function(self, func): 249 | """Given a function, returns a string representing its args.""" 250 | args_list = [] 251 | argspec = inspect.getargspec(func) 252 | first_arg_with_default = ( 253 | len(argspec.args or []) - len(argspec.defaults or [])) 254 | for arg in argspec.args[:first_arg_with_default]: 255 | if arg == "self": 256 | # Python documentation typically skips `self` when printing method 257 | # signatures. 258 | continue 259 | args_list.append(arg) 260 | 261 | # TODO(mrry): This is a workaround for documenting signature of 262 | # functions that have the @contextlib.contextmanager decorator. 263 | # We should do something better. 264 | if argspec.varargs == "args" and argspec.keywords == "kwds": 265 | original_func = func.__closure__[0].cell_contents 266 | return self._generate_signature_for_function(original_func) 267 | 268 | if argspec.defaults: 269 | for arg, default in zip( 270 | argspec.args[first_arg_with_default:], argspec.defaults): 271 | if callable(default): 272 | args_list.append("%s=%s" % (arg, default.__name__)) 273 | else: 274 | args_list.append("%s=%r" % (arg, default)) 275 | if argspec.varargs: 276 | args_list.append("*" + argspec.varargs) 277 | if argspec.keywords: 278 | args_list.append("**" + argspec.keywords) 279 | return "(" + ", ".join(args_list) + ")" 280 | 281 | def _remove_docstring_indent(self, docstring): 282 | """Remove indenting. 283 | 284 | We follow Python's convention and remove the minimum indent of the lines 285 | after the first, see: 286 | https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation 287 | preserving relative indentation. 
288 | 289 | Args: 290 | docstring: A docstring. 291 | 292 | Returns: 293 | A list of strings, one per line, with the minimum indent stripped. 294 | """ 295 | docstring = docstring or "" 296 | lines = docstring.strip().split("\n") 297 | 298 | min_indent = len(docstring) 299 | for l in lines[1:]: 300 | l = l.rstrip() 301 | if l: 302 | i = 0 303 | while i < len(l) and l[i] == " ": 304 | i += 1 305 | if i < min_indent: min_indent = i 306 | for i in range(1, len(lines)): 307 | l = lines[i].rstrip() 308 | if len(l) >= min_indent: 309 | l = l[min_indent:] 310 | lines[i] = l 311 | return lines 312 | 313 | def _print_formatted_docstring(self, docstring, f): 314 | """Formats the given `docstring` as Markdown and prints it to `f`.""" 315 | lines = self._remove_docstring_indent(docstring) 316 | 317 | # Output the lines, identifying "Args" and other section blocks. 318 | i = 0 319 | 320 | def _at_start_of_section(): 321 | """Returns the header if lines[i] is at start of a docstring section.""" 322 | l = lines[i] 323 | match = _section_re.match(l) 324 | if match and i + 1 < len( 325 | lines) and lines[i + 1].startswith(" "): 326 | return match.group(1) 327 | else: 328 | return None 329 | 330 | while i < len(lines): 331 | l = lines[i] 332 | 333 | section_header = _at_start_of_section() 334 | if section_header: 335 | if i == 0 or lines[i-1]: 336 | print("", file=f) 337 | # Use at least H4 to keep these out of the TOC. 338 | print("##### " + section_header + ":", file=f) 339 | print("", file=f) 340 | i += 1 341 | outputting_list = False 342 | while i < len(lines): 343 | l = lines[i] 344 | # A new section header terminates the section. 345 | if _at_start_of_section(): 346 | break 347 | match = _arg_re.match(l) 348 | if match: 349 | if not outputting_list: 350 | # We need to start a list. In Markdown, a blank line needs to 351 | # precede a list. 
352 | print("", file=f) 353 | outputting_list = True 354 | suffix = l[len(match.group()):].lstrip() 355 | print("* `" + match.group(1) + "`: " + suffix, file=f) 356 | else: 357 | # For lines that don't start with _arg_re, continue the list if it 358 | # has enough indentation. 359 | outputting_list &= l.startswith(" ") 360 | print(l, file=f) 361 | i += 1 362 | else: 363 | print(l, file=f) 364 | i += 1 365 | 366 | def _print_function(self, f, prefix, fullname, func): 367 | """Prints the given function to `f`.""" 368 | heading = prefix + " `" + fullname 369 | if not isinstance(func, property): 370 | heading += self._generate_signature_for_function(func) 371 | heading += "` {#%s}" % _get_anchor(self._module_to_name, fullname) 372 | print(heading, file=f) 373 | print("", file=f) 374 | self._print_formatted_docstring(inspect.getdoc(func), f) 375 | print("", file=f) 376 | 377 | def _write_member_markdown_to_file(self, f, prefix, name, member): 378 | """Print `member` to `f`.""" 379 | if (inspect.isfunction(member) or inspect.ismethod(member) or 380 | isinstance(member, property)): 381 | print("- - -", file=f) 382 | print("", file=f) 383 | self._print_function(f, prefix, name, member) 384 | print("", file=f) 385 | elif inspect.isclass(member): 386 | print("- - -", file=f) 387 | print("", file=f) 388 | print("%s `class %s` {#%s}" % (prefix, name, 389 | _get_anchor(self._module_to_name, name)), 390 | file=f) 391 | print("", file=f) 392 | self._write_class_markdown_to_file(f, name, member) 393 | print("", file=f) 394 | else: 395 | raise RuntimeError("Member %s has unknown type %s" % (name, type(member))) 396 | 397 | def _write_docstring_markdown_to_file(self, f, prefix, docstring, members, 398 | imports): 399 | for l in self._remove_docstring_indent(docstring): 400 | if l.startswith(_member_mark): 401 | name = l[len(_member_mark):].strip(" \t") 402 | if name in members: 403 | self._documented.add(name) 404 | self._mentioned.add(name) 405 | 
self._write_member_markdown_to_file(f, prefix, *members[name]) 406 | del members[name] 407 | elif name in imports: 408 | self._write_module_markdown_to_file(f, imports[name]) 409 | else: 410 | raise ValueError("%s: unknown member `%s`" % (self._title, name)) 411 | else: 412 | print(l, file=f) 413 | 414 | def _write_class_markdown_to_file(self, f, name, cls): 415 | """Write the class doc to `f`. 416 | 417 | Args: 418 | f: File to write to. 419 | prefix: Prefix for names. 420 | cls: class object. 421 | name: name to use. 422 | """ 423 | # Build the list of class methods to document. 424 | methods = dict(self.get_class_members(name, cls)) 425 | # Used later to check if any methods were called out in the class 426 | # docstring. 427 | num_methods = len(methods) 428 | try: 429 | self._write_docstring_markdown_to_file(f, "####", inspect.getdoc(cls), 430 | methods, {}) 431 | except ValueError as e: 432 | raise ValueError(str(e) + " in class `%s`" % cls.__name__) 433 | 434 | # If some methods were not described, describe them now if they are 435 | # defined by the class itself (not inherited). If NO methods were 436 | # described, describe all methods. 437 | # 438 | # TODO(touts): when all methods have been categorized make it an error 439 | # if some methods are not categorized. 
440 | any_method_called_out = (len(methods) != num_methods) 441 | if any_method_called_out: 442 | other_methods = {n: m for n, m in methods.items() if n in cls.__dict__} 443 | if other_methods: 444 | print("\n#### Other Methods", file=f) 445 | else: 446 | other_methods = methods 447 | for name in sorted(other_methods): 448 | self._write_member_markdown_to_file(f, "####", *other_methods[name]) 449 | 450 | def _write_module_markdown_to_file(self, f, module): 451 | imports = dict(self.get_imported_modules(module)) 452 | self._write_docstring_markdown_to_file(f, "###", inspect.getdoc(module), 453 | self._members, imports) 454 | 455 | def write_markdown_to_file(self, f): 456 | """Prints this library to file `f`. 457 | 458 | Args: 459 | f: File to write to. 460 | 461 | Returns: 462 | Dictionary of documented members. 463 | """ 464 | print("---", file=f) 465 | print("---", file=f) 466 | print("", file=f) 467 | print("", file=f) 468 | # TODO(touts): Do not insert these. Let the doc writer put them in 469 | # the module docstring explicitly. 470 | print("#", self._title, file=f) 471 | if self._prefix: 472 | print(self._prefix, file=f) 473 | print("[TOC]", file=f) 474 | print("", file=f) 475 | if self._module is not None: 476 | self._write_module_markdown_to_file(f, self._module) 477 | 478 | def write_other_members(self, f, catch_all=False): 479 | """Writes the leftover members to `f`. 480 | 481 | Args: 482 | f: File to write to. 483 | catch_all: If true, document all missing symbols from any module. 484 | Otherwise, document missing symbols from just this module. 
485 | """ 486 | if catch_all: 487 | names = self._members.items() 488 | else: 489 | names = inspect.getmembers(self._module) 490 | leftovers = [] 491 | for name, _ in names: 492 | if name in self._members and name not in self._documented: 493 | leftovers.append(name) 494 | if leftovers: 495 | print("%s: undocumented members: %d" % (self._title, len(leftovers))) 496 | print("\n## Other Functions and Classes", file=f) 497 | for name in sorted(leftovers): 498 | print(" %s" % name) 499 | self._documented.add(name) 500 | self._mentioned.add(name) 501 | self._write_member_markdown_to_file(f, "###", *self._members[name]) 502 | 503 | def assert_no_leftovers(self): 504 | """Generate an error if there are leftover members.""" 505 | leftovers = [] 506 | for name in self._members.keys(): 507 | if name in self._members and name not in self._documented: 508 | leftovers.append(name) 509 | if leftovers: 510 | raise RuntimeError("%s: undocumented members: %s" % 511 | (self._title, ", ".join(leftovers))) 512 | 513 | 514 | def write_libraries(dir, libraries): 515 | """Write a list of libraries to disk. 516 | 517 | Args: 518 | dir: Output directory. 519 | libraries: List of (filename, library) pairs. 520 | """ 521 | files = [open(os.path.join(dir, k), "w") for k, _ in libraries] 522 | # Document mentioned symbols for all libraries 523 | for f, (_, v) in zip(files, libraries): 524 | v.write_markdown_to_file(f) 525 | # Document symbols that no library mentioned. We do this after writing 526 | # out all libraries so that earlier libraries know what later libraries 527 | # documented. 528 | for f, (_, v) in zip(files, libraries): 529 | v.write_other_members(f) 530 | f.close() 531 | -------------------------------------------------------------------------------- /scripts/docs/gen_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # This script needs to be run from the skflow/tools/docs directory 17 | 18 | set -e 19 | 20 | DOC_DIR="g3doc/api_docs" 21 | 22 | if [ ! -f gen_docs.sh ]; then 23 | echo "This script must be run from inside the skflow/scripts/docs directory." 24 | exit 1 25 | fi 26 | 27 | # go to the skflow/ directory 28 | pushd ../.. 29 | BASE=$(pwd) 30 | 31 | # Make Python docs 32 | python scripts/docs/gen_docs_combined.py --out_dir=$BASE/$DOC_DIR/python 33 | 34 | popd 35 | -------------------------------------------------------------------------------- /scripts/docs/gen_docs_combined.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | """Updates generated docs from Python doc comments.""" 16 | from __future__ import absolute_import 17 | from __future__ import division 18 | from __future__ import print_function 19 | 20 | import os.path 21 | import sys 22 | 23 | import tensorflow as tf 24 | 25 | import docs 26 | 27 | import skflow 28 | 29 | tf.flags.DEFINE_string("out_dir", None, 30 | "Directory to which docs should be written.") 31 | tf.flags.DEFINE_boolean("print_hidden_regex", False, 32 | "Dump a regular expression matching any hidden symbol") 33 | FLAGS = tf.flags.FLAGS 34 | 35 | 36 | PREFIX_TEXT = """ 37 | Note: Functions taking `Tensor` arguments can also take anything accepted by 38 | [`tf.convert_to_tensor`](framework.md#convert_to_tensor). 39 | """ 40 | 41 | 42 | def get_module_to_name(): 43 | return { 44 | skflow: "skflow", 45 | } 46 | 47 | def all_libraries(module_to_name, members, documented): 48 | # A list of (filename, docs.Library) pairs representing the individual files 49 | # that we want to create. 50 | def library(name, title, module=None, **args): 51 | if module is None: 52 | module = sys.modules["skflow" + 53 | ("" if name == "ops" else "." + name)] 54 | return (name + ".md", docs.Library(title=title, 55 | module_to_name=module_to_name, 56 | members=members, 57 | documented=documented, 58 | module=module, 59 | **args)) 60 | return [ 61 | # Splits of module 'skflow'. 
62 | library("estimators", "Estimators"), 63 | library("io", "IO"), 64 | library("preprocessing", "Preprocessing"), 65 | library("trainer", "Trainer"), 66 | library("models", "Models"), 67 | library("ops", "Tensor Transformations", 68 | exclude_symbols=["list_diff"], prefix=PREFIX_TEXT), 69 | ] 70 | 71 | _hidden_symbols = ["Event", "LogMessage", "Summary", "SessionLog", "xrange", 72 | "HistogramProto", "ConfigProto", "NodeDef", "GraphDef", 73 | "GPUOptions", "GraphOptions", "SessionInterface", 74 | "BaseSession", "NameAttrList", "AttrValue", 75 | "TensorArray", "OptimizerOptions", 76 | "CollectionDef", "MetaGraphDef", "QueueRunnerDef", 77 | "SaverDef", "VariableDef", "TestCase", 78 | ] 79 | 80 | def main(unused_argv): 81 | if not FLAGS.out_dir: 82 | tf.logging.error("out_dir not specified") 83 | return -1 84 | 85 | # Document libraries 86 | documented = set() 87 | module_to_name = get_module_to_name() 88 | members = docs.collect_members(module_to_name) 89 | libraries = all_libraries(module_to_name, members, documented) 90 | 91 | # Define catch_all library before calling write_libraries to avoid complaining 92 | # about generically hidden symbols. 93 | catch_all = docs.Library(title="Catch All", module=None, 94 | exclude_symbols=_hidden_symbols, 95 | module_to_name=module_to_name, members=members, 96 | documented=documented) 97 | 98 | # Write docs to files 99 | docs.write_libraries(FLAGS.out_dir, libraries) 100 | 101 | # Make it easy to search for hidden symbols 102 | if FLAGS.print_hidden_regex: 103 | hidden = set(_hidden_symbols) 104 | for _, lib in libraries: 105 | hidden.update(lib.exclude_symbols) 106 | print(r"hidden symbols regex = r'\b(%s)\b'" % "|".join(sorted(hidden))) 107 | 108 | # Verify that all symbols are mentioned in some library doc. 
109 | catch_all.assert_no_leftovers() 110 | 111 | # Generate index 112 | with open(os.path.join(FLAGS.out_dir, "index.md"), "w") as f: 113 | docs.Index(module_to_name, members, libraries, 114 | "../../api_docs/python/").write_markdown_to_file(f) 115 | 116 | 117 | if __name__ == "__main__": 118 | tf.app.run() 119 | -------------------------------------------------------------------------------- /scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Fail on the first error 4 | set -e 5 | 6 | # Show every execution step 7 | set -x 8 | 9 | 10 | case "$TASK" in 11 | "lint") 12 | if [ "$TRAVIS_OS_NAME" != "osx" ]; then 13 | pylint skflow || exit -1 14 | fi 15 | ;; 16 | 17 | "nosetests") 18 | nosetests --with-coverage --cover-erase --cover-package=skflow 19 | codecov 20 | ;; 21 | 22 | esac 23 | -------------------------------------------------------------------------------- /scripts/travis_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Fail on the first error 4 | set -e 5 | 6 | # Show every execution step 7 | set -x 8 | 9 | 10 | case "$TASK" in 11 | "lint") 12 | pip install pylint 13 | ;; 14 | 15 | "nosetests") 16 | # Create virtual env using system numpy and scipy 17 | deactivate || true 18 | case "$TRAVIS_PYTHON_VERSION" in 19 | "2.7") 20 | virtualenv --system-site-packages testenv 21 | ;; 22 | "3.4") 23 | virtualenv -p python3.4 --system-site-packages testenv 24 | ;; 25 | esac 26 | source testenv/bin/activate 27 | 28 | # Install dependencies 29 | pip install --upgrade pip 30 | pip install numpy 31 | pip install scipy 32 | pip install pandas 33 | pip install scikit-learn 34 | pip install toolz 35 | pip install dask 36 | 37 | # Install TensorFlow 38 | case "$TRAVIS_OS_NAME" in 39 | "linux") 40 | case "$TRAVIS_PYTHON_VERSION" in 41 | "2.7") 42 | 
TENSORFLOW_PACKAGE_URL="https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0rc0-cp27-none-linux_x86_64.whl" 43 | ;; 44 | "3.4") 45 | TENSORFLOW_PACKAGE_URL="https://storage.googleapis.com/tensorflow/linux/cpu/tensorflow-0.8.0rc0-cp34-cp34m-linux_x86_64.whl" 46 | ;; 47 | esac 48 | ;; 49 | "osx") 50 | TENSORFLOW_PACKAGE_URL="https://storage.googleapis.com/tensorflow/mac/tensorflow-0.8.0rc0-py2-none-any.whl" 51 | ;; 52 | esac 53 | pip install "$TENSORFLOW_PACKAGE_URL" --ignore-installed six 54 | 55 | # Install test tools 56 | pip install codecov 57 | pip install coverage 58 | pip install nose 59 | 60 | # Install skflow 61 | python setup.py install 62 | ;; 63 | 64 | esac 65 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.rst 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | from setuptools import setup, find_packages 17 | 18 | META_DATA = dict( 19 | name='skflow', 20 | version='0.1.0', 21 | url='https://github.com/tensorflow/skflow', 22 | license='Apache-2', 23 | description='Simplified Interface for TensorFlow for Deep Learning', 24 | author=['Scikit Flow Authors'], 25 | author_email='terrytangyuan@Gmail.com', 26 | packages=find_packages(), 27 | install_requires=[ 28 | 'sklearn', 29 | 'scipy', 30 | 'numpy', 31 | ], 32 | classifiers=[ 33 | 'Programming Language :: Python', 34 | 'Operating System :: OS Independent', 35 | 'Intended Audience :: Science/Research', 36 | 'Topic :: Scientific/Engineering' 37 | ], 38 | keywords=[ 39 | 'Deep Learning', 40 | 'Neural Networks', 41 | 'Google', 42 | 'TensorFlow', 43 | 'Machine Learning' 44 | ] 45 | ) 46 | 47 | 48 | if __name__ == '__main__': 49 | setup(**META_DATA) 50 | 51 | -------------------------------------------------------------------------------- /skflow/__init__.py: -------------------------------------------------------------------------------- 1 | """Main Scikit Flow module.""" 2 | # Copyright 2015-present Scikit Flow Authors. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import warnings 17 | 18 | try: 19 | from tensorflow.contrib.learn import * 20 | warnings.warn("skflow as separate library is deprecated. 
" 21 | "Use import tensorflow.contrib.learn as skflow instead.", DeprecationWarning) 22 | except ImportError: 23 | raise ImportError("Update your Tensorflow to 0.8+ to use latest skflow.") 24 | 25 | --------------------------------------------------------------------------------