├── .gitignore ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── book.json ├── docker-compose.yml ├── docs └── tensorbuilder │ ├── api │ ├── api.m.html │ ├── applicative.m.html │ ├── builder.m.html │ ├── builder_tree.m.html │ └── index.html │ ├── builder.m.html │ ├── core │ ├── concrete_classes.m.html │ ├── index.html │ └── utils.m.html │ ├── extensions │ ├── index.html │ └── patches │ │ ├── index.html │ │ └── tensorbuilder_patch.m.html │ ├── index.html │ ├── patches │ ├── custom_patch.m.html │ ├── index.html │ ├── layers_patch.m.html │ ├── rnn_utilities_patch.m.html │ ├── summaries_patch.m.html │ ├── tensorbuilder_patch │ │ ├── api_functions_patch.m.html │ │ ├── applicative_ops.m.html │ │ ├── builder_custom_ops.m.html │ │ ├── custom_layer_ops.m.html │ │ ├── index.html │ │ ├── scope_ops.m.html │ │ ├── summaries.m.html │ │ ├── tensorflow_ops.m.html │ │ └── tflearn_ops.m.html │ └── tensorflow_patch.m.html │ ├── tensordata.m.html │ ├── tensordata │ └── index.html │ └── tests │ ├── index.html │ └── test_tensorbuilder.m.html ├── examples ├── tensorbuilder_patch.py └── tflearn_patch.py ├── guide ├── README.md ├── SUMMARY.md ├── basics │ └── README.md ├── branching │ ├── README.md │ └── test.md ├── dsl │ ├── README.md │ └── test.md ├── patches │ ├── README.md │ └── test.md └── scoping │ ├── README.md │ └── test.md ├── requirements.txt ├── scripts ├── create_readme.py ├── do_pypitest ├── gen_docs ├── init ├── test_pypitest ├── update_gh_pages ├── upload_live └── upload_test ├── setup.py ├── tensorbuilder ├── README-template.md ├── __init__.py ├── builder.py ├── patches │ ├── __init__.py │ ├── custom_patch.py │ ├── layers_patch.py │ ├── rnn_utilities_patch.py │ ├── summaries_patch.py │ └── tensorflow_patch.py ├── tensordata │ └── __init__.py ├── tests │ ├── __init__.py │ └── test_tensorbuilder.py └── version.txt └── tests └── test2.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | #Ipython Notebook 62 | .ipynb_checkpoints 63 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.0.5 2 | Major rewrite to enable clean extensibility + added scoping mechanism.
3 | 4 | **scoping** 5 | 6 | You can now write code with scopes like this: 7 | 8 | h = ( 9 | tb.build(x) 10 | .relu_layer(100) 11 | .then_with(tf.device, "/gpu:0")(lambda layer: 12 | layer 13 | .tanh_layer(50) 14 | .dropout() 15 | .softmax_layer(10) 16 | ) 17 | .tensors() 18 | ) 19 | 20 | It internally uses the `with` statement to do what you expect. The DSL was also expanded to support these operations and it looks more natural: 21 | 22 | h = dl.pipe( 23 | x, 24 | dl.relu_layer(100), 25 | 26 | { tf.device("/gpu:0"): 27 | dl.tanh_layer(50) 28 | .dropout() 29 | .softmax_layer(10) 30 | }, 31 | dl.tensors() 32 | ) 33 | 34 | As you can see, scoping is done using a dictionary object. 35 | 36 | #### New methods 37 | * then_with: supports scoping 38 | * with_device: shortcut for `then_with(tf.device, ...)` 39 | * with_variable_scope: shortcut for `then_with(tf.variable_scope, ...)` 40 | * linear_layer: sets `activation_fn` to `None` 41 | * flatten: taken from `tflearn` 42 | 43 | #### Bugs Fixed 44 | * `*_layer` methods were getting an in-between `relu` operation inserted because `fully_connected` has `activation_fn=tf.nn.relu` as a default parameter. 45 | 46 | #### Other changes 47 | * Builder, BuilderTree, and Applicative now inherit from BuilderBase, BuilderTreeBase and ApplicativeBase respectively. They are also dynamically generated by a function so each patch gets its own set of these classes and they do not pollute each other. 48 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:0.11.0rc2 2 | 3 | RUN apt-get update 4 | RUN apt-get -y install git 5 | 6 | RUN pip install prettytensor 7 | RUN pip install pandas 8 | RUN pip install plotly 9 | RUN pip install pdoc 10 | RUN pip install mako 11 | RUN pip install markdown 12 | RUN pip install decorator==4.0.9 13 | RUN pip install tflearn 14 | RUN pip install asq==1.2.1 15 | RUN pip install pytest 16 | RUN pip install pytest-sugar 17 | RUN pip install fn 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 cgarciae 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | include requirements.txt 3 | include README.md 4 | include CHANGELOG.md 5 | 6 | include tensorbuilder/version.txt 7 | include tensorbuilder/README-template.md 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tensor Builder 2 | TensorBuilder had a major refactoring and is now based on [Phi](https://github.com/cgarciae/phi). Updates to the README coming soon! 3 | 4 | ### Goals 5 | Coming Soon! 6 | 7 | ## Installation 8 | Tensor Builder assumes you have a working `tensorflow` installation. We don't include it in the `requirements.txt` since the installation of tensorflow varies depending on your setup. 9 | 10 | #### From pypi 11 | ``` 12 | pip install tensorbuilder 13 | ``` 14 | 15 | #### From github 16 | For the latest development version 17 | ``` 18 | pip install git+https://github.com/cgarciae/tensorbuilder.git@develop 19 | ``` 20 | 21 | ## Getting Started 22 | 23 | Create a neural network with a [5, 10, 3] architecture with a `softmax` output layer and a `tanh` hidden layer through a Builder and then get back its tensor: 24 | 25 | ```python 26 | import tensorflow as tf 27 | from tensorbuilder import T 28 | 29 | x = tf.placeholder(tf.float32, shape=[None, 5]) 30 | keep_prob = tf.placeholder(tf.float32) 31 | 32 | h = T.Pipe( 33 | x, 34 | T.tanh_layer(10) # tanh(x * w + b) 35 | .dropout(keep_prob) # dropout(x, keep_prob) 36 | .softmax_layer(3) # softmax(x * w + b) 37 | ) 38 | ``` 39 | 40 | ## Features 41 | Coming Soon! 42 | 43 | ## Documentation 44 | Coming Soon! 45 | 46 | ## The Guide 47 | Coming Soon! 48 | 49 | ## Full Example 50 | Next is an example with all the features of TensorBuilder including the DSL, branching and scoping. It creates a branched computation where each branch is executed on a different device. All branches are then reduced to a single layer, but the computation is then branched again to obtain both the activation function and the trainer. 51 | 52 | ```python 53 | import tensorflow as tf 54 | from tensorbuilder import T 55 | 56 | x = tf.placeholder(tf.float32, shape=[None, 10]) 57 | y = tf.placeholder(tf.float32, shape=[None, 5]) 58 | 59 | [activation, trainer] = T.Pipe( 60 | x, 61 | [ 62 | T.With( tf.device("/gpu:0"), 63 | T.relu_layer(20) 64 | ) 65 | , 66 | T.With( tf.device("/gpu:1"), 67 | T.sigmoid_layer(20) 68 | ) 69 | , 70 | T.With( tf.device("/cpu:0"), 71 | T.tanh_layer(20) 72 | ) 73 | ], 74 | T.linear_layer(5), 75 | [ 76 | T.softmax() # activation 77 | , 78 | T 79 | .softmax_cross_entropy_with_logits(y) # loss 80 | .minimize(tf.train.AdamOptimizer(0.01)) # trainer 81 | ] 82 | ) 83 | ``` -------------------------------------------------------------------------------- /book.json: -------------------------------------------------------------------------------- 1 | { 2 | "root": "./guide" 3 | } 4 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | volumes: 4 | data: 5 | 6 | services: 7 | tf: 8 | build: .
9 | volumes: 10 | - ./:/tensorbuilder 11 | ports: 12 | - "8885:8888" 13 | command: bash -c "python setup.py install && cd /notebooks && /run_jupyter.sh" 14 | 15 | docs: 16 | # update gh-pages => git subtree push --prefix docs/tensorbuilder origin gh-pages 17 | build: . 18 | volumes: 19 | - ./:/tensorbuilder 20 | working_dir: /tensorbuilder 21 | command: ./gen_docs 22 | 23 | test: 24 | build: . 25 | volumes: 26 | - ./:/tensorbuilder 27 | working_dir: /tensorbuilder 28 | command: py.test 29 | -------------------------------------------------------------------------------- /docs/tensorbuilder/api/index.html: --------------------------------------------------------------------------------
[pdoc-generated HTML page for the `tensorbuilder.api` module; only the module source and its sub-module index are recoverable from the markup:

    from api import API
    import builder
    import builder_tree
    import applicative

    __all__ = ["builder", "builder_tree", "applicative", "api"]

Sub-modules: tensorbuilder.api.api, tensorbuilder.api.applicative, tensorbuilder.api.builder, tensorbuilder.api.builder_tree.]
-------------------------------------------------------------------------------- /docs/tensorbuilder/extensions/patches/index.html: --------------------------------------------------------------------------------
[pdoc-generated HTML page for the `tensorbuilder.extensions.patches` module. Sub-modules: tensorbuilder.extensions.patches.tensorbuilder_patch.]
-------------------------------------------------------------------------------- /docs/tensorbuilder/patches/index.html: --------------------------------------------------------------------------------
[pdoc-generated HTML page for the `tensorbuilder.patches` module; module source:

    #import layers_patch
    import tensorflow_patch
    import summaries_patch
    import layers_patch
    import rnn_utilities_patch
    import custom_patch

Sub-modules: tensorbuilder.patches.custom_patch, tensorbuilder.patches.layers_patch, tensorbuilder.patches.rnn_utilities_patch, tensorbuilder.patches.summaries_patch, tensorbuilder.patches.tensorflow_patch.]
-------------------------------------------------------------------------------- /docs/tensorbuilder/patches/tensorbuilder_patch/tflearn_ops.m.html: --------------------------------------------------------------------------------
[pdoc-generated HTML page for the `tensorbuilder.patches.tensorbuilder_patch.tflearn_ops` module; it documents the module variable `builders_blacklist` and the function `patch_classes(Builder, BuilderTree, Applicative)`. Module source:

    import tflearn

    builders_blacklist = []

    def patch_classes(Builder, BuilderTree, Applicative):
        Builder.register_map_method(tflearn.layers.conv.max_pool_2d, "tflearn.layers")
        Builder.register_map_method(tflearn.embedding, "tflearn.layers.embedding_ops")
]
-------------------------------------------------------------------------------- /docs/tensorbuilder/tests/index.html: --------------------------------------------------------------------------------
[pdoc-generated HTML page for the `tensorbuilder.tests` module. Sub-modules: tensorbuilder.tests.test_tensorbuilder.]
-------------------------------------------------------------------------------- /examples/tensorbuilder_patch.py: -------------------------------------------------------------------------------- 1 | 2 | ############################## 3 | ##### GETTING STARTED 4 | ############################## 5 | 6 | # TensorBuilder includes a set of primitives that you can use to wrap around Tensor-based libraries 7 | 8 | import tensorflow as tf 9 | from tensorflow.contrib import layers as layers 10 | from tensorbuilder import tb 11 | 12 | x = tf.placeholder(tf.float32, shape=[None, 40]) 13 | keep_prob = tf.placeholder(tf.float32) 14 | 15 | h = ( 16 | tb.build(x) 17 | .map(layers.fully_connected, 100, activation_fn=tf.nn.tanh) 18 | .map(tf.nn.dropout, keep_prob) 19 | .map(layers.fully_connected, 30, activation_fn=tf.nn.softmax) 20 | .tensor() 21 | ) 22 | 23 | print(h) 24 | 25 | # The previous is equivalent to this next example using the `slim_patch`, which includes the `fully_connected` method that is taken from `tf.contrib.layers` 26 | 27 | import tensorflow as tf 28 | from tensorbuilder import tb 29 | import tensorbuilder.slim_patch 30 | 31 | x = tf.placeholder(tf.float32, shape=[None, 5]) 32 | keep_prob = tf.placeholder(tf.float32) 33 | 34 | h = ( 35 | tb.build(x) 36 | .fully_connected(10, activation_fn=tf.nn.tanh) # tanh(x * w + b) 37 | .map(tf.nn.dropout, keep_prob) # dropout(x, keep_prob) 38 | .fully_connected(3, activation_fn=tf.nn.softmax) # softmax(x * w + b) 39 | .tensor() 40 | ) 41 | 42 | print(h) 43 | 44 | # The `tensorbuilder.patch` includes a lot more methods that register functions from the `tf`, `tf.nn` and `tf.contrib.layers` modules plus some custom methods based on `fully_connected` to create layers: 45 | 46 | import tensorflow as tf 47 | from tensorbuilder import tb 48 | import tensorbuilder.patch 49 | 50 | x = tf.placeholder(tf.float32, shape=[None, 5]) 51 | keep_prob = tf.placeholder(tf.float32) 52 | 53 | h = ( 54 | tb.build(x) 55 | .tanh_layer(10) # tanh(x * w + b) 56 | .dropout(keep_prob) # dropout(x, keep_prob) 57 | .softmax_layer(3) # softmax(x * w + b) 58 | .tensor() 59 | ) 60 | 61 | print(h) 62 | 63 | ############################## 64 | ##### BRANCHING 65 | ############################## 66 | 67 | #To create a branch you just have to use the `tensorbuilder.tensorbuilder.Builder.branch` method 68 | 69 | import tensorflow as tf 70 | from tensorbuilder import tb 71 | import tensorbuilder.slim_patch 72 | 73 | x = tf.placeholder(tf.float32, shape=[None, 5]) 74 | keep_prob = tf.placeholder(tf.float32) 75 | 76 | h = ( 77 | tb.build(x) 78 | .fully_connected(10) 79 | .branch(lambda root: 80 | [ 81 | root 82 | .fully_connected(3, activation_fn=tf.nn.relu) 83 | , 84 | root 85 | .fully_connected(9, activation_fn=tf.nn.tanh) 86 | .branch(lambda root2: 87 | [ 88 | root2 89 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 90 | , 91 | root2 92 | .map(tf.nn.dropout, keep_prob) 93 | .fully_connected(8, tf.nn.softmax) 94 | ]) 95 | ]) 96 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 97 | .tensor() 98 | ) 99 | 100 | print(h) 101 | 102 | #Thanks to TensorBuilder's immutable API, each branch is independent.
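# Note that the `.fully_connected(6, ...)` call placed after `.branch(...)`
# above acts on the whole BuilderTree: it connects every leaf branch to a
# single layer of 6 sigmoid units (tree reduction is shown in detail in the
# BUILDER TREE section further below).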
# The previous can also be simplified with the full `patch` 103 | 104 | import tensorflow as tf 105 | from tensorbuilder import tb 106 | import tensorbuilder.patch 107 | 108 | x = tf.placeholder(tf.float32, shape=[None, 5]) 109 | keep_prob = tf.placeholder(tf.float32) 110 | 111 | h = ( 112 | tb.build(x) 113 | .fully_connected(10) 114 | .branch(lambda root: 115 | [ 116 | root 117 | .relu_layer(3) 118 | , 119 | root 120 | .tanh_layer(9) 121 | .branch(lambda root2: 122 | [ 123 | root2 124 | .sigmoid_layer(6) 125 | , 126 | root2 127 | .dropout(keep_prob) 128 | .softmax_layer(8) 129 | ]) 130 | ]) 131 | .sigmoid_layer(6) 132 | .tensor() 133 | ) 134 | 135 | print(h) 136 | 137 | 138 | ############################## 139 | ##### DSL 140 | ############################## 141 | 142 | #Lets see an example: here is the previous branching example with the full `patch`, this time using the `dsl` module 143 | 144 | import tensorflow as tf 145 | from tensorbuilder import tb 146 | import tensorbuilder.patch 147 | import tensorbuilder.dsl as dl #<== Notice the alias 148 | 149 | x = tf.placeholder(tf.float32, shape=[None, 5]) 150 | keep_prob = tf.placeholder(tf.float32) 151 | 152 | h = tb.build(x).pipe( 153 | dl.fully_connected(10), 154 | [ 155 | dl.relu_layer(3) 156 | , 157 | (dl.tanh_layer(9), 158 | [ 159 | dl.sigmoid_layer(6) 160 | , 161 | dl 162 | .dropout(keep_prob) 163 | .softmax_layer(8) 164 | ]) 165 | ], 166 | dl.sigmoid_layer(6) 167 | .tensor() 168 | ) 169 | 170 | print(h) 171 | 172 | #As you see a lot of noise is gone, some `dl` terms appeared, and a few `,` were introduced, but the end result better reveals the structure of your network, plus it's very easy to modify. 173 | 174 | ## API 175 | 176 | ############################## 177 | ##### FUNCTIONS 178 | ############################## 179 | 180 | 181 | ############################## 182 | ##### builder 183 | ############################## 184 | 185 | # The following example shows you how to construct a `tensorbuilder.tensorbuilder.Builder` from a tensorflow Tensor. 186 | 187 | import tensorflow as tf 188 | from tensorbuilder import tb 189 | 190 | a = tf.placeholder(tf.float32, shape=[None, 8]) 191 | a_builder = tb.build(a) 192 | 193 | print(a_builder) 194 | 195 | # The previous is the same as 196 | 197 | a = tf.placeholder(tf.float32, shape=[None, 8]) 198 | a_builder = a.builder() 199 | 200 | print(a_builder) 201 | 202 | ############################## 203 | ##### branches 204 | ############################## 205 | 206 | # Given a list of Builders and/or BuilderTrees you construct a `tensorbuilder.tensorbuilder.BuilderTree`. 207 | 208 | import tensorflow as tf 209 | from tensorbuilder import tb 210 | 211 | a = tf.placeholder(tf.float32, shape=[None, 8]).builder() 212 | b = tf.placeholder(tf.float32, shape=[None, 8]).builder() 213 | 214 | tree = tb.branches([a, b]) 215 | 216 | print(tree) 217 | 218 | #`tensorbuilder.tensorbuilder.BuilderTree`s are usually constructed using `tensorbuilder.tensorbuilder.Builder.branch` of the `tensorbuilder.tensorbuilder.Builder` class, but you can use this for special cases 219 | 220 | 221 | 222 | ############################## 223 | ##### BUILDER 224 | ############################## 225 | 226 | 227 | ############################## 228 | ##### fully_connected 229 | ############################## 230 | 231 | # This method is included by many libraries so it's "sort of" part of TensorBuilder.
The following builds the computation `tf.nn.sigmoid(tf.matmul(x, w) + b)` 232 | import tensorflow as tf 233 | from tensorbuilder import tb 234 | import tensorbuilder.slim_patch 235 | 236 | x = tf.placeholder(tf.float32, shape=[None, 5]) 237 | 238 | h = ( 239 | tb.build(x) 240 | .fully_connected(3, activation_fn=tf.nn.sigmoid) 241 | .tensor() 242 | ) 243 | 244 | print(h) 245 | 246 | # Using `tensorbuilder.patch` the previous is equivalent to 247 | 248 | import tensorflow as tf 249 | from tensorbuilder import tb 250 | import tensorbuilder.patch 251 | 252 | x = tf.placeholder(tf.float32, shape=[None, 5]) 253 | 254 | h = ( 255 | tb.build(x) 256 | .sigmoid_layer(3) 257 | .tensor() 258 | ) 259 | 260 | print(h) 261 | 262 | 263 | # You can chain various `fully_connected`s to get deeper neural networks 264 | 265 | import tensorflow as tf 266 | from tensorbuilder import tb 267 | import tensorbuilder.slim_patch 268 | 269 | x = tf.placeholder(tf.float32, shape=[None, 40]) 270 | 271 | h = ( 272 | tb.build(x) 273 | .fully_connected(100, activation_fn=tf.nn.tanh) 274 | .fully_connected(30, activation_fn=tf.nn.softmax) 275 | .tensor() 276 | ) 277 | 278 | print(h) 279 | 280 | # Using `tensorbuilder.patch` the previous is equivalent to 281 | 282 | import tensorflow as tf 283 | from tensorbuilder import tb 284 | import tensorbuilder.patch 285 | 286 | x = tf.placeholder(tf.float32, shape=[None, 5]) 287 | 288 | h = ( 289 | tb.build(x) 290 | .tanh_layer(100) 291 | .softmax_layer(30) 292 | .tensor() 293 | ) 294 | 295 | print(h) 296 | 297 | ############################## 298 | ##### map 299 | ############################## 300 | 301 | #The following constructs a neural network with the architecture `[40 input, 100 tanh, 30 softmax]` and applies `dropout` to the tanh layer 302 | 303 | import tensorflow as tf 304 | from tensorbuilder import tb 305 | import tensorbuilder.slim_patch 306 | 307 | x = tf.placeholder(tf.float32, shape=[None, 40]) 308 | keep_prob = tf.placeholder(tf.float32) 309 | 310 | h = ( 311 | tb.build(x) 312 | .fully_connected(100, activation_fn=tf.nn.tanh) 313 | .map(tf.nn.dropout, keep_prob) 314 | .fully_connected(30, activation_fn=tf.nn.softmax) 315 | .tensor() 316 | ) 317 | 318 | print(h) 319 | 320 | 321 | ############################## 322 | ##### then 323 | ############################## 324 | 325 | # The following *manually* constructs the computation `tf.nn.sigmoid(tf.matmul(x, w) + b)` while updating the `tensorbuilder.tensorbuilder.Builder.variables` dictionary.
326 | 327 | import tensorflow as tf 328 | from tensorbuilder import tb 329 | import tensorbuilder.slim_patch 330 | 331 | x = tf.placeholder(tf.float32, shape=[None, 40]) 332 | keep_prob = tf.placeholder(tf.float32) 333 | 334 | def sigmoid_layer(builder, size): 335 | x = builder.tensor() 336 | m = int(x.get_shape()[1]) 337 | n = size 338 | 339 | w = tf.Variable(tf.random_uniform([m, n], -1.0, 1.0)) 340 | b = tf.Variable(tf.random_uniform([n], -1.0, 1.0)) 341 | 342 | y = tf.nn.sigmoid(tf.matmul(x, w) + b) 343 | 344 | return y.builder() 345 | 346 | h = ( 347 | tb.build(x) 348 | .then(sigmoid_layer, 3) 349 | .tensor() 350 | ) 351 | 352 | # Note that the previous is equivalent to 353 | import tensorflow as tf 354 | from tensorbuilder import tb 355 | import tensorbuilder.slim_patch 356 | h = ( 357 | tb.build(x) 358 | .fully_connected(3, activation_fn=tf.nn.sigmoid) 359 | .tensor() 360 | ) 361 | 362 | print(h) 363 | 364 | ############################## 365 | ##### branch 366 | ############################## 367 | 368 | # The following will create a sigmoid layer but will branch the computation at the logit (z) so you get both the output tensor `h` and the `trainer` tensor. Observe that first the logit `z` is calculated by creating a linear layer with `fully_connected(1)` and then it's branched out 369 | 370 | import tensorflow as tf 371 | from tensorbuilder import tb 372 | import tensorbuilder.slim_patch 373 | 374 | x = tf.placeholder(tf.float32, shape=[None, 5]) 375 | y = tf.placeholder(tf.float32, shape=[None, 1]) 376 | 377 | [h, trainer] = ( 378 | tb.build(x) 379 | .fully_connected(1) 380 | .branch(lambda z: 381 | [ 382 | z.map(tf.nn.sigmoid) 383 | , 384 | z.map(tf.nn.sigmoid_cross_entropy_with_logits, y) 385 | .map(tf.train.AdamOptimizer(0.01).minimize) 386 | ]) 387 | .tensors() 388 | ) 389 | 390 | print(h) 391 | print(trainer) 392 | 393 | # Note that you have to use the `tensorbuilder.tensorbuilder.BuilderTree.tensors` method from the `tensorbuilder.tensorbuilder.BuilderTree` class to get the tensors back. 394 | 395 | # Remember that the branch list can also contain `tensorbuilder.tensorbuilder.BuilderTree` elements when you branch out, which means that you can keep branching inside branches. Don't worry if the tree keeps getting deeper, `tensorbuilder.tensorbuilder.BuilderTree` has methods that help you flatten or reduce the tree.
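# As a quick reference, the three main ways to consume a BuilderTree in this
# file (each of them appears in the BUILDER TREE section below) are:
#
#     tree.builders()          # a list with the leaf Builders
#     tree.tensors()           # a list with the leaf Tensors
#     tree.fully_connected(n)  # reduce all leaves into a single layer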
396 | #The following example will show you how to create an (overly) complex tree and then connect all the leaf nodes to a single `sigmoid` layer 397 | 398 | import tensorflow as tf 399 | from tensorbuilder import tb 400 | import tensorbuilder.slim_patch 401 | 402 | x = tf.placeholder(tf.float32, shape=[None, 5]) 403 | keep_prob = tf.placeholder(tf.float32) 404 | 405 | h = ( 406 | tb.build(x) 407 | .fully_connected(10) 408 | .branch(lambda base: 409 | [ 410 | base 411 | .fully_connected(3, activation_fn=tf.nn.relu) 412 | , 413 | base 414 | .fully_connected(9, activation_fn=tf.nn.tanh) 415 | .branch(lambda base2: 416 | [ 417 | base2 418 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 419 | , 420 | base2 421 | .map(tf.nn.dropout, keep_prob) 422 | .fully_connected(8, tf.nn.softmax) 423 | ]) 424 | ]) 425 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 426 | ) 427 | 428 | print(h) 429 | 430 | ############################## 431 | ##### BUILDER TREE 432 | ############################## 433 | 434 | ############################## 435 | ##### builders 436 | ############################## 437 | 438 | import tensorflow as tf 439 | from tensorbuilder import tb 440 | import tensorbuilder.slim_patch 441 | 442 | x = tf.placeholder(tf.float32, shape=[None, 5]) 443 | y = tf.placeholder(tf.float32, shape=[None, 1]) 444 | 445 | [h_builder, trainer_builder] = ( 446 | tb.build(x) 447 | .fully_connected(1) 448 | .branch(lambda z: 449 | [ 450 | z.map(tf.nn.sigmoid) 451 | , 452 | z.map(tf.nn.sigmoid_cross_entropy_with_logits, y) 453 | .map(tf.train.AdamOptimizer(0.01).minimize) 454 | ]) 455 | .builders() 456 | ) 457 | 458 | print(h_builder) 459 | print(trainer_builder) 460 | 461 | ############################## 462 | ##### tensors 463 | ############################## 464 | 465 | import tensorflow as tf 466 | from tensorbuilder import tb 467 | import tensorbuilder.slim_patch 468 | 469 | x = tf.placeholder(tf.float32, shape=[None, 5]) 470 | y = tf.placeholder(tf.float32, shape=[None, 1]) 471 | 472 | [h_tensor, trainer_tensor] = ( 473 | tb.build(x) 474 | .fully_connected(1) 475 | .branch(lambda z: 476 | [ 477 | z.map(tf.nn.sigmoid) 478 | , 479 | z.map(tf.nn.sigmoid_cross_entropy_with_logits, y) 480 | .map(tf.train.AdamOptimizer(0.01).minimize) 481 | ]) 482 | .tensors() 483 | ) 484 | 485 | print(h_tensor) 486 | print(trainer_tensor) 487 | 488 | ############################## 489 | ##### fully_connected 490 | ############################## 491 | 492 | # The following example shows you how to connect two tensors (rather builders) of different shapes to a single `softmax` layer of shape [None, 3] 493 | 494 | import tensorflow as tf 495 | from tensorbuilder import tb 496 | import tensorbuilder.slim_patch 497 | 498 | a = tf.placeholder(tf.float32, shape=[None, 8]).builder() 499 | b = tf.placeholder(tf.float32, shape=[None, 5]).builder() 500 | 501 | h = ( 502 | tb.branches([a, b]) 503 | .fully_connected(3, activation_fn=tf.nn.softmax) 504 | ) 505 | 506 | print(h) 507 | 508 | # The next example shows you how you can use this to pass the input layer directly through one branch, and "analyze" it with a `tanh layer` filter through the other; both of these are connected to a single `softmax` output layer 509 | 510 | import tensorflow as tf 511 | from tensorbuilder import tb 512 | import tensorbuilder.slim_patch 513 | 514 | x = tf.placeholder(tf.float32, shape=[None, 5]) 515 | 516 | h = ( 517 | tb.build(x) 518 | .branch(lambda x: 519 | [ 520 | x 521 | , 522 | x.fully_connected(10, activation_fn=tf.nn.tanh) 523 | ]) 524 |
.fully_connected(3, activation_fn=tf.nn.softmax) 525 | ) 526 | 527 | print(h) 528 | -------------------------------------------------------------------------------- /examples/tflearn_patch.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Here is an example using the `tflearn` patch 4 | import tflearn 5 | from tensorbuilder import tb 6 | import tensorbuilder.patches.tflearn.patch 7 | 8 | model = ( 9 | tflearn.input_data(shape=[None, 784]).builder() 10 | .fully_connected(64) 11 | .dropout(0.5) 12 | .fully_connected(10, activation='softmax') 13 | .regression(optimizer='adam', loss='categorical_crossentropy') 14 | .map(tflearn.DNN) 15 | .tensor() 16 | ) 17 | 18 | print(model) 19 | -------------------------------------------------------------------------------- /guide/README.md: -------------------------------------------------------------------------------- 1 | # Tensor Builder 2 | TensorBuilder is a light-weight, extensible library that enables you to easily create complex deep neural networks through a functional [fluent](https://en.wikipedia.org/wiki/Fluent_interface) [immutable](https://en.wikipedia.org/wiki/Immutable_object) API based on the Builder Pattern. Tensor Builder also comes with a DSL based on [applicatives](http://learnyouahaskell.com/functors-applicative-functors-and-monoids) and function composition that enables you to express more clearly the structure of your network, make changes faster, and reuse code. 3 | 4 | ### Goals 5 | 6 | * Be a light wrapper around Tensor-based libraries 7 | * Enable users to easily create complex branched topologies while maintaining a fluent API (see [Builder.branch](http://cgarciae.github.io/tensorbuilder/api/builder.m.html#tensorbuilder.api.builder.Builder.branch)) 8 | * Let users be expressive and productive through a DSL 9 | 10 | ## Installation 11 | Tensor Builder assumes you have a working `tensorflow` installation. We don't include it in the `requirements.txt` since the installation of tensorflow varies depending on your setup. 12 | 13 | #### From pypi 14 | ``` 15 | pip install tensorbuilder==0.0.18 16 | 17 | ``` 18 | 19 | #### From github 20 | For the latest development version 21 | ``` 22 | pip install git+https://github.com/cgarciae/tensorbuilder.git@develop 23 | ``` 24 | 25 | ## Getting Started 26 | 27 | Create a neural network with a [5, 10, 3] architecture with a `softmax` output layer and a `tanh` hidden layer through a Builder and then get back its tensor: 28 | 29 | import tensorflow as tf 30 | from tensorbuilder import tb 31 | 32 | x = tf.placeholder(tf.float32, shape=[None, 5]) 33 | keep_prob = tf.placeholder(tf.float32) 34 | 35 | h = ( 36 | tb 37 | .build(x) 38 | .tanh_layer(10) # tanh(x * w + b) 39 | .dropout(keep_prob) # dropout(x, keep_prob) 40 | .softmax_layer(3) # softmax(x * w + b) 41 | .tensor() 42 | ) 43 | 44 | ## Features 45 | * **Branching**: Enables you to easily express complex topologies with a fluent API. See [Branching](https://cgarciae.gitbooks.io/tensorbuilder/content/branching/). 46 | * **Scoping**: Enables you to express scopes for your tensor graph using methods such as `tf.device` and `tf.variable_scope` with the same fluent API. See [Scoping](https://cgarciae.gitbooks.io/tensorbuilder/content/scoping/). 47 | * **DSL**: Use an abbreviated notation with a functional style to make the creation of networks faster, structural changes easier, and code easier to reuse. See [DSL](https://cgarciae.gitbooks.io/tensorbuilder/content/dsl/).
* **Patches**: Add functions from other Tensor-based libraries as methods of the Builder class. TensorBuilder gives you a curated patch plus some specific patches from `TensorFlow` and `TFLearn`, but you can build your own to make TensorBuilder what you want it to be. See [Patches](https://cgarciae.gitbooks.io/tensorbuilder/content/patches/). 49 | 50 | ## Documentation 51 | * [Complete API](http://cgarciae.github.io/tensorbuilder/api/index.html). 52 | * [Core API](http://cgarciae.github.io/tensorbuilder/core/index.html). 53 | * [Complete Documentation](http://cgarciae.github.io/tensorbuilder/index.html) 54 | 55 | ## The Guide 56 | Check out [The Guide](https://cgarciae.gitbooks.io/tensorbuilder/content/) to learn to code in TensorBuilder. 57 | 58 | ## Full Example 59 | Next is an example with all the features of TensorBuilder including the DSL, branching and scoping. It creates a branched computation where each branch is executed on a different device. All branches are then reduced to a single layer, but the computation is then branched again to obtain both the activation function and the trainer. 60 | 61 | import tensorflow as tf 62 | from tensorbuilder import tb 63 | 64 | x = tf.placeholder(tf.float32, shape=[None, 10]) 65 | y = tf.placeholder(tf.float32, shape=[None, 5]) 66 | 67 | [activation, trainer] = tb.pipe( 68 | x, 69 | [ 70 | { tf.device("/gpu:0"): 71 | tb.relu_layer(20) 72 | } 73 | , 74 | { tf.device("/gpu:1"): 75 | tb.sigmoid_layer(20) 76 | } 77 | , 78 | { tf.device("/cpu:0"): 79 | tb.tanh_layer(20) 80 | } 81 | ], 82 | tb.linear_layer(5), 83 | [ 84 | tb.softmax() # activation 85 | , 86 | tb 87 | .softmax_cross_entropy_with_logits(y) # loss 88 | .map(tf.train.AdamOptimizer(0.01).minimize) # trainer 89 | ], 90 | tb.tensors() 91 | ) 92 | 93 | 94 | -------------------------------------------------------------------------------- /guide/SUMMARY.md: -------------------------------------------------------------------------------- 1 | # Summary 2 | 3 | * [1. Basics](basics/README.md) 4 | * [Setup](basics/README.md#Setup) 5 | * [Building Networks](basics/README.md#Building_networks) 6 | * [2. Branching](branching/README.md) 7 | * [3. DSL](dsl/README.md) -------------------------------------------------------------------------------- /guide/basics/README.md: -------------------------------------------------------------------------------- 1 | # Basics 2 | Here we will cover the basics of Tensor Builder; to do so, we will solve one of the simplest classical examples in the history of neural networks: the XOR. 3 | 4 | We will assume that you have already installed TensorBuilder; if not, click [here](https://cgarciae.gitbooks.io/tensorbuilder/content/). Remember that you must have a working installation of TensorFlow. 5 | 6 | ## Setup 7 | First we will set up our imports; you'll need to have `numpy` installed. 8 | 9 | ```python 10 | import numpy as np 11 | 12 | import tensorflow as tf 13 | from tensorbuilder import tb 14 | ``` 15 | 16 | As you see, `tb` is **not** an alias for the `tensorbuilder` module; it's actually an object that we import from this library. There are several reasons behind exposing the API as an object: one is that implementing it this way reduced a lot of code internally, but it also plays better with the DSL as you will see later. 17 | 18 | > **Note:** `tb` is of type `Applicative` and all of its methods are immutable, so don't worry about "breaking" it.
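To make that concrete, here is a small illustrative sketch (it uses the `sigmoid_layer` and `tanh_layer` methods introduced later in this chapter) of what immutability means in practice: every method call returns a *new* object and leaves the original untouched, so intermediate builders can be reused freely:

```python
import tensorflow as tf
from tensorbuilder import tb

x = tf.placeholder(tf.float32, shape=[None, 2])

base = tb.build(x)         # a Builder wrapping `x`
a = base.sigmoid_layer(2)  # returns a *new* Builder
b = base.tanh_layer(2)     # `base` was not modified, so it can be reused

assert a is not base and b is not base
```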
19 | 20 | Next we are going to create our data and placeholders 21 | 22 | ```python 23 | #TRUTH TABLE (DATA) 24 | X = [[0.0,0.0]]; Y = [[0.0]] 25 | X.append([1.0,0.0]); Y.append([1.0]) 26 | X.append([0.0,1.0]); Y.append([1.0]) 27 | X.append([1.0,1.0]); Y.append([0.0]) 28 | 29 | X = np.array(X) 30 | Y = np.array(Y) 31 | 32 | x = tf.placeholder(tf.float32, shape=[None, 2]) 33 | y = tf.placeholder(tf.float32, shape=[None, 1]) 34 | ``` 35 | 36 | ## Building Networks 37 | Now we need to construct the smallest neural network that can solve the XOR; its architecture is going to be `[2 input, 2 sigmoid, 1 sigmoid]`. To do that we will first calculate the `logit` of the last layer, and then using it we will calculate 2 things: 38 | 39 | 1. The `activation` function (sometimes denoted `h`) by using the `sigmoid` function 40 | 2. The network's `trainer` by creating a loss function and feeding it to a training algorithm. 41 | 42 | Here is the code 43 | 44 | ```python 45 | logit = ( 46 | tb 47 | .build(x) 48 | .sigmoid_layer(2) 49 | .linear_layer(1) 50 | ) 51 | 52 | activation = ( 53 | logit 54 | .sigmoid() 55 | .tensor() 56 | ) 57 | 58 | trainer = ( 59 | logit 60 | .sigmoid_cross_entropy_with_logits(y) # loss 61 | .map(tf.train.AdamOptimizer(0.01).minimize) 62 | .tensor() 63 | ) 64 | ``` 65 | 66 | As you see, TensorBuilder's API is fluent, meaning that you can keep chaining methods to *build* the computation. 67 | 68 | ### The Builder class 69 | The first thing we should talk about when reviewing this code is the `Builder` class. When we executed 70 | 71 | ```python 72 | tb 73 | .build(x) 74 | ``` 75 | 76 | we created a `Builder` that holds our input Tensor `x`. Having our `Builder` we proceeded to use the methods 77 | 78 | ```python 79 | .sigmoid_layer(2) 80 | .linear_layer(1) 81 | ``` 82 | 83 | If the acronym "What You Read is Mostly What You Get (WYRMWYG)" were a thing, this code would be it. It's telling you that the input is connected to a layer of *2 sigmoid* units, and then this is connected to a layer of *1 linear* unit. You might be wondering where these methods come from, or what kinds of methods there are. 84 | 85 | #### Method Families 86 | 87 | `TensorBuilder` decided to become a library that doesn't implement the core methods that actually deal with Tensors. Instead it has some class methods to **register** instance methods, and during import we actually include a bunch of functions from other libraries (yeah, we are basically just stealing other libraries for the greater good). Currently most of these methods come from the `tensorflow` library, but there are also some from `tflearn`. The current practice is the following 88 | 89 | 1. The function `tf.contrib.layers.fully_connected` is a very special function that is registered as a method of this class. Its importance is due to the fact that the most fundamental operations in the creation of neural networks involve creating/connecting layers. 90 | 2. If `f` is a function in `tf` or `tf.nn`, it will *most likely* be registered as a method of the `Builder` class. The process that registers these functions *lifts* them from being functions that accept a `Tensor` (plus some extra arguments) to functions that accept a `Builder` (plus some extra arguments).
Due to this, not all methods will work as expected; an obvious example is [tf.placeholder](https://www.tensorflow.org/versions/r0.9/api_docs/python/io_ops.html#placeholder): this function is automatically included, but it doesn't take a Tensor as its first parameter, so it doesn't make sense as a method of this class. Right now the current policy of which of these functions to include/exclude is a blacklist approach, so that only functions that are known to cause serious problems (like having the same name as basic methods) are excluded and all the functions you are likely going to use are included. 91 | 3. Based on points 1 and 2, the next set of functions is defined as: if `f` is a function in `tf` or `tf.nn` with name `fname`, then the method `fname_layer` exists in this class. These methods use `fully_connected` and `f` to create a layer with `f` as its activation function. While you don't REALLY need them, `.softmax_layer(5)` reads much better than `.fully_connected(5, activation_fn=tf.nn.softmax)`. 92 | 93 | #### Using the methods 94 | 95 | So we used the methods `.sigmoid_layer(2)` and `.linear_layer(1)` to create our `logit`. Now to create the `activation` function (rather Tensor) of our network we did the following 96 | 97 | ```python 98 | activation = ( 99 | logit 100 | .sigmoid() 101 | .tensor() 102 | ) 103 | ``` 104 | 105 | This was basically just applying `tf.sigmoid` over the `logit`. The method `.tensor` allows us to actually get back the `Tensor` inside the Builder. 106 | 107 | #### The map method 108 | 109 | Finally we created our network `trainer` doing the following 110 | 111 | ```python 112 | trainer = ( 113 | logit 114 | .sigmoid_cross_entropy_with_logits(y) # loss 115 | .map(tf.train.AdamOptimizer(0.01).minimize) 116 | .tensor() 117 | ) 118 | ``` 119 | 120 | Initially we just indirectly applied the function `tf.nn.sigmoid_cross_entropy_with_logits` over the `logit` and the target's placeholder `y`, to get out our *loss* Tensor. But then we used a custom method from the `Builder` class: [map](http://cgarciae.github.io/tensorbuilder/core/index.html#tensorbuilder.core.BuilderBase.map). 121 | 122 | `map` takes any function that accepts a Tensor as its first parameter (and some extra arguments), applies that function to the Tensor inside our Builder (plus the extra arguments), and returns a Builder with the new Tensor. In this case our function was the *unbound method* `minimize` of the `AdamOptimizer` instance (created in-line) that expects a loss Tensor and returns a Tensor that performs the computation that trains our network. 123 | 124 | The thing is, given that we have `map` we actually don't REALLY need most of the other methods! We could e.g. have written the initial structure of our network like this 125 | 126 | ```python 127 | logit = ( 128 | tb 129 | .build(x) 130 | .map(tf.contrib.layers.fully_connected, 2, activation_fn=tf.nn.sigmoid) 131 | .map(tf.contrib.layers.fully_connected, 1, activation_fn=None) 132 | ) 133 | ``` 134 | 135 | instead of 136 | 137 | ```python 138 | logit = ( 139 | tb 140 | .build(x) 141 | .sigmoid_layer(2) 142 | .linear_layer(1) 143 | ) 144 | ``` 145 | 146 | but as you see the latter is more compact and readable. The important thing is that you understand that you can use `map` to naturally incorporate functions not registered in the Builder class into the computation. 147 | 148 | ## Training 149 | Finally, given that we have constructed the `trainer` and `activation` Tensors, let's use regular TensorFlow operations to train the network.
We will train for 2000 epochs using full batch training (given that we only have 4 training examples) and then print out the prediction for each case of the XOR using the `activation` Tensor. 150 | 151 | ```python 152 | # create session 153 | sess = tf.Session() 154 | sess.run(tf.initialize_all_variables()) 155 | 156 | # train 157 | for i in range(2000): 158 | sess.run(trainer, feed_dict={x: X, y: Y}) 159 | 160 | # test 161 | for i in range(len(X)): 162 | print "{0} ==> {1}".format(X[i], sess.run(activation, feed_dict={x: X[i:i+1,:]})) 163 | ``` 164 | 165 | Congratulations! You have just solved the XOR problem using TensorBuilder. Not much of a feat for a serious Machine Learning Engineer, but you now have the basic knowledge of the TensorBuilder API. 166 | 167 | ## What's Next? 168 | In the next chapters you will learn how to create branched neural networks (important in many architectures), use scoping mechanisms to specify some attributes of the Tensors we build, and explore the Domain Specific Language (DSL), using all the previous knowledge to enable you to code even faster. 169 | 170 | -------------------------------------------------------------------------------- /guide/branching/README.md: -------------------------------------------------------------------------------- 1 | # Branching 2 | 3 | Branching is common in many neural networks that need to resolve complex tasks because it allows each branch to specialize its knowledge while lowering the number of weights compared to a network with wider layers, thus giving better performance. TensorBuilder enables you to easily create nested branches. Branching results in a `BuilderTree`, which has methods for traversing all the `Builder` leaf nodes and reducing the whole tree to a single `Builder`. 4 | 5 | To create a branch you just have to use the `Builder.branch` method 6 | 7 | import tensorflow as tf 8 | from tensorbuilder import tb 9 | 10 | x = tf.placeholder(tf.float32, shape=[None, 5]) 11 | keep_prob = tf.placeholder(tf.float32) 12 | 13 | h = ( 14 | tb.build(x) 15 | .fully_connected(10) 16 | .branch(lambda root: 17 | [ 18 | root 19 | .fully_connected(3, activation_fn=tf.nn.relu) 20 | , 21 | root 22 | .fully_connected(9, activation_fn=tf.nn.tanh) 23 | .branch(lambda root2: 24 | [ 25 | root2 26 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 27 | , 28 | root2 29 | .map(tf.nn.dropout, keep_prob) 30 | .fully_connected(8, tf.nn.softmax) 31 | ]) 32 | ]) 33 | .fully_connected(6, activation_fn=tf.nn.sigmoid) 34 | .tensor() 35 | ) 36 | 37 | print(h) 38 | 39 | Thanks to TensorBuilder's immutable API, each branch is independent.
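Since `branch` returns a `BuilderTree`, you don't always have to reduce it back to a single layer: you can also extract each leaf on its own. Here is a minimal sketch, adapted from the API examples elsewhere in this repository (`x` and `y` are fresh placeholders, and `tensors()` returns the list of leaf Tensors):

    import tensorflow as tf
    from tensorbuilder import tb

    x = tf.placeholder(tf.float32, shape=[None, 5])
    y = tf.placeholder(tf.float32, shape=[None, 1])

    [h, trainer] = (
        tb.build(x)
        .fully_connected(1)
        .branch(lambda z:
            [
                z.map(tf.nn.sigmoid)                               # output activation
            ,
                z.map(tf.nn.sigmoid_cross_entropy_with_logits, y)  # loss
                 .map(tf.train.AdamOptimizer(0.01).minimize)       # trainer
            ])
        .tensors()
    )

    print(h)
    print(trainer)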
The first branching example above can also be simplified with the full `patch` 40 | 41 | import tensorflow as tf 42 | from tensorbuilder import tb 43 | import tensorbuilder.patch 44 | 45 | x = tf.placeholder(tf.float32, shape=[None, 5]) 46 | keep_prob = tf.placeholder(tf.float32) 47 | 48 | h = ( 49 | tb.build(x) 50 | .fully_connected(10) 51 | .branch(lambda root: 52 | [ 53 | root 54 | .relu_layer(3) 55 | , 56 | root 57 | .tanh_layer(9) 58 | .branch(lambda root2: 59 | [ 60 | root2 61 | .sigmoid_layer(6) 62 | , 63 | root2 64 | .dropout(keep_prob) 65 | .softmax_layer(8) 66 | ]) 67 | ]) 68 | .sigmoid_layer(6) 69 | .tensor() 70 | ) 71 | 72 | print(h) 73 | -------------------------------------------------------------------------------- /guide/branching/test.md: -------------------------------------------------------------------------------- 1 | # Some Test 2 | -------------------------------------------------------------------------------- /guide/dsl/README.md: -------------------------------------------------------------------------------- 1 | # DSL 2 | 3 | TensorBuilder's DSL enables you to express the computation you desire as a single flexible structure. The DSL preserves all the features given to you by the `Builder` class: 4 | 5 | * Composing operations 6 | * Branching 7 | * Scoping 8 | 9 | The `Applicative` class was built to create elements that are accepted by/play well with this language. It also has two very important methods: 10 | 11 | * `compile`: generates a function out of a given valid **ast**/structure (compiles it) 12 | * `pipe`: given a `Builder` or `Tensor` and an **ast**, compiles it to a function and applies it to the Tensor/Builder. 13 | 14 | ## Rules 15 | 16 | * All final elements in the "AST" must be functions; non-final elements are compiled to a function. 17 | * A Tuple `()` denotes a sequential operation. Results in the composition of all elements within it. 18 | * A List `[]` denotes a branching operation. Results in the creation of a function that applies the `.branch` method to its argument, and each element in the list results in a branch. It compiles to a function of type `Builder -> BuilderTree`. 19 | * A Dict `{}` denotes a scoping operation. It only accepts a single key-value pair; its key must be a [context manager](https://www.python.org/dev/peps/pep-0343/) and its value can be any element of the language. It results in the creation of a function that takes a `Builder` as its argument, applies the `with` statement to the `key`, and applies the function of the `value` to its argument inside the `with` block. 20 | 21 | ## Example 22 | 23 | It's easier to see the actual DSL with an example, especially because you can see a direct mapping of the concepts brought by the `Builder` class into the DSL: 24 | 25 | import tensorflow as tf 26 | from tensorbuilder import tb 27 | 28 | x = tf.placeholder(tf.float32, shape=[None, 10]) 29 | y = tf.placeholder(tf.float32, shape=[None, 5]) 30 | 31 | [h, trainer] = tb.pipe( 32 | x, 33 | [ 34 | { tf.device("/gpu:0"): 35 | tb.relu_layer(20) 36 | } 37 | , 38 | { tf.device("/gpu:1"): 39 | tb.sigmoid_layer(20) 40 | } 41 | , 42 | { tf.device("/cpu:0"): 43 | tb.tanh_layer(20) 44 | } 45 | ], 46 | tb.relu_layer(10) 47 | .linear_layer(5), 48 | [ 49 | tb.softmax() # h 50 | , 51 | tb.softmax_cross_entropy_with_logits(y) 52 | .reduce_mean() 53 | .map(tf.train.AdamOptimizer(0.01).minimize) # trainer 54 | ], 55 | tb.tensors() 56 | ) 57 | 58 | Let's go step by step through what is happening here: 59 | 60 | 1.
The Tensor `x` is plugged inside a `Builder` and *piped* through the computational structure defined. All the arguments of `pipe` after `x` are grouped as if they were in a tuple `()` and the whole expression is compiled to a single function which is then applied to the `Builder` containing `x`. 61 | 1. The **final** elements you see here, like `tb.softmax()`, are `Applicative`s, which as you've been told are functions. As you see, *almost* all methods from the `Builder` class are also methods of the `Applicative` class; the difference is that the methods of the `Builder` class actually perform the computation they intend (construct a new Tensor), but the methods from the `Applicative` class rather *compose/define* the computation to be done later. 62 | 1. There is an implicit Tuple `()` element that is performing a sequential composition of all the other elements. As a result, the visual/spatial ordering of the code corresponds to the intended behavior. 63 | 1. Lists very naturally express branches. Notice how indentation and an intentional positioning of the `,` comma help to differentiate each branch. 64 | 1. Expressions like `tb.relu_layer(10)` are polymorphic and work for `Builder`s or `BuilderTree`s regardless. 65 | 1. Scoping is very clean with the `{}` notation. In contrast to using `then_with` from the `Builder` class, here you can actually use the original functions from `tensorflow` unchanged in the `key` of the dict. -------------------------------------------------------------------------------- /guide/dsl/test.md: -------------------------------------------------------------------------------- 1 | # Some Test 2 | -------------------------------------------------------------------------------- /guide/patches/README.md: -------------------------------------------------------------------------------- 1 | # Patches 2 | 3 | Coming Soon! 4 | -------------------------------------------------------------------------------- /guide/patches/test.md: -------------------------------------------------------------------------------- 1 | # Some Test 2 | -------------------------------------------------------------------------------- /guide/scoping/README.md: -------------------------------------------------------------------------------- 1 | # Scoping 2 | 3 | Coming Soon! 4 | -------------------------------------------------------------------------------- /guide/scoping/test.md: -------------------------------------------------------------------------------- 1 | # Some Test 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | phi 2 | asq -------------------------------------------------------------------------------- /scripts/create_readme.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | import os 4 | 5 | print("Getting Version") 6 | with open('tensorbuilder/version.txt', 'r') as f: 7 | version = f.read() 8 | 9 | print("Getting Readme Template") 10 | with open('tensorbuilder/README-template.md', 'r') as f: 11 | readme = f.read().format(version) 12 | 13 | print("Writing Readme") 14 | with open('README.md', 'w') as f: 15 | f.write(readme) 16 | 17 | -------------------------------------------------------------------------------- /scripts/do_pypitest: -------------------------------------------------------------------------------- 1 | #!
/bin/bash 2 | 3 | pip install -r /code/requirements.txt 4 | pip install -U -i https://testpypi.python.org/pypi tensorbuilder 5 | echo 6 | echo "Testing Library:" 7 | echo 8 | python -c "import tensorbuilder; print 'Hello from python. TensorBuilder version:', tensorbuilder.__version__" -------------------------------------------------------------------------------- /scripts/gen_docs: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | find . -name "*.pyc" -delete 4 | echo "Generating docs" 5 | PYTHONPATH=./tensorbuilder pdoc --html-dir=docs --html --all-submodules --overwrite tensorbuilder 6 | echo "Making README.md" 7 | python scripts/create_readme.py 8 | echo "Finished" -------------------------------------------------------------------------------- /scripts/init: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | sudo pip install -e . -------------------------------------------------------------------------------- /scripts/test_pypitest: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | docker run -it -v ${PWD}:/code -w /code tensorflow/tensorflow scripts/do_pypitest -------------------------------------------------------------------------------- /scripts/update_gh_pages: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | git subtree push --prefix docs/tensorbuilder origin gh-pages -------------------------------------------------------------------------------- /scripts/upload_live: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # Register 4 | #python setup.py register -r pypi 5 | 6 | # Upload 7 | python setup.py sdist upload -r pypi 8 | -------------------------------------------------------------------------------- /scripts/upload_test: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # Register 4 | #python setup.py register -r pypitest 5 | 6 | # Upload 7 | python setup.py sdist upload -r pypitest 8 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup 3 | from pip.req import parse_requirements 4 | 5 | # parse requirements 6 | reqs = [str(r.req) for r in parse_requirements("requirements.txt", session=False)] 7 | 8 | 9 | # Utility function to read the README file. 10 | # Used for the long_description. It's nice, because now 1) we have a top level 11 | # README file and 2) it's easier to type in the README file than to put a raw 12 | # string in below ... 
13 | def read(fname):
14 |     return open(os.path.join(os.path.dirname(__file__), fname)).read()
15 |
16 | version = read('tensorbuilder/version.txt').split("\n")[0]
17 |
18 | setup(
19 |     name = "tensorbuilder",
20 |     version = version,
21 |     author = "Cristian Garcia",
22 |     author_email = "cgarcia.e88@gmail.com",
23 |     description = ("A light wrapper over TensorFlow that enables you to easily create complex deep neural networks using the Builder Pattern through a functional fluent immutable API"),
24 |     license = "MIT",
25 |     keywords = ["tensorflow", "deep learning", "neural networks"],
26 |     url = "https://github.com/cgarciae/tensorbuilder",
27 |     packages = [
28 |         'tensorbuilder',
29 |         'tensorbuilder.tensordata',
30 |         'tensorbuilder.patches',
31 |         'tensorbuilder.tests'
32 |     ],
33 |     package_data={
34 |         '': ['LICENSE', 'requirements.txt', 'README.md', 'CHANGELOG.md'],
35 |         'tensorbuilder': ['version.txt', 'README-template.md']
36 |     },
37 |     download_url = 'https://github.com/cgarciae/tensorbuilder/tarball/{0}'.format(version),
38 |     include_package_data = True,
39 |     long_description = read('README.md'),
40 |     install_requires = reqs
41 | )
42 |
--------------------------------------------------------------------------------
/tensorbuilder/README-template.md:
--------------------------------------------------------------------------------
1 | # Tensor Builder
2 | TensorBuilder had a major refactoring and is now based on [Phi](https://github.com/cgarciae/phi). Updates to the README coming soon!
3 |
4 | ### Goals
5 | Coming Soon!
6 |
7 | ## Installation
8 | Tensor Builder assumes you have a working `tensorflow` installation. We don't include it in the `requirements.txt` since the installation of tensorflow varies depending on your setup.
9 |
10 | #### From pypi
11 | ```
12 | pip install tensorbuilder
13 | ```
14 |
15 | #### From github
16 | For the latest development version
17 | ```
18 | pip install git+https://github.com/cgarciae/tensorbuilder.git@develop
19 | ```
20 |
21 | ## Getting Started
22 |
23 | Create a neural network with a [5, 10, 3] architecture, with a `softmax` output layer and a `tanh` hidden layer, through a Builder and then get back its tensor:
24 |
25 | ```python
26 | import tensorflow as tf
27 | from tensorbuilder import T
28 |
29 | x = tf.placeholder(tf.float32, shape=[None, 5])
30 | keep_prob = tf.placeholder(tf.float32)
31 |
32 | h = T.Pipe(
33 |     x,
34 |     T.tanh_layer(10)        # tanh(x * w + b)
35 |     .dropout(keep_prob)     # dropout(x, keep_prob)
36 |     .softmax_layer(3)       # softmax(x * w + b)
37 | )
38 | ```
39 |
40 | ## Features
41 | Coming Soon!
42 |
43 | ## Documentation
44 | Coming Soon!
45 |
46 | ## The Guide
47 | Coming Soon!
48 |
49 | ## Full Example
50 | Next is an example with all the features of TensorBuilder, including the DSL, branching and scoping. It creates a branched computation where each branch is executed on a different device. All branches are then reduced to a single layer, and the computation is then branched again to obtain both the activation function and the trainer.
51 |
52 | ```python
53 | import tensorflow as tf
54 | from tensorbuilder import T
55 |
56 | x = tf.placeholder(tf.float32, shape=[None, 10])
57 | y = tf.placeholder(tf.float32, shape=[None, 5])
58 |
59 | [activation, trainer] = T.Pipe(
60 |     x,
61 |     [
62 |         T.With( tf.device("/gpu:0"),
63 |             T.relu_layer(20)
64 |         )
65 |     ,
66 |         T.With( tf.device("/gpu:1"),
67 |             T.sigmoid_layer(20)
68 |         )
69 |     ,
70 |         T.With( tf.device("/cpu:0"),
71 |             T.tanh_layer(20)
72 |         )
73 |     ],
74 |     T.linear_layer(5),
75 |     [
76 |         T.softmax()        # activation
77 |     ,
78 |         T
79 |         .softmax_cross_entropy_with_logits(y)        # loss
80 |         .minimize(tf.train.AdamOptimizer(0.01))      # trainer
81 |     ]
82 | )
83 | ```
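
As a rough sketch of how the resulting `activation`/`trainer` pair might be driven (this is plain TF 0.x session code, not part of TensorBuilder; `X_batch` and `Y_batch` are hypothetical numpy arrays):

```python
import numpy as np

X_batch = np.random.randn(32, 10).astype('float32')  # hypothetical inputs
Y_batch = np.random.randn(32, 5).astype('float32')   # hypothetical targets

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())  # TF 0.x-era initializer
    for step in range(1000):
        sess.run(trainer, feed_dict={x: X_batch, y: Y_batch})
    probs = sess.run(activation, feed_dict={x: X_batch})
```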
--------------------------------------------------------------------------------
/tensorbuilder/__init__.py:
--------------------------------------------------------------------------------
1 | from builder import TensorBuilder
2 | from tensordata import Data
3 | from phi import utils
4 |
5 | T = TensorBuilder(utils.identity)
6 |
7 | import patches  # import last
8 |
9 | ########################
10 | # Documentation
11 | ########################
12 | import os
13 |
14 | def _to_pdoc_markdown(doc):
15 |     indent = False
16 |     lines = []
17 |
18 |     for line in doc.split('\n'):
19 |         if "```" in line:
20 |             indent = not indent
21 |             line = line.replace("```python", '')
22 |             line = line.replace("```", '')
23 |
24 |         if indent:
25 |             line = "    " + line
26 |
27 |         lines.append(line)
28 |
29 |     return '\n'.join(lines)
30 |
31 | def _read(fname):
32 |     return open(os.path.join(os.path.dirname(__file__), fname)).read()
33 |
34 | _raw_docs = _read("README-template.md")
35 | __version__ = _read("version.txt")
36 | __doc__ = _to_pdoc_markdown(_raw_docs.format(__version__))
37 |
--------------------------------------------------------------------------------
/tensorbuilder/builder.py:
--------------------------------------------------------------------------------
1 | from phi.builder import Builder
2 | import inspect
3 | from tensordata import Data
4 | from phi import P
5 |
6 | class TensorBuilder(Builder):
7 |     """docstring for TensorBuilder."""
8 |
9 |     def data(self, *args, **kwargs):
10 |         return Data(*args, **kwargs)
11 |
12 | TensorBuilder.__core__ = [ name for name, f in inspect.getmembers(TensorBuilder, predicate=inspect.ismethod) ]
13 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/__init__.py:
--------------------------------------------------------------------------------
1 | #import layers_patch
2 | import tensorflow_patch
3 | import summaries_patch
4 | import layers_patch
5 | import rnn_utilities_patch
6 | import custom_patch
7 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/custom_patch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | from tensorbuilder import T, TensorBuilder
3 | from phi import P, utils
4 |
5 | @TensorBuilder.Register1("tb")
6 | def inception_layer(tensor, num_outputs, **kwargs):
7 |     stride = 1
8 |     pool_operation = T.max_pool2d
9 |     pool_kernel = [3, 3]
10 |     scope = None
11 |     reuse = None
12 |
13 |     if 'stride' in kwargs:
14 |         stride = kwargs['stride']
15 |     else:
16 |         kwargs['stride'] = stride
17 |
18 |
19 |     if 'pool_kernel' in kwargs:
20 |         pool_kernel = kwargs['pool_kernel']
21 |         del kwargs['pool_kernel']
22 |
23 |     if 'scope' in kwargs:
24 |         scope = kwargs['scope']
25 |         del kwargs['scope']
26 |
27 |     if 'reuse' in kwargs:
28 |         reuse = kwargs['reuse']
29 |         del kwargs['reuse']
30 |
31 |     if 'pool_operation' in kwargs:
32 |         pool_operation = kwargs['pool_operation']
33 |         del kwargs['pool_operation']
34 |
35 |     kwargs_no_stride = kwargs.copy()
36 |     del kwargs_no_stride['stride']
37 |
38 |     with tf.variable_scope(scope, default_name='InceptionLayer', reuse=reuse):
39 |         return P(
40 |             tensor,
41 |             [
42 |                 T.convolution2d(num_outputs, [1, 1], **dict(kwargs, scope="Conv1x1"))
43 |             ,
44 |                 P.With( T.variable_scope("Branch3x3"),
45 |                     T
46 |                     .convolution2d(num_outputs, [1, 1], **dict(kwargs_no_stride, scope="Conv1x1"))
47 |                     .convolution2d(num_outputs, [3, 3], **dict(kwargs, scope="Conv3x3"))
48 |                 )
49 |             ,
50 |                 P.With( T.variable_scope("Branch5x5"),
51 |                     T
52 |                     .convolution2d(num_outputs, [1, 1], **dict(kwargs_no_stride, scope="Conv1x1"))
53 |                     .convolution2d(num_outputs, [5, 5], **dict(kwargs, scope="Conv5x5"))
54 |                 )
55 |             ,
56 |                 P.With( T.variable_scope("BranchPool"),
57 |                     pool_operation(pool_kernel, stride=stride, padding='SAME'),
58 |                     T.convolution2d(num_outputs, [1, 1], **dict(kwargs_no_stride, scope="Conv1x1"))
59 |                 )
60 |             ],
61 |             T.concat(3)
62 |         )
63 |
64 | @TensorBuilder.Register1("tb")
65 | def minimize(tensor, optimizer, *args, **kwargs):
66 |     return optimizer.minimize(tensor, *args, **kwargs)
67 |
68 | @TensorBuilder.Register1("tb")
69 | def maximize(tensor, optimizer, *args, **kwargs):
70 |     return optimizer.maximize(tensor, *args, **kwargs)
71 |
72 | @TensorBuilder.Register1("tb")
73 | def drop_layer(x, keep_prob, seed=None, name=None):
74 |     """Computes dropout.
75 |     With probability `keep_prob`, outputs the input element scaled up by
76 |     `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
77 |     sum is unchanged.
78 |
79 |     Args:
80 |         x: A tensor.
81 |         keep_prob: A scalar `Tensor` with the same type as x. The probability
82 |             that each element is kept.
83 |             The noise shape is fixed internally to `[batch_size, 1]`, so a
84 |             whole example row is kept or dropped together.
85 |         seed: A Python integer. Used to create random seeds. See
86 |             [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
87 |             for behavior.
88 |         name: A name for this operation (optional).
89 |     Returns:
90 |         A Tensor of the same shape as `x`.
91 |     Raises:
92 |         ValueError: If `keep_prob` is not in `(0, 1]`.
93 |     """
94 |     with tf.op_scope([x], name, "drop_layer") as name:
95 |         x = tf.convert_to_tensor(x, name="x")
96 |         if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
97 |             raise ValueError("keep_prob must be a scalar tensor or a float in the "
98 |                              "range (0, 1], got %g" % keep_prob)
99 |         keep_prob = tf.convert_to_tensor(keep_prob,
100 |                                          dtype=x.dtype,
101 |                                          name="keep_prob")
102 |         keep_prob.get_shape().assert_is_compatible_with(tf.TensorShape([]))  # scalar
103 |
104 |         noise_shape = [ tf.shape(x)[0], 1 ]
105 |         # uniform [keep_prob, 1.0 + keep_prob)
106 |         random_tensor = keep_prob
107 |         random_tensor += tf.random_uniform(
108 |             noise_shape,
109 |             seed=seed,
110 |             dtype=x.dtype
111 |         )
112 |
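        # Why the 1/keep_prob scaling below preserves the expected value:
        # each row's flag is 1 with probability keep_prob, so
        # E[x * binary_tensor / keep_prob] = x * keep_prob / keep_prob = x.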
113 |         # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
114 |         binary_tensor = tf.floor(random_tensor)
115 |         ret = x * tf.inv(keep_prob) * binary_tensor
116 |         ret.set_shape(x.get_shape())
117 |         return ret
118 |
119 | @TensorBuilder.Register1("tb")
120 | def ensamble_dropout(tree, keep_prob, seed=None, name=None):
121 |     with tf.op_scope(tree.tensors(), name, "drop_layer"):
122 |         return tree.map_each(drop_layer, keep_prob, seed=seed, name=name)
123 |
124 | @TensorBuilder.Register1("tb")
125 | def add_regularization_loss(tensor, graph=None, scope='add_regularization_loss'):
126 |     if not graph:
127 |         reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
128 |     else:
129 |         reg_losses = graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
130 |
131 |     with tf.variable_scope(scope):
132 |         reg_loss = tf.reduce_sum([ tf.reduce_mean(reg_loss) for reg_loss in reg_losses ], name='reg_loss_mean_sum')
133 |         return tf.add(tensor, reg_loss)
134 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/layers_patch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import inspect
3 | import functools
4 | from tensorflow.contrib import layers
5 | from tensorflow.contrib.layers import fully_connected, convolution2d
6 | from tensorbuilder import TensorBuilder
7 | from phi import utils, P, patch
8 | from phi.builder import Builder
9 |
10 |
11 | class LayerBuilder(Builder):
12 |     """docstring for LayerBuilder."""
13 |
14 |     @property
15 |     def TensorBuilder(self):
16 |         return TensorBuilder()._unit(self._f, self._refs)
17 |
18 | # Add property to TensorBuilder
19 | TensorBuilder.layers = property(lambda self: LayerBuilder()._unit(self._f, self._refs))
20 |
21 | # patch all layer functions
22 | patch.builder_with_members_from_1(LayerBuilder, layers, module_alias="tf.contrib.layers") #, _return_type=TensorBuilder)
23 |
24 | # fully connected layers
25 | blacklist = (
26 |     ["relu_layer"] +
27 |     TensorBuilder.__core__
28 | )
29 |
30 | funs = ( (name, f) for (name, f) in inspect.getmembers(tf.nn, inspect.isfunction) if name not in blacklist )
31 |
32 | def register_layer_functions(name, f):
33 |     explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)
34 |
35 |     @TensorBuilder.Register1("tf.contrib.layers", name + "_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
36 |     def layer_function(*args, **kwargs):
37 |         kwargs['activation_fn'] = f
38 |         return fully_connected(*args, **kwargs)
39 |
40 | def register_conv_layer_functions(name, f):
41 |     explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)
42 |
43 |     @TensorBuilder.Register1("tf.contrib.layers", name + "_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
44 |     def layer_function(*args, **kwargs):
45 |         kwargs['activation_fn'] = f
46 |         return convolution2d(*args, **kwargs)
47 |
48 | for name, f in funs:
49 |     register_layer_functions(name, f)
50 |     register_conv_layer_functions(name, f)
51 |
52 |
53 |
54 | # linear_layer
55 | explanation = """and the keyword argument `activation_fn` is set to `None`."""
56 |
57 | @TensorBuilder.Register1("tf.contrib.layers", alias="linear_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
58 | def linear_layer(*args, **kwargs):
59 |     kwargs['activation_fn'] = None
60 |     return tf.contrib.layers.fully_connected(*args, **kwargs)
61 |
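# For reference, a sketch of the methods the loop above generates (the exact
# set depends on which functions inspect finds in tf.nn at import time):
#   T.relu_layer(64)                ~ fully_connected(..., 64, activation_fn=tf.nn.relu)
#   T.tanh_conv2d_layer(32, [3, 3]) ~ convolution2d(..., 32, [3, 3], activation_fn=tf.nn.tanh)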
62 | @TensorBuilder.Register1("tf.contrib.layers", alias="linear_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
63 | def linear_conv2d_layer(*args, **kwargs):
64 |     kwargs['activation_fn'] = None
65 |     return tf.contrib.layers.convolution2d(*args, **kwargs)
66 |
67 | def _polynomial(tensor):
68 |     size = int(tensor.get_shape()[1])
69 |     pows = [ tf.pow(tensor[:, n], n + 1) for n in range(size) ]
70 |     return tf.transpose(tf.pack(pows))
71 |
72 | explanation = """
73 | However, it uses an activation function of the form
74 | ```
75 | y(i) = z(i)^(i+1)
76 | ```
77 | where `z = w*x + b`
78 | """
79 |
80 | @TensorBuilder.Register1("tb", alias="polynomial_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
81 | def polynomial_layer(*args, **kwargs):
82 |     kwargs['activation_fn'] = _polynomial
83 |     return layers.fully_connected(*args, **kwargs)
84 |
85 |
86 | whitelist = ["convolution2d", "max_pool2d", "avg_pool2d", "flatten"]
87 | patch.builder_with_members_from_1(TensorBuilder, layers, module_alias="tf.contrib.layers", whitelist=lambda x: x in whitelist) #, _return_type=TensorBuilder)
88 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/rnn_utilities_patch.py:
--------------------------------------------------------------------------------
1 | from phi import utils
2 | from tensorbuilder import TensorBuilder
3 | import tensorflow as tf
4 |
5 | @TensorBuilder.RegisterMethod("tb")
6 | def rnn_placeholders_from_state(self, zero_state, name="rnn_state"):
7 |     if isinstance(zero_state, tuple):
8 |         return tuple([self.rnn_placeholders_from_state(substate, name=name) for substate in zero_state])
9 |     else:
10 |         return tf.placeholder(zero_state.dtype, shape=zero_state.get_shape(), name=name)
11 |
12 | @TensorBuilder.RegisterMethod("tb")
13 | def rnn_state_feed_dict(self, placeholders, values):
14 |     return dict(zip(utils.flatten(placeholders), utils.flatten_list(values)))
15 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/summaries_patch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import inspect
3 | import functools
4 | from tensorbuilder import TensorBuilder
5 |
6 |
7 | funs = ( (name, f) for (name, f) in inspect.getmembers(tf, inspect.isfunction) if "_summary" in name )
8 |
9 | def register_summary_functions(name, f):
10 |     @TensorBuilder.Register2("tf", alias="make_{0}".format(name), wrapped=f)
11 |     def summary_function(tags, values, *args, **kwargs):
12 |         f(tags, values, *args, **kwargs)
13 |         return values
14 |
15 | for name, f in funs:
16 |     register_summary_functions(name, f)
17 |
--------------------------------------------------------------------------------
/tensorbuilder/patches/tensorflow_patch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import inspect
3 | from tensorbuilder import TensorBuilder
4 | from phi import utils, patch
5 |
6 |
7 | f0_pred = (lambda x:
8 |     "scope" in x or
9 |     "device" in x
10 | )
11 |
12 | f2_pred = (lambda x:
13 |     x in [
14 |         "concat"
15 |     ] or
16 |     "_summary" in x
17 | )
18 |
19 | f1_blacklist = (lambda x:
20 |     x in ["relu_layer", "device"] or
21 |     x in TensorBuilder.__core__ or
22 |     f0_pred(x) or
23 |     f2_pred(x)
24 | )
25 |
26 | # tf
27 | patch.builder_with_members_from_0(TensorBuilder, tf, whitelist=f0_pred)
28 | patch.builder_with_members_from_1(TensorBuilder, tf, blacklist=f1_blacklist)
29 | patch.builder_with_members_from_2(TensorBuilder, tf, whitelist=f2_pred)
30 |
31 | # tf.nn
32 | patch.builder_with_members_from_1(TensorBuilder, tf.nn, module_alias="tf.nn", blacklist=f1_blacklist)
33 |
34 | # for name, f, module in f1s:
35 | #     TensorBuilder.register_function_1(f, module)
36 | #
37 | # for name, f, module in f2s:
38 | #     TensorBuilder.register_function_2(f, module)
39 |
--------------------------------------------------------------------------------
/tensorbuilder/tensordata/__init__.py:
--------------------------------------------------------------------------------
1 | import asq
2 | from asq.initiators import query
3 | import asq.queryables
4 | import random
5 | from itertools import islice, izip_longest
6 | import numpy as np
7 | import tensorflow as tf
8 | from decorator import decorator
9 |
10 | """Utilities for splitting, batching and feeding named data sources."""
11 |
12 |
13 | @decorator
14 | def immutable(method, self, *args, **kwargs):
15 |     """
16 |     Decorator. Passes a copy of the entity to the method so that the original object remains untouched.
17 |     Used in methods to get a fluent immutable API.
18 |     """
19 |     return method(self.copy(), *args, **kwargs)
20 |
21 | class Data(object):
22 |     """Holds named data sources (e.g. `Data(x=..., y=...)`) as attributes; iterating yields (possibly batched) `Data` views."""
23 |     def __init__(self, _iterator=None, **sources):
24 |         super(Data, self).__init__()
25 |         self.sources = sources
26 |         self.__dict__.update(sources)
27 |
28 |         self._iterator = _iterator if _iterator else lambda: self._raw_data()
29 |         self.batch = None
30 |         self.patch = None
31 |
32 |
33 |     def copy(self):
34 |         return Data(_iterator=self._iterator, **self.sources)
35 |
36 |     def __iter__(self):
37 |         return self._iterator()
38 |
39 |     def enumerated(self):
40 |         return enumerate(self._iterator())
41 |
42 |
43 |     def split(self, *splits):
44 |         """Shuffles the data and partitions it into one `Data` per split, sized by the given proportions."""
45 |
46 |         data_length = len(self.x)
47 |
48 |         indexes = range(data_length)
49 |         random.shuffle(indexes)
50 |
51 |         splits = [0] + list(splits)
52 |         splits_total = sum(splits)
53 |
54 |         return (
55 |             query(splits)
56 |             .scan()
57 |             .select(lambda n: int(data_length * n / splits_total))
58 |             .then(_window, n=2)
59 |             .select(lambda (start, end): np.array(indexes[start:end]))
60 |             .select(lambda split: Data(**{k: source[split,:] for (k, source) in self.sources.iteritems()}))
61 |             .to_list()
62 |         )
63 |
64 |
65 |     @immutable
66 |     def raw_data(self):
67 |         self._iterator = lambda: self._raw_data()
68 |         return self
69 |
70 |     def _raw_data(self):
71 |         yield self
72 |
73 |
74 |     @immutable
75 |     def batches_of(self, batch_size):
76 |         """
77 |         Makes iteration yield a `Data` view of `batch_size` random samples per incoming item.
78 |         """
79 |         _iterator = self._iterator
80 |         self._iterator = lambda: self._batch(batch_size, _iterator)
81 |         return self
82 |
83 |     def _batch(self, batch_size, _iterator):
84 |         for data in _iterator():
85 |             length = len(data.x)
86 |             sample = np.random.choice(length, batch_size)
87 |             new_data = Data(**{k: source[sample] for (k, source) in data.sources.iteritems()})
88 |
89 |             yield new_data
90 |
91 |     @immutable
92 |     def epochs(self, epochs):
93 |         """Repeats the iteration `epochs` times, tagging each yielded `Data` with its `.epoch`."""
94 |         _iterator = self._iterator
95 |         self._iterator = lambda: self._epochs(epochs, _iterator)
96 |         return self
97 |
98 |     def _epochs(self, epochs, _iterator):
99 |         for epoch in range(epochs):
100 |             for data in _iterator():
101 |                 data.epoch = epoch
102 |                 yield data
103 |
104 |
105 |     def placeholders(self, *args):
106 |         return list(self._placeholders(*args))
107 |
108 |     def _placeholders(self, *args):
109 |         for source_name in args:
110 |             source = self.sources[source_name]
111 |             shape = [None] + list(source.shape)[1:]
112 |             yield tf.placeholder(tf.float32, shape=shape)
113 |
114 |
115 |     def run(self, sess, tensor, tensors={}, **feed):
116 |
117 |         try:
118 |             tensor = tensor.tensor()
119 |         except AttributeError:  # plain Tensors have no .tensor()
120 |             pass
121 |
122 |         feed = { feed[k]: self.sources[k] for k in feed }
123 |         feed.update(tensors)
124 |
125 |         return sess.run(tensor, feed_dict=feed)
126 |
127 |
128 | def _window(seq, n=2):
129 |     "Returns a sliding window (of width n) over data from the iterable"
130 |     " s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ... "
131 |     it = iter(seq)
132 |     result = tuple(islice(it, n))
133 |     if len(result) == n:
134 |         yield result
135 |     for elem in it:
136 |         result = result[1:] + (elem,)
137 |         yield result
138 |
139 | def _then(q, fn, *args, **kwargs):
140 |     return query(fn(q, *args, **kwargs))
141 |
142 | asq.queryables.Queryable.then = _then
143 |
--------------------------------------------------------------------------------
/tensorbuilder/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cgarciae/tensorbuilder/f8e0b19c09deaaea67611d9df51218e4a9cd705a/tensorbuilder/tests/__init__.py
--------------------------------------------------------------------------------
/tensorbuilder/tests/test_tensorbuilder.py:
--------------------------------------------------------------------------------
1 | from tensorbuilder import T
2 | from phi import P, Rec
3 | import tensorflow as tf
4 |
5 | class TestTensorBuilder(object):
6 |     """docstring for TestBuilder"""
7 |
8 |     @classmethod
9 |     def setup_method(self):
10 |         self.x = tf.placeholder('float', shape=[None, 5], name='x')
11 |         self.w = tf.transpose(tf.Variable(
12 |             [[1.,2.,3.,4.,5.],
13 |              [6.,7.,8.,9.,10.]]
14 |         ), name='w')
15 |         self.b = tf.Variable(
16 |             [1.,2.],
17 |             name='b'
18 |         )
19 |
20 |
21 |     def test_patch(self):
22 |
23 |         matmul, add = T.Ref('matmul'), T.Ref('add')
24 |
25 |         y = T.Pipe(
26 |             self.x,
27 |
28 |             T
29 |             .matmul(self.w).Write(matmul)
30 |             .add(self.b).Write(add)
31 |             .relu()
32 |         )
33 |
34 |         assert "Relu" in y.name
35 |         assert "MatMul" in matmul().name
36 |         assert "Add" in add().name
37 |
38 |     def test_summaries_patch(self):
39 |         name = T.Pipe(
40 |             self.x,
41 |             T.reduce_mean().make_scalar_summary('mean'),
42 |             Rec.name
43 |         )
44 |         assert "Mean" in name
45 |
46 |         name = T.Pipe(
47 |             self.x,
48 |             T.reduce_mean().scalar_summary('mean'),
49 |             Rec.name
50 |         )
51 |         assert "ScalarSummary" in name
52 |
53 |     def test_layers_patch(self):
54 |         softmax_layer = T.Pipe(
55 |             self.x,
56 |             T
57 |             .sigmoid_layer(10)
58 |             .softmax_layer(20)
59 |         )
60 |         assert "Softmax" in softmax_layer.name
61 |
62 |     def test_concat(self):
63 |         concatenated = T.Pipe(
64 |             self.x,
65 |             [
66 |                 T.softmax_layer(3)
67 |             ,
68 |                 T.tanh_layer(2)
69 |             ,
70 |                 T.sigmoid_layer(5)
71 |             ],
72 |             T.concat(1)
73 |         )
74 |
75 |         assert int(concatenated.get_shape()[1]) == 10
76 |
77 |     def test_rnn_utilities(self):
78 |         assert T.rnn_placeholders_from_state
79 |         assert T.rnn_state_feed_dict
80 |
--------------------------------------------------------------------------------
/tensorbuilder/version.txt:
--------------------------------------------------------------------------------
1 | 0.2.5
--------------------------------------------------------------------------------
/tests/test2.py:
--------------------------------------------------------------------------------
1 | import tflearn
2 | from tensorbuilder import tb
3 | import tensorbuilder.patches.tflearn.patch
4 | import tensorbuilder.dsl as dl
5 |
6 |
7 | model = (
8 |     tflearn.input_data(shape=[None, 784]).builder()
9 |     .fully_connected(64)
10 |     .dropout(0.5)
11 |     .fully_connected(10, activation='softmax')
12 |     .regression(optimizer='adam', loss='categorical_crossentropy')
13 |     .map(tflearn.DNN)
14 |     .tensor()
15 | )
16 |
17 | model2 = tflearn.input_data(shape=[None, 784]).builder().pipe(
18 |     dl.fully_connected(64)
19 |     .dropout(0.5)
20 |     .fully_connected(10, activation='softmax')
21 |     .regression(optimizer='adam', loss='categorical_crossentropy')
22 |     .map(tflearn.DNN)
23 |     .tensor()
24 | )
25 |
26 | print model
27 | print model2
28 |
--------------------------------------------------------------------------------