├── .circleci └── config.yml ├── .gitignore ├── .rake_tasks~ ├── .rspec ├── .rubocop.yml ├── .travis.yml ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── Gemfile ├── LICENSE.txt ├── README.md ├── Rakefile ├── USAGE_GUIDE.md ├── benchmark_intel.txt ├── benchmark_nvidia.txt ├── benchmark_ryzen_amd.txt ├── bin ├── console ├── setup └── stubgen ├── exe └── model_utils ├── lib ├── tensor_stream.rb └── tensor_stream │ ├── constant.rb │ ├── control_flow.rb │ ├── debugging │ └── debugging.rb │ ├── device.rb │ ├── dynamic_stitch.rb │ ├── evaluator │ ├── base_evaluator.rb │ ├── buffer.rb │ ├── evaluator.rb │ ├── evaluator_utils.rb │ ├── operation_helpers │ │ ├── array_ops_helper.rb │ │ ├── math_helper.rb │ │ └── random_gaussian.rb │ ├── ruby │ │ ├── array_ops.rb │ │ ├── check_ops.rb │ │ ├── images_ops.rb │ │ ├── math_ops.rb │ │ ├── nn_ops.rb │ │ ├── random_ops.rb │ │ ├── storage_manager.rb │ │ └── variable_ops.rb │ └── ruby_evaluator.rb │ ├── exceptions.rb │ ├── generated_stub │ ├── ops.rb │ └── stub_file.erb │ ├── graph.rb │ ├── graph_builder.rb │ ├── graph_deserializers │ ├── protobuf.rb │ └── yaml_loader.rb │ ├── graph_keys.rb │ ├── graph_serializers │ ├── graphml.rb │ ├── packer.rb │ ├── pbtext.rb │ ├── serializer.rb │ └── yaml.rb │ ├── helpers │ ├── infer_shape.rb │ ├── op_helper.rb │ ├── string_helper.rb │ └── tensor_mixins.rb │ ├── images.rb │ ├── initializer.rb │ ├── math │ └── math_ops.rb │ ├── math_gradients.rb │ ├── monkey_patches │ ├── array.rb │ ├── float.rb │ ├── integer.rb │ ├── op_patch.rb │ └── patch.rb │ ├── nn │ ├── embedding_lookup.rb │ └── nn_ops.rb │ ├── op_maker.rb │ ├── operation.rb │ ├── ops.rb │ ├── ops │ ├── add.rb │ ├── argmax.rb │ ├── argmin.rb │ ├── bias_add.rb │ ├── case.rb │ ├── cast.rb │ ├── ceil.rb │ ├── const.rb │ ├── cos.rb │ ├── div.rb │ ├── equal.rb │ ├── expand_dims.rb │ ├── fill.rb │ ├── floor.rb │ ├── floor_div.rb │ ├── greater.rb │ ├── greater_equal.rb │ ├── less.rb │ ├── less_equal.rb │ ├── log.rb │ ├── mat_mul.rb │ ├── max.rb │ ├── min.rb │ ├── mod.rb │ ├── mul.rb │ ├── negate.rb │ ├── not_equal.rb │ ├── ones_like.rb │ ├── pow.rb │ ├── prod.rb │ ├── random_uniform.rb │ ├── range.rb │ ├── rank.rb │ ├── reshape.rb │ ├── round.rb │ ├── rsqrt.rb │ ├── shape.rb │ ├── sigmoid.rb │ ├── sign.rb │ ├── sin.rb │ ├── size.rb │ ├── strided_slice.rb │ ├── sub.rb │ ├── sum.rb │ ├── tan.rb │ ├── tanh.rb │ ├── tile.rb │ ├── top_k.rb │ └── zeros.rb │ ├── placeholder.rb │ ├── profile │ └── report_tool.rb │ ├── session.rb │ ├── tensor.rb │ ├── tensor_shape.rb │ ├── train │ ├── adadelta_optimizer.rb │ ├── adagrad_optimizer.rb │ ├── adam_optimizer.rb │ ├── gradient_descent_optimizer.rb │ ├── learning_rate_decay.rb │ ├── momentum_optimizer.rb │ ├── optimizer.rb │ ├── rmsprop_optimizer.rb │ ├── saver.rb │ ├── slot_creator.rb │ └── utils.rb │ ├── trainer.rb │ ├── types.rb │ ├── utils.rb │ ├── utils │ ├── data_type_utils.rb │ ├── freezer.rb │ └── py_ports.rb │ ├── variable.rb │ ├── variable_scope.rb │ └── version.rb ├── samples ├── datasets │ └── iris.data ├── jupyter_notebooks │ └── linear_regression.ipynb ├── neural_networks │ ├── iris.rb │ ├── mnist_data.rb │ ├── raw_neural_net_sample.rb │ └── rnn.rb ├── others │ └── nearest_neighbor.rb ├── regression │ ├── linear_regression.rb │ └── logistic_regression.rb └── word_embeddings │ ├── word_embedding_1.rb │ └── word_embedding_2.rb ├── spec ├── fixtures │ ├── 0_image.png │ ├── 1_image.png │ ├── add.pbtext.proto │ ├── compare.png │ ├── data.json │ ├── data_stride_2.json │ ├── data_stride_2_grad.json │ ├── 
data_stride_2_input.json │ ├── expected_grad.json │ ├── expected_grad_2.json │ ├── gradients.pbtxt │ ├── grayscale_image.json │ ├── lg_model.ckpt │ ├── lg_model.ckpt.yaml │ ├── lg_model │ │ ├── model-.ckpt │ │ ├── model.meta │ │ └── model.yaml │ ├── lg_model_freezed.yaml │ ├── linear_regression.pb │ ├── linear_regression.pbtxt │ ├── matmul_graph.pbtxt │ ├── mnist.yaml │ ├── neural_network.pbtxt │ ├── neural_network_raw.py │ ├── ruby_16.png │ ├── ruby_512.png │ ├── sample.pbtxt │ ├── sample_jpeg.jpg │ ├── sample_jpeg_bw.jpg │ ├── tensorflow.proto │ ├── test.graphml │ ├── test.pbtxt.proto │ ├── test.yaml │ ├── test_samples │ │ ├── error.graphml │ │ ├── gradient_sample.graphml │ │ ├── test.py │ │ └── test2.py │ ├── tf.case.pbtext │ ├── ts_add.pbtext.proto │ ├── ts_test_graph_lg.yaml │ ├── ts_test_graph_nn.yaml │ └── ts_test_graph_simple.yaml ├── spec_helper.rb ├── support │ ├── freezer_spec.rb │ ├── images_spec.rb │ ├── nn_ops_spec.rb │ ├── ops_spec.rb │ ├── optimizer_spec.rb │ ├── saver_spec.rb │ ├── supported_op.rb │ └── variable_op_spec.rb └── tensor_stream │ ├── debugging │ └── debugging_spec.rb │ ├── deserializer │ └── protobuf_spec.rb │ ├── device_spec.rb │ ├── evaluators │ └── ruby_evaluator_spec.rb │ ├── examples │ ├── basic_operation_spec.rb │ ├── hello_world_spec.rb │ ├── iris.rb │ └── layers_spec.rb │ ├── graph_spec.rb │ ├── libraries │ └── layers.rb │ ├── math_gradients_spec.rb │ ├── nn_ops_spec.rb │ ├── operation_spec.rb │ ├── profile │ └── report_tool_spec.rb │ ├── serializers │ ├── graphml_spec.rb │ ├── pbtext_spec.rb │ └── yaml_spec.rb │ ├── session_spec.rb │ ├── tensor_shape_spec.rb │ ├── tensor_spec.rb │ ├── tensor_stream_spec.rb │ ├── train │ ├── learning_rate_decay_spec.rb │ └── utils_spec.rb │ └── variable_spec.rb └── tensor_stream.gemspec /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | # Ruby CircleCI 2.0 configuration file 2 | # 3 | # Check https://circleci.com/docs/2.0/language-ruby/ for more details 4 | # 5 | version: 2 6 | jobs: 7 | build: 8 | docker: 9 | # specify the version you desire here 10 | - image: jedld/ruby-circleci-opencl:latest 11 | 12 | # Specify service dependencies here if necessary 13 | # CircleCI maintains a library of pre-built images 14 | # documented at https://circleci.com/docs/2.0/circleci-images/ 15 | # - image: circleci/postgres:9.4 16 | 17 | working_directory: ~/repo 18 | 19 | steps: 20 | - checkout 21 | 22 | # Download and cache dependencies 23 | - restore_cache: 24 | keys: 25 | - v1-dependencies-{{ checksum "Gemfile.lock" }} 26 | # fallback to using the latest cache if no exact match is found 27 | - v1-dependencies- 28 | 29 | - run: 30 | name: install dependencies 31 | command: | 32 | bundle install --jobs=4 --retry=3 --path vendor/bundle 33 | 34 | - save_cache: 35 | paths: 36 | - ./vendor/bundle 37 | key: v1-dependencies-{{ checksum "Gemfile.lock" }} 38 | 39 | # run tests! 
40 | - run: 41 | name: run tests 42 | command: | 43 | mkdir /tmp/test-results 44 | TEST_FILES="$(circleci tests glob "spec/**/*_spec.rb" | circleci tests split --split-by=timings)" 45 | 46 | bundle exec rspec --exclude-pattern "spec/tensor_stream/evaluators/opencl_*.rb" \ 47 | -r rspec_junit_formatter --format progress \ 48 | --format RspecJunitFormatter \ 49 | --out /tmp/test-results/rspec.xml \ 50 | --format progress \ 51 | $TEST_FILES 52 | 53 | # collect reports 54 | - store_test_results: 55 | path: /tmp/test-results 56 | - store_artifacts: 57 | path: /tmp/test-results 58 | destination: test-results -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.bundle/ 2 | /.yardoc 3 | /Gemfile.lock 4 | /_yardoc/ 5 | /coverage/ 6 | /doc/ 7 | /pkg/ 8 | /spec/reports/ 9 | /tmp/ 10 | /embeddings/ 11 | *.gem 12 | samples/.ipynb_checkpoints/ 13 | 14 | # rspec failure tracking 15 | .rspec_status 16 | .DS_Store 17 | -------------------------------------------------------------------------------- /.rake_tasks~: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/.rake_tasks~ -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --format documentation 2 | --color 3 | -------------------------------------------------------------------------------- /.rubocop.yml: -------------------------------------------------------------------------------- 1 | AllCops: 2 | TargetRubyVersion: 2.2 3 | Exclude: 4 | - samples/* 5 | - bin/* 6 | - spec/**/* 7 | - tensor_stream.gemspec 8 | - Rakefile 9 | 10 | Naming/AccessorMethodName: 11 | Exclude: 12 | - lib/tensor_stream/utils.rb 13 | 14 | Style/StringLiterals: 15 | Enabled: false 16 | 17 | Layout/TrailingBlankLines: 18 | Enabled: false 19 | 20 | Metrics/LineLength: 21 | Max: 200 22 | 23 | Metrics/AbcSize: 24 | Enabled: false 25 | 26 | Metrics/PerceivedComplexity: 27 | Enabled: false 28 | 29 | Metrics/MethodLength: 30 | Enabled: false 31 | 32 | Metrics/CyclomaticComplexity: 33 | Enabled: false 34 | 35 | Metrics/BlockLength: 36 | Exclude: 37 | - lib/tensor_stream/math_gradients.rb 38 | 39 | Naming/AccessorMethodName: 40 | Exclude: 41 | - lib/tensor_stream.rb 42 | - lib/tensor_stream/control_flow.rb 43 | - lib/tensor_stream/graph.rb 44 | - lib/tensor_stream/operation.rb 45 | 46 | Style/Documentation: 47 | Exclude: 48 | - lib/tensor_stream/version.rb 49 | - lib/tensor_stream/trainer.rb 50 | - lib/tensor_stream/nn/nn_ops.rb 51 | - lib/tensor_stream/evaluator/evaluator.rb 52 | 53 | Lint/UnusedMethodArgument: 54 | Exclude: 55 | - lib/tensor_stream/train/saver.rb 56 | - lib/tensor_stream/ops.rb 57 | 58 | Metrics/ParameterLists: 59 | Max: 8 60 | 61 | Style/PerlBackrefs: 62 | Enabled: false 63 | 64 | Style/RegexpLiteral: 65 | Enabled: false 66 | 67 | Naming/MemoizedInstanceVariableName: 68 | Enabled: false 69 | 70 | Metrics/ModuleLength: 71 | Max: 200 72 | 73 | Metrics/ClassLength: 74 | Max: 250 75 | Exclude: 76 | - lib/tensor_stream/evaluator/ruby_evaluator.rb 77 | 78 | Naming/VariableNumber: 79 | Enabled: false 80 | 81 | Style/DoubleNegation: 82 | Enabled: false 83 | 84 | Style/TrailingCommaInHashLiteral: 85 | Enabled: false 86 | 87 | Naming/UncommunicativeMethodParamName: 88 | Exclude: 89 | - 
lib/tensor_stream/evaluator/ruby_evaluator.rb 90 | - lib/tensor_stream/ops.rb -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | language: ruby 3 | rvm: 4 | - 2.4.0 5 | before_install: gem install bundler -v 1.14.6 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at joseph.dayo@gmail.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 
62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [http://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: http://contributor-covenant.org 74 | [version]: http://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM circleci/ruby:2.6.1-node-browsers 2 | RUN sudo apt-get update -q && sudo apt-get install --no-install-recommends -yq alien wget unzip clinfo \ 3 | && sudo apt-get clean && sudo rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 4 | RUN export DEVEL_URL="https://software.intel.com/file/531197/download" \ 5 | && sudo wget ${DEVEL_URL} -q -O download.zip --no-check-certificate \ 6 | && sudo unzip download.zip \ 7 | && sudo rm -f download.zip *.tar.xz* \ 8 | && sudo alien --to-deb *dev*.rpm \ 9 | && sudo dpkg -i *dev*.deb \ 10 | && sudo rm *.rpm *.deb 11 | RUN export RUNTIME_URL="http://registrationcenter-download.intel.com/akdlm/irc_nas/9019/opencl_runtime_16.1.1_x64_ubuntu_6.4.0.25.tgz" \ 12 | && export TAR=$(basename ${RUNTIME_URL}) \ 13 | && export DIR=$(basename ${RUNTIME_URL} .tgz) \ 14 | && sudo wget -q ${RUNTIME_URL} \ 15 | && sudo tar -xf ${TAR} \ 16 | && for i in ${DIR}/rpm/*.rpm; do sudo alien --to-deb $i; done \ 17 | && sudo rm -rf ${DIR} ${TAR} \ 18 | && sudo dpkg -i *.deb \ 19 | && sudo rm *.deb 20 | RUN sudo mkdir -p /etc/OpenCL/vendors/ \ 21 | && echo "/opt/intel/opencl-1.2-6.4.0.25/lib64/libintelocl.so" | sudo tee --append /etc/OpenCL/vendors/intel.icd > /dev/null 22 | ENV OCL_INC /opt/intel/opencl/include 23 | ENV OCL_LIB /opt/intel/opencl-1.2-6.4.0.25/lib64 24 | ENV LIBOPENCL_SO /opt/intel/opencl-1.2-6.4.0.25/lib64/libOpenCL.so 25 | ENV LD_LIBRARY_PATH $OCL_LIB:$LD_LIBRARY_PATH 26 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | # Specify your gem's dependencies in cl-brains.gemspec 4 | gemspec 5 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018-2019 Joseph Emmanuel Dayo 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | require "bundler/gem_tasks" 2 | require "rspec/core/rake_task" 3 | require "rdoc/task" 4 | 5 | RSpec::Core::RakeTask.new(:spec) 6 | 7 | task default: :spec 8 | 9 | RDoc::Task.new do |rdoc| 10 | rdoc.main = "README.rdoc" 11 | rdoc.rdoc_files.include("README.rdoc", "lib /*.rb") 12 | end 13 | -------------------------------------------------------------------------------- /benchmark_intel.txt: -------------------------------------------------------------------------------- 1 | TensorStream::Evaluator::OpenclEvaluator 2 | TensorStream::Evaluator::RubyEvaluator 3 | model name : Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz 4 | OpenCL device Intel Gen OCL Driver Intel(R) HD Graphics Skylake ULT GT2 5 | Rehearsal -------------------------------------------------------------- 6 | pure ruby ooo matmul : 1.800000 0.000000 1.800000 ( 1.803752) 7 | opencl ooo matmul : 0.520000 0.050000 0.570000 ( 0.630992) 8 | pure ruby softmax : 0.300000 0.000000 0.300000 ( 0.303185) 9 | opencl softmax : 0.180000 0.010000 0.190000 ( 0.200246) 10 | pure ruby matmul : 0.860000 0.010000 0.870000 ( 0.869387) 11 | opencl matmul : 0.260000 0.020000 0.280000 ( 0.335164) 12 | pure ruby : 2.960000 0.020000 2.980000 ( 2.980800) 13 | opencl : 1.050000 0.090000 1.140000 ( 1.258354) 14 | pure ruby single function: 0.460000 0.000000 0.460000 ( 0.464543) 15 | opencl singlefunction: 0.570000 0.020000 0.590000 ( 0.590300) 16 | pure ruby pow float: 0.120000 0.000000 0.120000 ( 0.123025) 17 | opencl pow float: 0.290000 0.010000 0.300000 ( 0.316175) 18 | pure ruby pow int: 0.020000 0.000000 0.020000 ( 0.021570) 19 | opencl pow int: 0.180000 0.000000 0.180000 ( 0.194088) 20 | ----------------------------------------------------- total: 9.800000sec 21 | 22 | user system total real 23 | pure ruby ooo matmul : 1.860000 0.000000 1.860000 ( 1.866387) 24 | opencl ooo matmul : 0.410000 0.040000 0.450000 ( 0.505565) 25 | pure ruby softmax : 0.300000 0.000000 0.300000 ( 0.298407) 26 | opencl softmax : 0.120000 0.000000 0.120000 ( 0.128033) 27 | pure ruby matmul : 0.830000 0.000000 0.830000 ( 0.836471) 28 | opencl matmul : 0.240000 0.010000 0.250000 ( 0.269629) 29 | pure ruby : 2.950000 0.000000 2.950000 ( 2.947306) 30 | opencl : 0.930000 0.100000 1.030000 ( 1.205344) 31 | pure ruby single function: 0.650000 0.000000 0.650000 ( 0.642834) 32 | opencl singlefunction: 0.840000 0.040000 0.880000 ( 1.097814) 33 | pure ruby pow float: 0.140000 0.000000 0.140000 ( 0.140097) 34 | opencl pow float: 0.190000 0.010000 0.200000 ( 0.269772) 35 | pure ruby pow int: 0.030000 0.000000 0.030000 ( 0.030491) 36 | opencl pow int: 0.040000 0.010000 0.050000 ( 0.084335) 37 | -------------------------------------------------------------------------------- /benchmark_nvidia.txt: -------------------------------------------------------------------------------- 1 | TensorStream::Evaluator::OpenclEvaluator 2 | 
TensorStream::Evaluator::RubyEvaluator 3 | model name : Intel(R) Core(TM) i5-6200U CPU @ 2.30GHz 4 | OpenCL device NVIDIA CUDA GeForce GTX 950M 5 | Rehearsal -------------------------------------------------------------- 6 | pure ruby ooo matmul : 1.670000 0.010000 1.680000 ( 1.682059) 7 | opencl ooo matmul : 0.100000 0.100000 0.200000 ( 0.220002) 8 | pure ruby softmax : 0.380000 0.010000 0.390000 ( 0.377827) 9 | opencl softmax : 0.040000 0.000000 0.040000 ( 0.040750) 10 | pure ruby matmul : 1.000000 0.010000 1.010000 ( 1.013795) 11 | opencl matmul : 0.040000 0.000000 0.040000 ( 0.032285) 12 | pure ruby : 3.460000 0.010000 3.470000 ( 3.486048) 13 | opencl : 0.320000 0.020000 0.340000 ( 0.326977) 14 | pure ruby single function: 0.460000 0.000000 0.460000 ( 0.460433) 15 | opencl singlefunction: 0.130000 0.000000 0.130000 ( 0.130273) 16 | pure ruby pow float: 0.110000 0.000000 0.110000 ( 0.115466) 17 | opencl pow float: 0.040000 0.010000 0.050000 ( 0.030290) 18 | pure ruby pow int: 0.020000 0.000000 0.020000 ( 0.023065) 19 | opencl pow int: 0.040000 0.010000 0.050000 ( 0.044086) 20 | ----------------------------------------------------- total: 7.990000sec 21 | 22 | user system total real 23 | pure ruby ooo matmul : 1.790000 0.000000 1.790000 ( 1.794305) 24 | opencl ooo matmul : 0.050000 0.000000 0.050000 ( 0.049030) 25 | pure ruby softmax : 0.300000 0.000000 0.300000 ( 0.305664) 26 | opencl softmax : 0.030000 0.000000 0.030000 ( 0.021897) 27 | pure ruby matmul : 0.810000 0.000000 0.810000 ( 0.805583) 28 | opencl matmul : 0.030000 0.000000 0.030000 ( 0.024358) 29 | pure ruby : 2.870000 0.010000 2.880000 ( 2.881779) 30 | opencl : 0.170000 0.000000 0.170000 ( 0.173036) 31 | pure ruby single function: 0.400000 0.000000 0.400000 ( 0.398390) 32 | opencl singlefunction: 0.120000 0.000000 0.120000 ( 0.117482) 33 | pure ruby pow float: 0.100000 0.000000 0.100000 ( 0.099471) 34 | opencl pow float: 0.030000 0.000000 0.030000 ( 0.025039) 35 | pure ruby pow int: 0.030000 0.000000 0.030000 ( 0.028251) 36 | opencl pow int: 0.040000 0.000000 0.040000 ( 0.031384) 37 | -------------------------------------------------------------------------------- /benchmark_ryzen_amd.txt: -------------------------------------------------------------------------------- 1 | TensorStream::Evaluator::OpenclEvaluator 2 | TensorStream::Evaluator::RubyEvaluator 3 | model name : AMD Ryzen 3 1300X Quad-Core Processor 4 | OpenCL device AMD Accelerated Parallel Processing Ellesmere 5 | Rehearsal -------------------------------------------------------------- 6 | pure ruby ooo matmul : 1.480000 0.000000 1.480000 ( 1.486855) 7 | opencl ooo matmul : 0.190000 0.130000 0.320000 ( 0.332605) 8 | pure ruby softmax : 0.280000 0.000000 0.280000 ( 0.278398) 9 | opencl softmax : 0.040000 0.020000 0.060000 ( 0.070980) 10 | pure ruby matmul : 0.730000 0.000000 0.730000 ( 0.726565) 11 | opencl matmul : 0.020000 0.010000 0.030000 ( 0.046762) 12 | pure ruby : 2.550000 0.000000 2.550000 ( 2.544265) 13 | opencl : 0.290000 0.020000 0.310000 ( 0.318674) 14 | pure ruby single function: 0.370000 0.000000 0.370000 ( 0.374805) 15 | opencl singlefunction: 0.190000 0.050000 0.240000 ( 0.239913) 16 | pure ruby pow float: 0.090000 0.000000 0.090000 ( 0.093488) 17 | opencl pow float: 0.100000 0.010000 0.110000 ( 0.110532) 18 | pure ruby pow int: 0.030000 0.000000 0.030000 ( 0.022236) 19 | opencl pow int: 0.090000 0.010000 0.100000 ( 0.111199) 20 | ----------------------------------------------------- total: 6.700000sec 21 | 22 | user system total real 23 | pure ruby 
ooo matmul : 1.460000 0.000000 1.460000 ( 1.468597) 24 | opencl ooo matmul : 0.040000 0.000000 0.040000 ( 0.053625) 25 | pure ruby softmax : 0.280000 0.000000 0.280000 ( 0.280252) 26 | opencl softmax : 0.020000 0.010000 0.030000 ( 0.043143) 27 | pure ruby matmul : 0.700000 0.000000 0.700000 ( 0.703540) 28 | opencl matmul : 0.030000 0.000000 0.030000 ( 0.037716) 29 | pure ruby : 2.540000 0.000000 2.540000 ( 2.539661) 30 | opencl : 0.150000 0.000000 0.150000 ( 0.164203) 31 | pure ruby single function: 0.350000 0.000000 0.350000 ( 0.351883) 32 | opencl singlefunction: 0.090000 0.010000 0.100000 ( 0.092359) 33 | pure ruby pow float: 0.080000 0.000000 0.080000 ( 0.080484) 34 | opencl pow float: 0.030000 0.000000 0.030000 ( 0.032691) 35 | pure ruby pow int: 0.020000 0.000000 0.020000 ( 0.019487) 36 | opencl pow int: 0.020000 0.000000 0.020000 ( 0.026782) 37 | -------------------------------------------------------------------------------- /bin/console: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require "bundler/setup" 4 | require "tensor_stream" 5 | 6 | # You can add fixtures and/or initialization code here to make experimenting 7 | # with your gem easier. You can also use a different console, if you like. 8 | 9 | # (If you use this, don't forget to add pry to your Gemfile!) 10 | # require "pry" 11 | # Pry.start 12 | 13 | require "irb" 14 | IRB.start(__FILE__) 15 | -------------------------------------------------------------------------------- /bin/setup: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | IFS=$'\n\t' 4 | set -vx 5 | 6 | bundle install 7 | 8 | # Do any other automated setup that you need to do here 9 | -------------------------------------------------------------------------------- /bin/stubgen: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | # Script to auto generate op stub file from the opdef folder 4 | 5 | require "bundler/setup" 6 | require "tensor_stream" 7 | require "erb" 8 | 9 | target = File.join(__dir__, '..', 'lib', 'tensor_stream', 'generated_stub') 10 | 11 | FileUtils.mkdir_p(target) 12 | 13 | stub_file = File.join(target, 'ops.rb') 14 | File.delete(stub_file) if File.exist?(stub_file) 15 | 16 | f = File.open(stub_file, 'wb') 17 | 18 | template = File.read(File.join(target, 'stub_file.erb')) 19 | f << ERB.new(template, nil, '%').result(binding) 20 | f.close -------------------------------------------------------------------------------- /exe/model_utils: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require "bundler/setup" 4 | require "tensor_stream" 5 | require "tensor_stream/utils/freezer" 6 | 7 | if ARGV[0].nil? 8 | puts "source checkpoint folder not specified" 9 | puts "usage: model_utils " 10 | puts "example: model_utils sample_model/ frozen.yml" 11 | exit(1) 12 | end 13 | 14 | if ARGV[1].nil? 
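# second CLI argument: destination YAML file for the frozen model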
15 | puts "dest YAML file for frozen model not specified" 16 | puts "usage: model_utils " 17 | puts "example: model_utils sample_model/ frozen.yml" 18 | exit(1) 19 | end 20 | 21 | sess = TensorStream.session 22 | freezer = TensorStream::Freezer.new 23 | freezer.convert(sess, ARGV[0], ARGV[1]) 24 | exit(0) 25 | -------------------------------------------------------------------------------- /lib/tensor_stream.rb: -------------------------------------------------------------------------------- 1 | require "tensor_stream/version" 2 | require "deep_merge" 3 | require "matrix" 4 | require "concurrent" 5 | require "tensor_stream/utils/data_type_utils" 6 | require "tensor_stream/exceptions" 7 | require "tensor_stream/helpers/op_helper" 8 | require "tensor_stream/helpers/string_helper" 9 | require "tensor_stream/initializer" 10 | require "tensor_stream/graph_keys" 11 | require "tensor_stream/types" 12 | require "tensor_stream/graph_builder" 13 | require "tensor_stream/graph" 14 | require "tensor_stream/device" 15 | require "tensor_stream/session" 16 | require "tensor_stream/tensor_shape" 17 | require "tensor_stream/helpers/tensor_mixins" 18 | require "tensor_stream/tensor" 19 | require "tensor_stream/constant" 20 | require "tensor_stream/variable" 21 | require "tensor_stream/variable_scope" 22 | require "tensor_stream/operation" 23 | require "tensor_stream/placeholder" 24 | require "tensor_stream/control_flow" 25 | require "tensor_stream/dynamic_stitch" 26 | require "tensor_stream/math/math_ops" 27 | require "tensor_stream/nn/nn_ops" 28 | require "tensor_stream/evaluator/evaluator" 29 | require "tensor_stream/graph_serializers/packer" 30 | require "tensor_stream/graph_serializers/serializer" 31 | require "tensor_stream/graph_deserializers/protobuf" 32 | require "tensor_stream/graph_deserializers/yaml_loader" 33 | require "tensor_stream/graph_serializers/pbtext" 34 | require "tensor_stream/graph_serializers/graphml" 35 | require "tensor_stream/graph_serializers/yaml" 36 | require "tensor_stream/math_gradients" 37 | require "tensor_stream/debugging/debugging" 38 | require "tensor_stream/utils" 39 | require "tensor_stream/train/utils" 40 | require "tensor_stream/images" 41 | 42 | require "tensor_stream/profile/report_tool" 43 | 44 | # require 'tensor_stream/libraries/layers' 45 | require "tensor_stream/monkey_patches/patch" 46 | require "tensor_stream/monkey_patches/integer" 47 | require "tensor_stream/monkey_patches/float" 48 | require "tensor_stream/monkey_patches/array" 49 | require "tensor_stream/ops" 50 | require "tensor_stream/trainer" 51 | require "tensor_stream/op_maker" 52 | 53 | # module that exposes TensorStream top level functions 54 | module TensorStream 55 | extend TensorStream::OpHelper 56 | extend TensorStream::Ops 57 | extend TensorStream::Debugging 58 | extend TensorStream::Utils 59 | 60 | def self.__version__ 61 | TensorStream::VERSION 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /lib/tensor_stream/constant.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | # Class that defines a TensorStream variable 3 | class Constant < Tensor 4 | def initialize(data_type, rank, shape, options = {}) 5 | setup_initial_state(options) 6 | @data_type = data_type 7 | @rank = rank 8 | @breakpoint = false 9 | @shape = TensorShape.new(shape, rank) 10 | @value = nil 11 | @options = options 12 | @is_const = true 13 | @internal = options[:internal] 14 | @name = [@graph.get_name_scope, 
options[:name] || build_name].compact.reject(&:empty?).join("/")
15 | @given_name = @name
16 | 
17 | if options[:value]
18 | if options[:value].is_a?(Array)
19 | # check if a single-dimension array is passed
20 | options[:value] = _reshape(options[:value], shape.reverse.dup) if shape.size >= 2 && !options[:value].empty? && !options[:value][0].is_a?(Array)
21 | 
22 | @value = options[:value].map { |v| v.is_a?(Tensor) ? Tensor.cast_dtype(v, @data_type) : v }
23 | elsif !shape.empty?
24 | @value = _reshape(Tensor.cast_dtype(options[:value], @data_type), shape.dup)
25 | else
26 | @value = Tensor.cast_dtype(options[:value], @data_type)
27 | end
28 | @shape = TensorShape.new(shape_eval(@value))
29 | end
30 | 
31 | @op = Graph.get_default_graph.add_op!(:const, value: @value, data_type: @data_type, internal_name: @name, shape: @shape)
32 | @name = @op.name
33 | end
34 | 
35 | def inspect
36 | "Constant(#{@value}, name: #{@name}, shape: #{@shape}, data_type: #{@data_type})"
37 | end
38 | 
39 | protected
40 | 
41 | def build_name
42 | "Const"
43 | end
44 | end
45 | end
46 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/control_flow.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # Defines a TensorStream control flow op
3 | class ControlFlow < Operation
4 | attr_accessor :ops
5 | 
6 | def initialize(flow_type, inputs, ops = nil, options = {})
7 | setup_initial_state(options)
8 | @options = options
9 | @operation = :"flow_#{flow_type}"
10 | @inputs = inputs
11 | @name = [@graph.get_name_scope, options[:name] || set_name].compact.join("/")
12 | @ops = ops
13 | @consumers = Set.new
14 | @shape = TensorShape.new([inputs.size])
15 | @graph.add_node(self)
16 | end
17 | 
18 | def set_data_type(_passed_data_type)
19 | :unknown
20 | end
21 | 
22 | def run
23 | eval
24 | end
25 | end
26 | end
27 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/debugging/debugging.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | module Debugging
3 | extend TensorStream::OpHelper
4 | 
5 | def add_check_numerics_ops
6 | graph = TensorStream.get_default_graph
7 | nodes_to_process = graph.nodes.values.select { |node| node.is_a?(Operation) }
8 | 
9 | nodes_to_process.each do |node|
10 | node.inputs = node.inputs.collect do |input|
11 | next if input.nil?
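# variables are passed through untouched; only floating-point tensor inputs get wrapped below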
12 | next input if input.is_a?(Variable)
13 | 
14 | if input.is_a?(Tensor) && TensorStream::Ops::FLOATING_POINT_TYPES.include?(input.data_type)
15 | TensorStream.check_numerics(input, "#{node.name}/#{input.name}", name: "check/#{node.name}/#{input.name}")
16 | else
17 | input
18 | end
19 | end
20 | end
21 | end
22 | end
23 | end
24 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/device.rb: --------------------------------------------------------------------------------
1 | # A TensorStream device
2 | module TensorStream
3 | # Class that describes a supported device
4 | class Device
5 | attr_accessor :name, :type, :evaluator
6 | def initialize(name, type, evaluator)
7 | @name = name
8 | @type = type
9 | @evaluator = evaluator
10 | end
11 | end
12 | end
13 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/dynamic_stitch.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # Defines a TensorStream dynamic stitch op
3 | class DynamicStitch < Operation
4 | attr_accessor :ops
5 | 
6 | def initialize(flow_type, inputs, ops = nil, options = {})
7 | setup_initial_state(options)
8 | 
9 | @operation = :"flow_#{flow_type}"
10 | @options = options.merge(n: inputs[0].size)
11 | @inputs = inputs.flatten(1).map { |i| TensorStream.convert_to_tensor(i) }.map { |i| i ? i.op : nil }
12 | 
13 | @consumers = Set.new
14 | @data_type = Tensor.detect_type(inputs[1])
15 | @name = [@graph.get_name_scope, options[:name] || set_name].compact.join("/")
16 | @ops = ops
17 | @shape = TensorShape.new(nil)
18 | @graph.add_node(self)
19 | end
20 | 
21 | def set_data_type(_passed_data_type)
22 | :unknown
23 | end
24 | 
25 | def run
26 | eval
27 | end
28 | end
29 | end
30 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/buffer.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # This class represents an evaluator-specific native buffer
3 | class Buffer
4 | attr_accessor :data_type, :buffer, :dirty, :name
5 | 
6 | def initialize(data_type:, buffer:)
7 | @data_type = data_type
8 | @buffer = buffer
9 | end
10 | 
11 | def to_ruby
12 | buffer
13 | end
14 | end
15 | end
16 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/evaluator.rb: --------------------------------------------------------------------------------
1 | require "tensor_stream/evaluator/ruby_evaluator"
2 | require "tensor_stream/evaluator/buffer"
3 | require "tensor_stream/evaluator/evaluator_utils"
4 | 
5 | module TensorStream
6 | module Evaluator
7 | end
8 | end
9 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/evaluator_utils.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | class EvaluatorUtils
3 | extend TensorStream::StringHelper
4 | 
5 | def self.get_evaluator_classes(evaluators)
6 | @evaluator_classes ||= if evaluators.is_a?(Array)
7 | if evaluators.empty?
8 | TensorStream::Evaluator.default_evaluators
9 | else
10 | evaluators.collect { |name| Object.const_get("TensorStream::Evaluator::#{camelize(name.to_s)}") }
11 | end
12 | elsif evaluators.nil?
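# nothing requested: fall back to the default evaluator stack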
13 | TensorStream::Evaluator.default_evaluators
14 | else
15 | [Object.const_get("TensorStream::Evaluator::#{camelize(evaluators.to_s)}")]
16 | end
17 | @evaluator_classes
18 | end
19 | end
20 | end
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/operation_helpers/math_helper.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # various math helper functions
3 | module MathHelper
4 | # Calculates value of y = 1.0 / ( 1.0 + exp( -x ) )
5 | def sigmoid(val)
6 | 1 / (1 + Math.exp(-val))
7 | end
8 | end
9 | end
10 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/operation_helpers/random_gaussian.rb: --------------------------------------------------------------------------------
1 | # http://creativecommons.org/publicdomain/zero/1.0/
2 | class RandomGaussian
3 | def initialize(mean, stddev, rand_helper = -> { Kernel.rand })
4 | @rand_helper = rand_helper
5 | @mean = mean
6 | @stddev = stddev
7 | @valid = false
8 | @next = 0
9 | end
10 | 
11 | def rand
12 | if @valid
13 | @valid = false
14 | @next
15 | else
16 | @valid = true
17 | x, y = self.class.gaussian(@mean, @stddev, @rand_helper)
18 | @next = y
19 | x
20 | end
21 | end
22 | 
23 | def self.gaussian(mean, stddev, rand)
24 | theta = 2 * Math::PI * rand.call
25 | rho = Math.sqrt(-2 * Math.log(1 - rand.call))
26 | scale = stddev * rho
27 | x = mean + scale * Math.cos(theta)
28 | y = mean + scale * Math.sin(theta)
29 | [x, y]
30 | end
31 | end
32 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/ruby/check_ops.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | module CheckOps
3 | def self.included(klass)
4 | klass.class_eval do
5 | register_op :assert_equal do |context, tensor, inputs|
6 | result = call_vector_op(tensor, :equal, inputs[0], inputs[1], context) { |t, u| t == u }
7 | 
8 | result = result.is_a?(Array) ? result.flatten.uniq : [result]
9 | prefix = tensor.options[:message] || ""
10 | raise TensorStream::InvalidArgumentError, "#{prefix} #{tensor.inputs[0].name} != #{tensor.inputs[1].name}" if result != [true]
11 | 
12 | nil
13 | end
14 | end
15 | end
16 | end
17 | end
18 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/ruby/random_ops.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | ## Collection of random number generation related ops
3 | module RandomOps
4 | def self.included(klass)
5 | klass.class_eval do
6 | register_op :glorot_uniform, no_eval: true do |_context, tensor, _inputs|
7 | seed = tensor.options[:seed]
8 | random = _get_randomizer(tensor, seed)
9 | 
10 | shape = tensor.options[:shape] || tensor.shape.shape
11 | fan_in, fan_out = if shape.size.zero?
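# scalar shape: treat fan-in and fan-out as 1 for the Glorot limit computed below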
12 | [1, 1] 13 | elsif shape.size == 1 14 | [1, shape[0]] 15 | else 16 | [shape[0], shape.last] 17 | end 18 | 19 | limit = Math.sqrt(6.0 / (fan_in + fan_out)) 20 | 21 | minval = -limit 22 | maxval = limit 23 | 24 | generator = -> { random.rand * (maxval - minval) + minval } 25 | generate_vector(shape, generator: generator) 26 | end 27 | 28 | register_op :random_uniform, no_eval: true do |_context, tensor, inputs| 29 | maxval = tensor.options.fetch(:maxval, 1) 30 | minval = tensor.options.fetch(:minval, 0) 31 | seed = tensor.options[:seed] 32 | 33 | random = _get_randomizer(tensor, seed) 34 | generator = -> { random.rand * (maxval - minval) + minval } 35 | shape = inputs[0] || tensor.shape.shape 36 | generate_vector(shape, generator: generator) 37 | end 38 | 39 | register_op :random_standard_normal, no_eval: true do |_context, tensor, inputs| 40 | seed = tensor.options[:seed] 41 | random = _get_randomizer(tensor, seed) 42 | r = RandomGaussian.new(tensor.options.fetch(:mean), tensor.options.fetch(:stddev), -> { random.rand }) 43 | random = _get_randomizer(tensor, seed) 44 | generator = -> { r.rand } 45 | shape = inputs[0] || tensor.shape.shape 46 | generate_vector(shape, generator: generator) 47 | end 48 | 49 | register_op :truncated_normal, no_eval: true do |_context, tensor, inputs| 50 | seed = tensor.options[:seed] 51 | random = _get_randomizer(tensor, seed) 52 | r = RandomGaussian.new(tensor.options.fetch(:mean), tensor.options.fetch(:stddev), -> { random.rand }) 53 | 54 | generator = -> { r.rand } 55 | shape = inputs[0] || tensor.shape.shape 56 | random_values = Array.new(shape.reduce(:*) || 1) { 57 | generator.call 58 | } 59 | mean = random_values.reduce(:+) / random_values.size 60 | 61 | # standard deviation 62 | 63 | stddev = Math.sqrt(random_values.map { |v| (v - mean)**2 }.reduce(:+) / (random_values.size - 1)) 64 | minval = random_values.min 65 | maxval = random_values.max 66 | max_iterations = 100 67 | 68 | if (minval.infinite? && minval < 0.0) || (maxval < mean) 69 | # Reverse all calculations. normMin and normMax will be flipped. 70 | a = minval 71 | minval = maxval 72 | maxval = a 73 | stddev = -stddev 74 | end 75 | 76 | norm_min = (minval - mean) / stddev 77 | norm_max = (maxval - mean) / stddev 78 | 79 | val = random_values.map { |v| 80 | iterations = 0 81 | pick = v 82 | while (pick > norm_max) || (pick < norm_min) 83 | pick = generator.call 84 | iterations += 1 85 | if iterations > max_iterations 86 | pick = v 87 | break 88 | end 89 | end 90 | 91 | pick 92 | } 93 | TensorShape.reshape(val, shape) 94 | end 95 | end 96 | end 97 | end 98 | end 99 | -------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/ruby/storage_manager.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class RubyStorageManager 3 | def self.current_storage_manager 4 | @storage_manager ||= RubyStorageManager.new 5 | end 6 | 7 | def initialize 8 | @variables = {} 9 | end 10 | 11 | def exists?(graph, name) 12 | return false if !@variables.key?(graph.object_id) 13 | 14 | @variables[graph.object_id].key?(name.to_sym) 15 | end 16 | 17 | def create_variable(graph, name, value) 18 | raise "no name specified" if name.nil? 19 | 20 | @variables[graph.object_id][name.to_sym] = value 21 | end 22 | 23 | def assign_value(graph, name, value) 24 | raise "no name specified" if name.nil? 
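# lazily create the per-graph variable table, keyed by the graph's object_id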
25 | 
26 | @variables[graph.object_id] ||= {}
27 | @variables[graph.object_id][name.to_sym] = value
28 | end
29 | 
30 | def read_value(graph, name)
31 | raise "no name specified" if name.nil?
32 | 
33 | @variables[graph.object_id][name.to_sym]
34 | end
35 | 
36 | def clear_variables(graph)
37 | @variables[graph.object_id] = {}
38 | end
39 | end
40 | end
-------------------------------------------------------------------------------- /lib/tensor_stream/evaluator/ruby/variable_ops.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | ## Collection of variable related ops
3 | module VariableOps
4 | def self.included(klass)
5 | klass.class_eval do
6 | register_op :variable_v2 do |_context, tensor, _inputs|
7 | value = var_read_value(tensor)
8 | raise "variable #{tensor.options[:var_name]} not initialized" if value.nil?
9 | 
10 | value
11 | end
12 | 
13 | register_op :assign do |context, tensor, inputs|
14 | var_assign_value(tensor, inputs[0])
15 | end
16 | 
17 | register_op :assign_add, no_eval: true do |context, tensor, inputs|
18 | current_val = var_read_value(tensor)
19 | 
20 | raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?
21 | eval_a, eval_b = broadcast(current_val, inputs[0])
22 | result = multi_array_op(->(var, val) { var + val }, eval_a, eval_b)
23 | var_assign_value(tensor, result)
24 | end
25 | 
26 | register_op :assign_sub do |context, tensor, inputs|
27 | current_val = var_read_value(tensor)
28 | raise "variable #{tensor.options[:var_name]} not initialized" if current_val.nil?
29 | eval_a, eval_b = broadcast(current_val, inputs[0])
30 | result = multi_array_op(->(var, val) { var - val }, eval_a, eval_b)
31 | var_assign_value(tensor, result)
32 | end
33 | 
34 | register_op :save_ts do |_context, tensor, inputs|
35 | outputfile = inputs[0]
36 | inputs = tensor.inputs.dup
37 | 
38 | inputs.shift
39 | variables = {}
40 | inputs.each do |savable|
41 | val = var_read_value(savable)
42 | 
43 | packed_data = Zlib::Deflate.deflate(TensorStream::Packer.pack(val, savable.data_type))
44 | variables[savable.options[:var_name]] = {
45 | "shape" => shape_eval(val),
46 | "data" => Base64.strict_encode64(packed_data),
47 | }
48 | end
49 | 
50 | File.write(outputfile, {"variables" => variables}.to_yaml)
51 | nil
52 | end
53 | 
54 | register_op :restore_ts do |_context, tensor, inputs|
55 | inputs = inputs.dup
56 | filename = inputs.shift
57 | tensor_names = inputs
58 | 
59 | input_dump = YAML.safe_load(File.read(filename), [Symbol])
60 | vars = tensor.graph.get_collection(GraphKeys::GLOBAL_VARIABLES)
61 | vars.select!
{ |v| input_dump["variables"].key?(v.name) && tensor_names.include?(v.name) } 62 | vars.each do |variable| 63 | data = TensorStream::Packer.unpack(Zlib::Inflate.inflate(Base64.decode64(input_dump["variables"][variable.name]["data"])), variable.data_type) 64 | shape = input_dump["variables"][variable.name]["shape"] 65 | variable.buffer = nil 66 | var_assign_value(variable, TensorShape.reshape(data, shape)) 67 | end 68 | 69 | nil 70 | end 71 | end 72 | end 73 | end 74 | end -------------------------------------------------------------------------------- /lib/tensor_stream/exceptions.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class TensorStreamError < RuntimeError; end 3 | class KeyError < TensorStreamError; end 4 | class ValueError < TensorStreamError; end 5 | class InvalidArgumentError < TensorStreamError; end 6 | class NotImplementedError < TensorStreamError; end 7 | end 8 | -------------------------------------------------------------------------------- /lib/tensor_stream/generated_stub/stub_file.erb: -------------------------------------------------------------------------------- 1 | # This file has ben automatically generated by stubgen 2 | # DO NOT EDIT 3 | # 4 | module TensorStream 5 | module OpStub 6 | <% TensorStream::OpMaker.each_op do |op|%> 7 | ## 8 | <% op.description_lines.each do |line|%> # <%= line %> 9 | <%end%> # 10 | #<% if op.supports_broadcasting? %> This operation supports broadcasting 11 | #<% end %> 12 | <% op.parameters.each do |param| %> # @param <%= param[:name] %> <%= param[:description]%><%if param[:validate]%> (of type <%= param[:validate] %>)<%end%> 13 | <% end %> # 14 | # Options: 15 | <% op.options.each do |k, v| %> # @option <%= k %> <%= v[:description]%><% if v[:default_value] != :nil %> default (<%= v[:default_value] %>)<%end%> 16 | <%end%> # @return Tensor 17 | def <%= op.operation.to_s %>(<%= (op.expand_params(true) + op.expand_options(true)).join(', ') %>) 18 | <%= op.generate_body %> 19 | end 20 | <% op.aliases.each do |a|%> 21 | alias_method :<%= a %>, :<%= op.operation %><%end%> 22 | <% end %> 23 | end 24 | end -------------------------------------------------------------------------------- /lib/tensor_stream/graph_builder.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class GraphBuilder 3 | include TensorStream::OpHelper 4 | include TensorStream::StringHelper 5 | 6 | attr_accessor :graph 7 | 8 | def initialize(graph) 9 | @graph = graph 10 | end 11 | 12 | def build(buffer) 13 | protobuf = TensorStream::Protobuf.new 14 | parsed_tree = protobuf.load_from_string(buffer) 15 | parsed_tree.each do |node| 16 | next unless node["type"] == "node" 17 | 18 | # puts "build #{node['name']}" 19 | options = protobuf.options_evaluator(node) 20 | options[:name] = node["name"] 21 | options[:__graph] = @graph 22 | value = options.delete("value") 23 | options = symbolize_keys(options) 24 | case node["op"] 25 | when "Const" 26 | dimension = shape_eval(value) 27 | rank = dimension.size 28 | options[:value] = value 29 | options[:const] = true 30 | TensorStream::Constant.new(options[:dtype] || options[:T], rank, dimension, options) 31 | when "VariableV2" 32 | # evaluate options 33 | shape = options[:shape] 34 | i_var(options[:dtype] || options[:T], nil, shape, nil, options) 35 | when "Placeholder" 36 | shape = options[:shape] 37 | TensorStream::Placeholder.new(options[:dtype] || options[:T], nil, shape, options) 38 | else 39 | op = 
underscore(node["op"]).to_sym 40 | puts "warning unsupported op #{op}" unless TensorStream::Evaluator::RubyEvaluator.ops.key?(op) 41 | 42 | # map input tensor 43 | inputs = node["input"].map { |input| 44 | input[0] = "" if input.start_with?("^") 45 | 46 | input_indexed, index = input.split(":") 47 | 48 | tensor = if index && index.to_i > 0 49 | @graph.get_tensor_by_name(input_indexed)[index.to_i] 50 | else 51 | @graph.get_tensor_by_name(input) 52 | end 53 | 54 | raise "tensor not found by name #{input}" if tensor.nil? 55 | 56 | tensor 57 | } 58 | 59 | options[:data_type] = options.delete(:T) 60 | Graph.get_default_graph.add_op!(op, *inputs, options) 61 | end 62 | end 63 | 64 | @graph 65 | end 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /lib/tensor_stream/graph_deserializers/yaml_loader.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | ## 3 | # Class for deserialization from a YAML file 4 | class YamlLoader 5 | def initialize(graph = nil) 6 | @graph = graph || TensorStream.get_default_graph 7 | end 8 | 9 | ## 10 | # Loads a model Yaml file and builds the model from it 11 | # 12 | # Args: 13 | # filename: String - Location of Yaml file 14 | # 15 | # Returns: Graph where model is restored to 16 | def load_from_file(filename) 17 | load_from_string(File.read(filename)) 18 | end 19 | 20 | ## 21 | # Loads a model Yaml file and builds the model from it 22 | # 23 | # Args: 24 | # buffer: String - String in Yaml format of the model 25 | # 26 | # Returns: Graph where model is restored to 27 | def load_from_string(buffer) 28 | serialized_ops = YAML.safe_load(buffer, [Symbol], [], true) 29 | serialized_ops.each do |op_def| 30 | inputs = op_def[:inputs].map { |i| @graph.get_tensor_by_name(i) } 31 | options = {} 32 | 33 | new_var = nil 34 | if op_def[:op].to_sym == :variable_v2 35 | new_var = Variable.new(op_def.dig(:attrs, :data_type)) 36 | 37 | var_options = {} 38 | var_options[:name] = op_def.dig(:attrs, :var_name) 39 | 40 | new_var.prepare(nil, nil, TensorStream.get_variable_scope, var_options) 41 | @graph.add_variable(new_var, var_options) 42 | end 43 | 44 | new_op = Operation.new(@graph, inputs: inputs, options: op_def[:attrs].merge(options)) 45 | new_op.operation = op_def[:op].to_sym 46 | new_op.name = op_def[:name] 47 | new_op.shape = TensorShape.new(TensorStream::InferShape.infer_shape(new_op)) 48 | new_op.rank = new_op.shape.rank 49 | new_op.data_type = new_op.set_data_type(op_def.dig(:attrs, :data_type)) 50 | new_op.is_const = new_op.infer_const 51 | new_op.given_name = new_op.name 52 | new_var.op = new_op if new_var 53 | 54 | @graph.add_node(new_op) 55 | end 56 | @graph 57 | end 58 | end 59 | end 60 | -------------------------------------------------------------------------------- /lib/tensor_stream/graph_keys.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class GraphKeys 3 | GLOBAL_VARIABLES = "variables".freeze 4 | TRAINABLE_VARIABLES = "trainable_variables".freeze 5 | GLOBAL_STEP = "global_step".freeze 6 | end 7 | end 8 | -------------------------------------------------------------------------------- /lib/tensor_stream/graph_serializers/packer.rb: -------------------------------------------------------------------------------- 1 | require "base64" 2 | 3 | module TensorStream 4 | # Utility class to handle data type serialization 5 | class Packer 6 | def self.pack(value, data_type) 7 | value = 
value.is_a?(Array) ? value.flatten : [value]
8 | byte_value = case data_type
9 | when :float64
10 | value.pack("d*")
11 | when :float32, :float16, :float
12 | value.pack("f*")
13 | when :uint32
14 | value.pack("L*")
15 | when :int32, :int
16 | value.pack("l*")
17 | when :int64
18 | value.pack("q*")
19 | when :uint64
20 | value.pack("Q*")
21 | when :uint8
22 | value.pack("C*")
23 | when :boolean
24 | value.map { |v| v ? 1 : 0 }.pack("C*")
25 | when :string
26 | if value.is_a?(Array)
27 | value.to_yaml
28 | else
29 | value
30 | end
31 | else
32 | raise "unknown type #{data_type}"
33 | end
34 | 
35 | byte_value
36 | end
37 | 
38 | def self.pack_to_str(value, data_type)
39 | pack(value, data_type).bytes.map { |b| /[^[:print:]]/.match?(b.chr) ? "\\#{sprintf("%o", b).rjust(3, "0")}" : b.chr }.join
40 | end
41 | 
42 | def self.unpack_from_str(content, data_type)
43 | unpacked = eval(%("#{content}"), binding, __FILE__)
44 | unpack(unpacked, data_type)
45 | end
46 | 
47 | def self.unpack(unpacked, data_type)
48 | case data_type
49 | when :float32, :float, :float16
50 | unpacked.unpack("f*")
51 | when :int32, :int
52 | unpacked.unpack("l*")
53 | when :uint32
54 | unpacked.unpack("L*")
55 | when :float64
56 | unpacked.unpack("d*")
57 | when :int64
58 | unpacked.unpack("q*")
59 | when :uint64
60 | unpacked.unpack("Q*")
61 | when :uint8
62 | unpacked.unpack("C*")
63 | when :boolean
64 | unpacked.unpack("C*").map { |v| v == 1 }
65 | end
66 | end
67 | end
68 | end
69 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/graph_serializers/serializer.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | class Serializer
3 | def serialize(filename, tensor, session = nil, graph_keys = nil)
4 | File.write(filename, get_string(tensor, session, graph_keys))
5 | end
6 | 
7 | def get_string(tensor, session = nil, graph_keys = nil)
8 | end
9 | end
10 | end
11 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/graph_serializers/yaml.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # Serializes a graph into YAML format
3 | class Yaml < TensorStream::Serializer
4 | include TensorStream::StringHelper
5 | include TensorStream::OpHelper
6 | 
7 | def get_string(tensor_or_graph, session = nil, graph_keys = nil)
8 | graph = tensor_or_graph.is_a?(Tensor) ? tensor_or_graph.graph : tensor_or_graph
9 | serialized_arr = []
10 | 
11 | node_keys = graph_keys.nil? ? graph.node_keys : graph.node_keys.select { |k| graph_keys.include?(k) }
12 | 
13 | node_keys.each do |k|
14 | node = if block_given?
15 | yield graph, k
16 | else
17 | graph.get_tensor_by_name(k)
18 | end
19 | next unless node.is_a?(Operation)
20 | 
21 | serialized_arr << node.to_h
22 | end
23 | 
24 | serialized_arr.to_yaml
25 | end
26 | end
27 | end
28 | 
-------------------------------------------------------------------------------- /lib/tensor_stream/helpers/op_helper.rb: --------------------------------------------------------------------------------
1 | module TensorStream
2 | # module that contains helper functions useful for ops
3 | module OpHelper
4 | def _op(code, *args)
5 | default_graph = Graph.get_default_graph
6 | 
7 | op = default_graph.add_op!(code.to_sym, *args)
8 | if !default_graph.get_dependency_scope.nil?
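# an active control dependency scope is honored by wrapping the new op in an identity node tied to that scope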
9 | i_op(:identity, op, default_graph.get_dependency_scope, name: [op.name, "tuple", "control_dependency"].join("/")) 10 | else 11 | op 12 | end 13 | end 14 | 15 | # same as op but with a marker that it was internal generated 16 | def i_op(code, *args) 17 | options = if args.last.is_a?(Hash) 18 | args.pop 19 | else 20 | {} 21 | end 22 | 23 | args << options.merge(internal: true) 24 | Graph.get_default_graph.add_op!(code.to_sym, *args) 25 | end 26 | 27 | def i_var(data_type, rank, shape, variable_scope, options = {}) 28 | new_var = Variable.new(data_type) 29 | new_var.prepare(rank, shape, variable_scope, options) 30 | new_var.op = new_var.graph.add_variable!(new_var, options.merge(shape: @shape, data_type: data_type)) 31 | 32 | new_var 33 | end 34 | 35 | def cons(value, options = {}) 36 | TensorStream.constant(value, options) 37 | end 38 | 39 | def i_cons(value, options = {}) 40 | TensorStream.constant(value, options.merge(internal: true)) 41 | end 42 | 43 | def shape_eval(input, output_type = :int32) 44 | return [] unless input.is_a?(Array) 45 | arr = [] 46 | arr_ptr = input 47 | 48 | Kernel.loop do 49 | arr << (TensorStream::Ops::FLOATING_POINT_TYPES.include?(output_type) ? arr_ptr.size.to_f : arr_ptr.size) 50 | arr_ptr = arr_ptr[0] 51 | 52 | break unless arr_ptr.is_a?(Array) 53 | end 54 | 55 | arr 56 | end 57 | 58 | def fp_type?(type) 59 | TensorStream::Ops::FLOATING_POINT_TYPES.include?(type) 60 | end 61 | 62 | def int_type?(type) 63 | TensorStream::Ops::INTEGER_TYPES.include?(type) 64 | end 65 | 66 | def format_source(trace) 67 | grad_source = trace.detect { |c| c.to_s.include?(File.join("lib", "tensor_stream", "math_gradients")) } 68 | source = trace.reject { |c| c.to_s.include?(File.join("lib", "tensor_stream")) }.first 69 | [grad_source, trace].compact.join("\n") 70 | end 71 | 72 | def shapes_fully_specified_and_equal(x, y) 73 | return false if !shape_full_specified(x) || !shape_full_specified(y) 74 | return false if x.shape.shape != y.shape.shape 75 | 76 | true 77 | end 78 | 79 | def shape_full_specified(tensor) 80 | return false if tensor.shape.nil? 81 | return false if tensor.shape.shape.nil? 82 | 83 | tensor.shape.shape.each { |s| return false if s.nil? || (s < 0) } 84 | true 85 | end 86 | 87 | def reduced_shape(input_shape, axes) 88 | input_shape = TensorStream.convert_to_tensor(input_shape) 89 | axes = TensorStream.convert_to_tensor(axes) 90 | input_rank = i_op(:size, input_shape) 91 | axes = TensorStream.range(0, input_rank) if axes.nil? 92 | axes = (axes + input_rank) % input_rank 93 | axes_shape = i_op(:shape, axes) 94 | 95 | TensorStream.dynamic_stitch([TensorStream.range(0, input_rank), axes], 96 | [input_shape, i_op(:fill, axes_shape, 1)]) 97 | end 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /lib/tensor_stream/helpers/string_helper.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | # helper string methods usually found in ActiveSupport but 3 | # need to replicate here since we don't want to use ActiveSupport 4 | module StringHelper 5 | def camelize(string, uppercase_first_letter = true) 6 | string = if uppercase_first_letter 7 | string.sub(/^[a-z\d]*/) { $&.capitalize } 8 | else 9 | string.sub(/^(?:(?=\b|[A-Z_])|\w)/) { $&.downcase } 10 | end 11 | string.gsub(/(?:_|(\/))([a-z\d]*)/) { "#{$1}#{$2.capitalize}" }.gsub("/", "::") 12 | end 13 | 14 | def underscore(string) 15 | string.gsub(/::/, "/"). 16 | gsub(/([A-Z]+)([A-Z][a-z])/, '\1_\2'). 
17 | gsub(/([a-z\d])([A-Z])/, '\1_\2'). 18 | tr("-", "_").downcase 19 | end 20 | 21 | def symbolize_keys(hash) 22 | hash.map { |k, v| 23 | [k.to_sym, v] 24 | }.to_h 25 | end 26 | 27 | def constantize(camel_cased_word) 28 | names = camel_cased_word.split("::") 29 | 30 | # Trigger a built-in NameError exception including the ill-formed constant in the message. 31 | Object.const_get(camel_cased_word) if names.empty? 32 | 33 | # Remove the first blank element in case of '::ClassName' notation. 34 | names.shift if names.size > 1 && names.first.empty? 35 | 36 | names.inject(Object) do |constant, name| 37 | if constant == Object 38 | constant.const_get(name) 39 | else 40 | candidate = constant.const_get(name) 41 | next candidate if constant.const_defined?(name, false) 42 | next candidate unless Object.const_defined?(name) 43 | 44 | # Go down the ancestors to check if it is owned directly. The check 45 | # stops when we reach Object or the end of ancestors tree. 46 | constant = constant.ancestors.inject { |const, ancestor| 47 | break const if ancestor == Object 48 | break ancestor if ancestor.const_defined?(name, false) 49 | const 50 | } 51 | 52 | # owner is in Object, so raise 53 | constant.const_get(name, false) 54 | end 55 | end 56 | end 57 | end 58 | end 59 | -------------------------------------------------------------------------------- /lib/tensor_stream/helpers/tensor_mixins.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module TensorMixins 3 | def +(other) 4 | _op(:add, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 5 | end 6 | 7 | def [](index) 8 | if index.is_a?(Range) 9 | last = if index.end.nil? 10 | [TensorStream.shape(self)[0]] 11 | else 12 | [index.max + 1] 13 | end 14 | _op(:strided_slice, self, [index.min], last, [1]) 15 | else 16 | _op(:index, self, index) 17 | end 18 | end 19 | 20 | def *(other) 21 | _op(:mul, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 22 | end 23 | 24 | def **(other) 25 | _op(:pow, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 26 | end 27 | 28 | def /(other) 29 | _op(:div, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 30 | end 31 | 32 | def -(other) 33 | _op(:sub, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 34 | end 35 | 36 | def -@ 37 | _op(:negate, self) 38 | end 39 | 40 | def %(other) 41 | TensorStream.mod(self, other) 42 | end 43 | 44 | def floor(name: nil) 45 | TensorStream.floor(self, name: name) 46 | end 47 | 48 | def ceil(name: nil) 49 | TensorStream.ceil(self, name: name) 50 | end 51 | 52 | def round(name: nil) 53 | TensorStream.round(self, name: name) 54 | end 55 | 56 | def log(name: nil) 57 | TensorStream.log(self, name: name) 58 | end 59 | 60 | def reshape(shape, name: nil) 61 | TensorStream.reshape(self, shape, name: name) 62 | end 63 | 64 | def zero? 
65 | _op(:equal, self, TensorStream.constant(0, dtype: data_type, name: "equal/is_zero?")) 66 | end 67 | 68 | def ==(other) 69 | TensorStream.check_data_types(self, other) 70 | _op(:equal, self, other) 71 | end 72 | 73 | def <(other) 74 | _op(:less, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 75 | end 76 | 77 | def !=(other) 78 | _op(:not_equal, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 79 | end 80 | 81 | def >(other) 82 | _op(:greater, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 83 | end 84 | 85 | def >=(other) 86 | _op(:greater_equal, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 87 | end 88 | 89 | def <=(other) 90 | _op(:less_equal, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 91 | end 92 | 93 | def and(other) 94 | _op(:logical_and, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 95 | end 96 | 97 | def matmul(other) 98 | _op(:mat_mul, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 99 | end 100 | 101 | def dot(other) 102 | _op(:mat_mul, self, TensorStream.convert_to_tensor(other, dtype: data_type)) 103 | end 104 | 105 | def cast(data_type = :float32, name: nil) 106 | TensorStream.cast(self, data_type, name: name) 107 | end 108 | 109 | def var(name: nil) 110 | TensorStream.variable(self, name: name) 111 | end 112 | 113 | ## 114 | # Apply a reduction to tensor 115 | def reduce(op_type = :+, axis: nil, keepdims: false, name: nil) 116 | reduce_op = case op_type.to_sym 117 | when :+ 118 | :sum 119 | when :* 120 | :prod 121 | when :mean 122 | :mean 123 | else 124 | raise "unsupported reduce op type #{op_type}, valid values are :+, :*, :mean" 125 | end 126 | raise "blocks are not supported for tensors" if block_given? 127 | 128 | TensorStream.reduce(reduce_op, self, axis, keepdims: keepdims, name: name) 129 | end 130 | end 131 | end 132 | -------------------------------------------------------------------------------- /lib/tensor_stream/images.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Images 3 | extend OpHelper 4 | extend TensorStream::Utils 5 | 6 | def self.decode_png(contents, channels: 0, dtype: :uint8, name: nil, new_shape: nil) 7 | _op(:decode_png, contents, channels: channels, data_type: dtype, name: name, new_shape: new_shape) 8 | end 9 | 10 | def self.decode_jpeg(contents, channels: 0, dtype: :uint8, name: nil, new_shape: nil) 11 | _op(:decode_jpg, contents, channels: channels, data_type: dtype, name: name, new_shape: new_shape) 12 | end 13 | 14 | def self.encode_png(contents, compression: -1, name: nil, new_shape: nil, resample_method: nil) 15 | check_allowed_types(contents, %i[uint8 uint16]) 16 | contents = convert_to_tensor(contents, dtype: :uint16) 17 | _op(:encode_png, contents, compression: compression, name: name, new_shape: new_shape, resample_method: resample_method) 18 | end 19 | end 20 | end 21 | -------------------------------------------------------------------------------- /lib/tensor_stream/initializer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class Initializer 3 | attr_writer :op 4 | def initialize(op) 5 | @op = op 6 | end 7 | 8 | def op 9 | @op.call 10 | end 11 | 12 | def shape 13 | nil 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /lib/tensor_stream/math/math_ops.rb:
-------------------------------------------------------------------------------- 1 | module TensorStream 2 | # High level math functions 3 | class Maths 4 | extend TensorStream::OpHelper 5 | 6 | module MathFunctions 7 | 8 | ## 9 | # Normalizes along dimension axis using an L2 norm. 10 | def l2_normalize(x, axis: nil, epsilon: 1e-12, name: nil) 11 | TensorStream.name_scope(name, "l2_normalize", values: [x]) do |name| 12 | x = TensorStream.convert_to_tensor(x, name: "x") 13 | square_sum = TensorStream.reduce_sum(TensorStream.square(x), axis, keepdims: true) 14 | x_inv_norm = TensorStream.rsqrt(TensorStream.maximum(square_sum, epsilon)) 15 | TensorStream.multiply(x, x_inv_norm, name: name) 16 | end 17 | end 18 | end 19 | 20 | extend MathFunctions 21 | end 22 | end -------------------------------------------------------------------------------- /lib/tensor_stream/monkey_patches/array.rb: -------------------------------------------------------------------------------- 1 | class Array 2 | include TensorStream::MonkeyPatch 3 | 4 | def /(other) 5 | TensorStream.convert_to_tensor(self) / other 6 | end 7 | 8 | def %(other) 9 | TensorStream.convert_to_tensor(self) % other 10 | end 11 | 12 | def **(other) 13 | TensorStream.convert_to_tensor(self)**other 14 | end 15 | 16 | def max_index 17 | if first.is_a?(Float) 18 | highest = first 19 | highest_index = 0 20 | each_with_index do |item, index| 21 | next if item.nan? 22 | 23 | if item > highest 24 | highest = item 25 | highest_index = index 26 | end 27 | end 28 | highest_index 29 | else 30 | index(max) 31 | end 32 | end 33 | 34 | def min_index 35 | if first.is_a?(Float) 36 | lowest = first 37 | lowest_index = 0 38 | each_with_index do |item, index| 39 | next if item.nan? 40 | 41 | if item < lowest 42 | lowest = item 43 | lowest_index = index 44 | end 45 | end 46 | lowest_index 47 | else 48 | index(min) 49 | end 50 | end 51 | end 52 | -------------------------------------------------------------------------------- /lib/tensor_stream/monkey_patches/float.rb: -------------------------------------------------------------------------------- 1 | class Float 2 | include TensorStream::MonkeyPatch 3 | 4 | def self.placeholder(name: nil, width: 32, shape: nil) 5 | raise "invalid width passed #{width}" unless [16, 32, 64].include?(width) 6 | 7 | data_type = :"float#{width}" 8 | TensorStream.placeholder(data_type, name: name, shape: shape) 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /lib/tensor_stream/monkey_patches/integer.rb: -------------------------------------------------------------------------------- 1 | class Integer 2 | include TensorStream::MonkeyPatch 3 | 4 | def self.placeholder(name: nil, width: 32, shape: nil) 5 | raise "invalid width passed #{width}" unless [16, 32, 64].include?(width) 6 | 7 | data_type = :"int#{width}" 8 | TensorStream.placeholder(data_type, name: name, shape: shape) 9 | end 10 | end 11 | -------------------------------------------------------------------------------- /lib/tensor_stream/monkey_patches/op_patch.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module OpPatch 3 | def self.included(klass) 4 | ops = if klass == Array 5 | {:+ => "add", :- => "sub", :* => "mul"} 6 | else 7 | {:+ => "add", :- => "sub", :/ => "div", :% => "mod", :* => "mul", :** => "pow"} 8 | end 9 | 10 | ops.each do |m, name| 11 | klass.send(:alias_method, :"_tensor_stream_#{name}_orig", m) 12 | klass.send(:remove_method, m) 13 |
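# Illustrative sketch (not part of the original source): with these monkey
# patches included, plain Ruby numerics and arrays can appear on the left-hand
# side of tensor arithmetic. Assumes the gem's usual constant/session API;
# exact output depends on the evaluator in use.
#
#   require "tensor_stream"
#
#   a = TensorStream.constant([1.0, 2.0, 3.0])
#   b = 2.0 * a              # Float#* falls through to the patched op when other is a Tensor
#   c = [2.0, 4.0, 6.0] / a  # Array#/ converts the array to a tensor first
#
#   TensorStream.session do |sess|
#     sess.run(b) # expected => [2.0, 4.0, 6.0]
#   end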
end 14 | end 15 | 16 | def +(other) 17 | if other.is_a?(TensorStream::Tensor) 18 | TensorStream.convert_to_tensor(self, dtype: other.data_type) + other 19 | else 20 | _tensor_stream_add_orig(other) 21 | end 22 | end 23 | 24 | def -(other) 25 | if other.is_a?(TensorStream::Tensor) 26 | TensorStream.convert_to_tensor(self, dtype: other.data_type) - other 27 | else 28 | _tensor_stream_sub_orig(other) 29 | end 30 | end 31 | 32 | def *(other) 33 | if other.is_a?(TensorStream::Tensor) 34 | TensorStream.convert_to_tensor(self, dtype: other.data_type) * other 35 | else 36 | _tensor_stream_mul_orig(other) 37 | end 38 | end 39 | 40 | def /(other) 41 | if other.is_a?(TensorStream::Tensor) 42 | TensorStream.convert_to_tensor(self, dtype: other.data_type) / other 43 | else 44 | _tensor_stream_div_orig(other) 45 | end 46 | end 47 | 48 | def %(other) 49 | if other.is_a?(TensorStream::Tensor) 50 | TensorStream.convert_to_tensor(self, dtype: other.data_type) % other 51 | else 52 | _tensor_stream_mod_orig(other) 53 | end 54 | end 55 | 56 | def **(other) 57 | if other.is_a?(TensorStream::Tensor) 58 | TensorStream.convert_to_tensor(self, dtype: other.data_type)**other 59 | else 60 | _tensor_stream_pow_orig(other) 61 | end 62 | end 63 | end 64 | end 65 | 66 | Integer.include TensorStream::OpPatch 67 | Float.include TensorStream::OpPatch 68 | Array.include TensorStream::OpPatch 69 | -------------------------------------------------------------------------------- /lib/tensor_stream/monkey_patches/patch.rb: -------------------------------------------------------------------------------- 1 | # require 'pry-byebug' 2 | module TensorStream 3 | # various monkey patches to numeric types 4 | module MonkeyPatch 5 | def shape 6 | TensorStream.shape_eval(self) 7 | end 8 | 9 | def t(name = nil, dtype: nil) 10 | TensorStream.convert_to_tensor(self, name: name, dtype: dtype) 11 | end 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /lib/tensor_stream/ops/add.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :add do |op| 2 | op.what_it_does "Returns x + y element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | x, y = params 14 | next [grad, grad] if shapes_fully_specified_and_equal(x, y) 15 | 16 | sx = ts.shape(x, name: "add/shape_x") 17 | sy = ts.shape(y, name: "add/shape_y") 18 | rx, ry = _broadcast_gradient_args(sx, sy) 19 | 20 | [ts.reshape(ts.reduce_sum(grad, rx, name: "add/reduce_sum_x"), sx), 21 | ts.reshape(ts.reduce_sum(grad, ry, name: "add/reduce_sum_y"), sy),] 22 | end 23 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/argmax.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :argmax do |op| 2 | op.what_it_does "Returns the index with the largest value across axes of a tensor." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'NUMERIC_TYPES' 5 | op.parameter :axis, "Describes which axis of the input tensor to reduce across.
For vectors, use axis = 0", :nil, validate: 'INTEGER_TYPES' 6 | 7 | op.option :name, "Optional name", :nil 8 | op.option :dimension, "Same as axis", :nil 9 | op.option :output_type, "Output data type defaults to int32", ":int32" 10 | 11 | op.define_gradient do |grad, node, params| 12 | [nil, nil] 13 | end 14 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/argmin.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :argmin do |op| 2 | op.what_it_does "Returns the index with the smallest value across axes of a tensor." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'NUMERIC_TYPES' 5 | op.parameter :axis, "Describes which axis of the input tensor to reduce across. For vectors, use axis = 0", :nil, validate: 'INTEGER_TYPES' 6 | 7 | op.option :name, "Optional name", :nil 8 | op.option :dimension, "Same as axis", :nil 9 | op.option :output_type, "Output data type defaults to int32", ":int32" 10 | 11 | op.define_gradient do |grad, node, params| 12 | [nil, nil] 13 | end 14 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/bias_add.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :bias_add do |op| 2 | op.what_it_does "Adds bias to value." 3 | 4 | op.parameter :value, "A Tensor", :nil, validate: 'NUMERIC_TYPES' 5 | op.parameter :bias, "A 1 D tensor", :nil, validate: 'NUMERIC_TYPES' 6 | 7 | op.supports_broadcasting! 8 | op.exclude! 9 | 10 | op.option :name, "Optional name", :nil 11 | op.option :data_format, "A string. 'NHWC' and 'NCHW' are supported.", :nil 12 | 13 | op.define_gradient do |grad, node, _params| 14 | [grad, _op(:bias_add_grad, grad, data_format: node.options[:data_format])] 15 | end 16 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/case.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :case do |op| 2 | op.exclude! 3 | 4 | op.define_gradient do |grad, node, params| 5 | n_preds = node.inputs.size - 2 6 | 7 | case_grads = Array.new(n_preds) { |index| 8 | i_op(:case_grad, index, node.inputs[0], node.inputs[2 + index], grad) 9 | } 10 | 11 | [nil, i_op(:case_grad, -1, node.inputs[0], node.inputs[1], grad)] + case_grads 12 | end 13 | 14 | op.define_shape do |tensor| 15 | tensor.inputs[2]&.shape&.shape 16 | end 17 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/cast.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :cast do |op| 2 | op.exclude! 
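# Aside (not part of the original source): ops defined through the OpMaker DSL,
# like the argmax/argmin definitions above, are surfaced as ordinary
# TensorStream module methods via the generated stubs. A minimal usage sketch,
# assuming the gem's public session API:
#
#   ts = TensorStream
#   scores = ts.constant([[1, 9, 3], [7, 2, 5]])
#   best = ts.argmax(scores, 1, output_type: :int32)
#   ts.session { |sess| sess.run(best) } # expected => [1, 0]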
3 | 4 | op.define_gradient do |grad, node, params| 5 | t = %i[float16 float32 float64] 6 | src_type = node.inputs[0].data_type 7 | dst_type = grad.data_type 8 | 9 | if t.include?(src_type) && t.include?(dst_type) 10 | next ts.cast(grad, src_type) 11 | end 12 | 13 | nil 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/ceil.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :ceil do |op| 2 | op.what_it_does "Returns element-wise smallest integer not less than x." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | 6 | op.option :name, "Optional name", :nil 7 | 8 | op.define_gradient do |grad, node, params| 9 | nil 10 | end 11 | 12 | op.define_shape do |tensor| 13 | tensor.inputs[0].shape.shape 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/const.rb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/lib/tensor_stream/ops/const.rb -------------------------------------------------------------------------------- /lib/tensor_stream/ops/cos.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :cos do |op| 2 | op.what_it_does "Computes cos of input element-wise." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | -grad * ts.sin(params[0]) 9 | end 10 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/div.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :div do |op| 2 | op.what_it_does "Returns x / y element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | x, y = params 14 | sx = i_op(:shape, x) 15 | sy = i_op(:shape, y) 16 | rx, ry = _broadcast_gradient_args(sx, sy) 17 | 18 | [ts.reshape(ts.reduce_sum(ts.div(grad, y), rx), sx), 19 | ts.reshape(ts.reduce_sum(grad * ts.div(ts.div(-x, y), y), ry), sy),] 20 | end 21 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/equal.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :equal do |op| 2 | op.what_it_does "Returns the truth value of (x == y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting!
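# Illustrative note (not part of the original source): the cast gradient above
# only propagates between floating point types. A hedged sketch, assuming the
# gem's ts.cast and ts.gradients entry points behave as their TensorFlow
# counterparts:
#
#   ts = TensorStream
#   x = ts.constant([1.2, 2.7])
#   y = ts.cast(x, :int32)                       # forward cast works for any numeric type
#   g = ts.gradients(ts.cast(x, :float64), [x])  # float -> float, so grad flows back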
9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.equal(a, b) }) 14 | end 15 | 16 | op.define_data_type do 17 | :boolean 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/expand_dims.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :expand_dims do |op| 2 | op.what_it_does "Inserts a dimension of 1 into a tensor's shape. " 3 | op.what_it_does "Given a tensor input, this operation inserts a dimension of 1 at the dimension index axis of input's shape. The " 4 | op.what_it_does "dimension index axis starts at zero; if you specify a negative number for axis it is counted backward from the end." 5 | 6 | op.parameter :input, "A tensor" 7 | op.parameter :axis, "Specifies the dimension index at which to expand the shape of input. Must be in the range [-rank(input) - 1, rank(input)]." 8 | op.option :name, "Optional name", :nil 9 | 10 | op.define_gradient do |grad, node, params| 11 | [_reshape_to_input(node, grad), nil] 12 | end 13 | 14 | op.define_shape do |tensor| 15 | nil 16 | end 17 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/fill.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :fill do |op| 2 | op.what_it_does "This operation creates a tensor of shape dims and fills it with value." 3 | 4 | op.parameter :dims, "tensor shape" 5 | op.parameter :value, "scalar value to fill with" 6 | 7 | op.option :name, "Optional name", :nil 8 | 9 | op.define_gradient do |grad, node, params| 10 | [nil, TensorStream.reduce_sum(grad)] 11 | end 12 | 13 | op.define_shape do |tensor| 14 | a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape] 15 | next nil if a_shape.nil? 16 | 17 | a_shape.is_a?(Array) ? a_shape : [a_shape] 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/floor.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :floor do |op| 2 | op.what_it_does "Returns element-wise largest integer not greater than x." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | 6 | op.option :name, "Optional name", :nil 7 | 8 | op.define_gradient do |grad, node, params| 9 | nil 10 | end 11 | 12 | op.define_shape do |tensor| 13 | tensor.inputs[0].shape.shape 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/floor_div.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :floor_div do |op| 2 | op.what_it_does "Returns element-wise integer division." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting!
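# Sketch (not part of the original source): fill's gradient above reduces the
# incoming gradient to a scalar, because every output element depends on the
# single fill value. Assuming the fill signature documented above and the
# gem's ts.gradients helper:
#
#   ts = TensorStream
#   v = ts.constant(3.0)
#   t = ts.fill([2, 2], v)                  # [[3.0, 3.0], [3.0, 3.0]]
#   g = ts.gradients(ts.reduce_sum(t), [v]) # expected => [4.0], one per output element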
9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | [nil, nil] 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/greater.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :greater do |op| 2 | op.what_it_does "Returns the truth value of (x > y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_data_type do 13 | :boolean 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/greater_equal.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :greater_equal do |op| 2 | op.what_it_does "Returns the truth value of (x >= y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_data_type do 13 | :boolean 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/less.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :less do |op| 2 | op.what_it_does "Returns the truth value of (x < y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, _params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.less(a, b) }) 14 | end 15 | 16 | op.define_data_type do 17 | :boolean 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/less_equal.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :less_equal do |op| 2 | op.what_it_does "Returns the truth value of (x <= y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.greater_equal(a, b) }) 14 | end 15 | 16 | op.define_data_type do 17 | :boolean 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/log.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :log do |op| 2 | op.what_it_does "Computes natural logarithm of x element-wise." 
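# Hedged usage sketch (not part of the original source): the comparison ops
# above all declare a :boolean output data type, so they compose with masking
# and control-flow ops rather than arithmetic:
#
#   ts = TensorStream
#   mask = ts.greater(ts.constant([1, 5, 3]), 2)
#   ts.session { |sess| sess.run(mask) } # expected => [false, true, true]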
3 | 4 | op.parameter :input, "tensor X" 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | grad * TensorStream.reciprocal(params[0]) 9 | end 10 | 11 | op.define_shape do |tensor| 12 | tensor.inputs[0].shape.shape 13 | end 14 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/mat_mul.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :mat_mul do |op| 2 | op.other_names %w(matmul) 3 | op.what_it_does "Multiplies matrix a by matrix b, producing a * b. The inputs must, following any transpositions, be tensors of rank 2." 4 | 5 | op.parameter :input_a, "tensor X" 6 | op.parameter :input_b, "tensor Y" 7 | 8 | op.apply_data_type_coercion! 9 | op.supports_broadcasting! 10 | 11 | op.option :transpose_a, "Transpose matrix A first", :false 12 | op.option :transpose_b, "Transpose matrix B first", :false 13 | op.option :name, "Optional name", :nil 14 | 15 | op.define_gradient do |grad, node, params| 16 | x, y = params 17 | t_a = node.options[:transpose_a] 18 | t_b = node.options[:transpose_b] 19 | 20 | if !t_a && !t_b 21 | grad_a = ts.matmul(grad, y, transpose_b: true) 22 | grad_b = ts.matmul(x, grad, transpose_a: true) 23 | elsif !t_a && t_b 24 | grad_a = ts.matmul(grad, y) 25 | grad_b = ts.matmul(grad, x, transpose_a: true) 26 | elsif t_a && !t_b 27 | grad_a = ts.matmul(y, grad, transpose_b: true) 28 | grad_b = ts.matmul(x, grad) 29 | elsif t_a && t_b 30 | grad_a = ts.matmul(y, grad, transpose_a: true, transpose_b: true) 31 | grad_b = ts.matmul(grad, x, transpose_a: true, transpose_b: true) 32 | end 33 | 34 | [grad_a, grad_b] 35 | end 36 | 37 | op.define_shape do |tensor| 38 | next nil if tensor.inputs[0].shape.shape.nil? || tensor.inputs[1].shape.shape.nil? 39 | next [] if tensor.inputs[0].shape.shape.empty? || tensor.inputs[1].shape.shape.empty? 40 | next nil if tensor.inputs[0].shape.shape.size != 2 || tensor.inputs[1].shape.shape.size != 2 41 | 42 | shape1, m = if tensor.options[:transpose_a] 43 | [tensor.inputs[0].shape.shape[0], tensor.inputs[0].shape.shape[1]] 44 | else 45 | [tensor.inputs[0].shape.shape[1], tensor.inputs[0].shape.shape[0]] 46 | end 47 | 48 | shape2, n = if tensor.options[:transpose_b] 49 | [tensor.inputs[1].shape.shape[1], tensor.inputs[1].shape.shape[0]] 50 | else 51 | [tensor.inputs[1].shape.shape[0], tensor.inputs[1].shape.shape[1]] 52 | end 53 | 54 | next nil if shape1.nil? || shape2.nil? || shape1 < 0 || shape2 < 0 55 | 56 | raise TensorStream::ValueError, "incompatible shape sizes for matrix multiplication (#{shape1} != #{shape2}) #{tensor.inputs[0].shape.shape} vs #{tensor.inputs[1].shape.shape}" if shape1 != shape2 57 | 58 | [m, n] 59 | end 60 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/max.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :max do |op| 2 | op.what_it_does "Returns the max of x and y (i.e. x > y ? x : y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X", nil, validate: 'NUMERIC_TYPES' 5 | op.parameter :input_b, "tensor Y", nil, validate: 'NUMERIC_TYPES' 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting!
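# Illustrative sketch (not part of the original source) of the mat_mul
# transpose options that the gradient cases above distinguish between:
#
#   ts = TensorStream
#   a = ts.constant([[1.0, 2.0], [3.0, 4.0]])
#   b = ts.constant([[1.0, 0.0], [0.0, 1.0]])
#   c = ts.matmul(a, b, transpose_a: true)  # contracts over a's rows instead of columns
#   ts.session { |sess| sess.run(c) }       # expected => [[1.0, 3.0], [2.0, 4.0]]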
9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.greater_equal(a, b) }) 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/min.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :min do |op| 2 | op.what_it_does "Returns the min of x and y (i.e. x < y ? x : y) element-wise." 3 | 4 | op.parameter :input_a, "tensor X", nil, validate: 'NUMERIC_TYPES' 5 | op.parameter :input_b, "tensor Y", nil, validate: 'NUMERIC_TYPES' 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.less_equal(a, b) }) 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/mod.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :mod do |op| 2 | op.what_it_does "Returns element-wise remainder of division." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | x, y = params 14 | sx = ts.shape(x) 15 | sy = ts.shape(y) 16 | rx, ry = _broadcast_gradient_args(sx, sy) 17 | floor_xy = ts.floor_div(x, y) 18 | gx = ts.reshape(ts.reduce_sum(grad, rx), sx) 19 | gy = ts.reshape(ts.reduce_sum(grad * ts.negative(floor_xy), ry), sy) 20 | 21 | [gx, gy] 22 | end 23 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/mul.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :mul do |op| 2 | op.what_it_does "Returns x * y element-wise." 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | x, y = params 14 | sx = ts.shape(x) 15 | sy = ts.shape(y) 16 | rx, ry = _broadcast_gradient_args(sx, sy) 17 | 18 | [ts.reshape(ts.reduce_sum(ts.mul(grad, y), rx), sx), 19 | ts.reshape(ts.reduce_sum(ts.mul(x, grad), ry), sy)] 20 | end 21 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/negate.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :negate do |op| 2 | op.what_it_does "Computes numerical negative value element-wise." 3 | 4 | op.parameter :input, "tensor X" 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | -grad 9 | end 10 | 11 | op.define_shape do |tensor| 12 | tensor.inputs[0].shape.shape 13 | end 14 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/not_equal.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :not_equal do |op| 2 | op.what_it_does "Returns the truth value of (x != y) element-wise." 
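# Aside (not part of the original source): the mul gradient above reduces each
# incoming gradient over the broadcast axes and reshapes it back to its
# operand's shape. A minimal check, assuming ts.gradients is available:
#
#   ts = TensorStream
#   x = ts.constant([[1.0, 2.0], [3.0, 4.0]])
#   y = ts.constant([10.0, 20.0])           # broadcast across rows
#   gx, gy = ts.gradients(ts.reduce_sum(x * y), [x, y])
#   # gy collapses the broadcast row dimension, matching y's shape of [2]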
3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | _min_or_max_grad(node.inputs, grad, ->(a, b) { ts.not_equal(a, b) }) 14 | end 15 | 16 | op.define_data_type do 17 | :boolean 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/ones_like.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :ones_like do |op| 2 | op.what_it_does "Creates a tensor with all elements set to 1." 3 | op.what_it_does "Given a single tensor (tensor), this operation returns a" 4 | op.what_it_does "tensor of the same type and shape as tensor with all elements set to 1." 5 | op.what_it_does "Optionally, you can specify a new type (dtype) for the returned tensor." 6 | 7 | 8 | op.parameter :input, "A tensor" 9 | op.option :dtype, "Optional new data type to cast into", :nil, alias: :data_type 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_shape do |tensor| 13 | tensor.inputs[0].shape.shape 14 | end 15 | 16 | op.define_gradient do |grad, node, params| 17 | nil # non differentiable 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/pow.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :pow do |op| 2 | op.what_it_does "Computes the power of one value to another X^Y element wise" 3 | 4 | op.parameter :input_a, "tensor X" 5 | op.parameter :input_b, "tensor Y" 6 | 7 | op.apply_data_type_coercion! 8 | op.supports_broadcasting! 9 | 10 | op.option :name, "Optional name", :nil 11 | 12 | op.define_gradient do |grad, node, params| 13 | x, y = params 14 | z = node 15 | sx = ts.shape(x) 16 | sy = ts.shape(y) 17 | rx, ry = _broadcast_gradient_args(sx, sy) 18 | gx = ts.reduce_sum(grad * y * ts.pow(x, y - 1), rx) 19 | 20 | log_x = ts.where(x > 0, ts.log(x), ts.zeros_like(x)) 21 | gy = ts.reduce_sum(grad * z * log_x, ry) 22 | 23 | [gx, gy] 24 | end 25 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/prod.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :prod do |op| 2 | op.other_names %w(reduce_prod) 3 | op.what_it_does "Computes the product of elements across dimensions of a tensor." 4 | op.what_it_does "Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the" 5 | op.what_it_does "tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are" 6 | op.what_it_does "retained with length 1." 7 | op.what_it_does "If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned." 8 | 9 | op.parameter :input_a, "tensor X" 10 | op.parameter :axis, "tensor X", :nil, validate: 'INTEGER_TYPES' 11 | 12 | op.option :name, "Optional name", :nil 13 | op.option :keepdims, "If true, retains reduced dimensions with length 1.", :false 14 | 15 | op.add_custom "input_a = TensorStream.convert_to_tensor(input_a)" 16 | op.add_custom "return input_a if input_a.shape.scalar?" 
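# Hedged example (not part of the original source) for the prod/reduce_prod op
# being defined here, using the positional axis parameter declared above:
#
#   ts = TensorStream
#   m = ts.constant([[1, 2], [3, 4]])
#   ts.session { |s| s.run(ts.reduce_prod(m)) }    # expected => 24
#   ts.session { |s| s.run(ts.reduce_prod(m, 0)) } # expected => [3, 8]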
17 | op.add_custom "axis = cast_axis(input_a, axis)" 18 | 19 | op.define_gradient do |grad, node, params| 20 | x, y = params 21 | input_shape = ts.shape(x) 22 | y = ts.range(0, ts.rank(x)) if y.nil? 23 | reduction_indices = ts.reshape(y, [-1]) 24 | 25 | output_shape_kept_dims = ts.reduced_shape(input_shape, y) 26 | tile_scaling = _safe_shape_div(input_shape, output_shape_kept_dims) 27 | grad = ts.reshape(grad, output_shape_kept_dims) 28 | grad = ts.tile(grad, tile_scaling) 29 | 30 | perm, reduced_num, other_num = ts.device("/cpu:0") { 31 | rank = ts.rank(x) 32 | reduction_indices = (reduction_indices + rank) % rank 33 | reduced = ts.cast(reduction_indices, :int32) 34 | idx = ts.range(0, rank) 35 | other, = ts.setdiff1d(idx, reduced) 36 | [ts.concat([reduced, other], 0), 37 | ts.reduce_prod(ts.gather(input_shape, reduced)), 38 | ts.reduce_prod(ts.gather(input_shape, other)),] 39 | } 40 | 41 | permuted = ts.transpose(x, perm) 42 | permuted_shape = ts.shape(permuted) 43 | 44 | reshaped = ts.reshape(permuted, [reduced_num, other_num]) 45 | 46 | # Calculate product, leaving out the current entry 47 | left = ts.cumprod(reshaped, axis: 0, exclusive: true) 48 | right = ts.cumprod(reshaped, axis: 0, exclusive: true, reverse: true) 49 | y = ts.reshape(left * right, permuted_shape) 50 | 51 | # Invert the transpose and reshape operations. 52 | # Make sure to set the statically known shape information through a reshape. 53 | out = grad * ts.transpose(y, ts.invert_permutation(perm)) 54 | [ts.reshape(out, input_shape, name: "prod"), nil] 55 | end 56 | 57 | op.define_shape do |tensor| 58 | _infer_reduction_op_shape(tensor) 59 | end 60 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/random_uniform.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :random_uniform do |op| 2 | op.what_it_does "Outputs random values from a uniform distribution." 3 | 4 | op.parameter :shape, "A 1-D integer Tensor or array. The shape of the output tensor." 5 | 6 | op.option :name, "Optional name", :nil 7 | op.option :dtype, "The type of the output: float16, float32, float64, int32, or int64", ":float32" 8 | op.option :minval, "A 0-D Tensor or ruby value of type dtype. The lower bound on the range of random values to generate. Defaults to 0.", 0 9 | op.option :maxval, "A 0-D Tensor or ruby value of type dtype. The upper bound on the range of random values to generate. Defaults to 1 if dtype is floating point.", 1 10 | op.option :seed, " A ruby integer. Used to create a random seed for the distribution. See set_random_seed for behavior.", :nil 11 | 12 | op.define_shape do |tensor| 13 | a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape] 14 | next nil if a_shape.nil? 15 | 16 | a_shape.is_a?(Array) ? a_shape : [a_shape] 17 | end 18 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/range.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :range do |op| 2 | op.what_it_does "Creates a sequence of numbers." 3 | op.what_it_does "Creates a sequence of numbers that begins at start and extends by increments of delta up to but not including limit." 
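# Usage sketch (not part of the original source), based on the parameter and
# option defaults declared above; minval/maxval/seed names come straight from
# the random_uniform definition:
#
#   ts = TensorStream
#   r = ts.range(0, 10, 2)  # expected => [0, 2, 4, 6, 8]
#   u = ts.random_uniform([2, 3], minval: -1.0, maxval: 1.0, seed: 42)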
4 | 5 | op.parameter :start, "Acts as first entry in the range if limit is not nil; otherwise, acts as range limit and first entry defaults to 0.", "0" 6 | op.parameter :limit, "Upper limit of sequence, exclusive. If nil, defaults to the value of start while the first entry of the range defaults to 0.", "0" 7 | op.parameter :delta, "Number that increments start. Defaults to 1.", 1 8 | 9 | op.option :name, " A name for the operation. Defaults to \"range\".", "\"range\"" 10 | op.option :dtype, "The type of the elements of the resulting tensor.", :nil 11 | op.option :output_type, "Output data type defaults to int32", ":int32" 12 | 13 | op.define_gradient do |grad, node, params| 14 | nil # non differentiable 15 | end 16 | 17 | op.define_shape do |tensor| 18 | nil 19 | end 20 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/rank.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :rank do |op| 2 | op.what_it_does "Returns the rank of a tensor" 3 | 4 | op.parameter :input, "A tensor" 5 | op.option :name, "Optional name", :nil 6 | 7 | op.add_custom "input = convert_to_tensor(input)" 8 | op.add_custom "return cons(input.shape.ndims) if input.shape.known?" 9 | 10 | op.define_shape do |tensor| 11 | [] 12 | end 13 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/reshape.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :reshape do |op| 2 | op.what_it_does "Reshapes a tensor." 3 | op.what_it_does "Given tensor, this operation returns a tensor that has the same values as tensor with shape shape." 4 | 5 | op.parameter :input, "A tensor" 6 | op.parameter :shape, "A new tensor shape" 7 | op.option :name, "Optional name", :nil 8 | 9 | op.define_gradient do |grad, node, params| 10 | [ts.reshape(grad, ts.shape(node.inputs[0])), nil] 11 | end 12 | 13 | op.define_shape do |tensor| 14 | new_shape = tensor.inputs[1]&.const_value ? tensor.inputs[1].const_value : nil 15 | next nil if new_shape.nil? 16 | next nil if tensor.inputs[0].shape.nil? 17 | 18 | input_shape = tensor.inputs[0].shape.shape 19 | next new_shape if input_shape.nil? && !new_shape.include?(-1) && !new_shape.include?(nil) 20 | next nil if input_shape.nil? || input_shape.include?(nil) 21 | 22 | TensorStream::TensorShape.fix_inferred_elements(new_shape, input_shape.reduce(:*)) 23 | end 24 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/round.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :round do |op| 2 | op.what_it_does "Rounds the values of a tensor to the nearest integer, element-wise" 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | 6 | op.option :name, "Optional name", :nil 7 | 8 | op.define_gradient do |grad, node, params| 9 | nil 10 | end 11 | 12 | op.define_shape do |tensor| 13 | tensor.inputs[0].shape.shape 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/rsqrt.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :rsqrt do |op| 2 | op.what_it_does "Computes reciprocal of square root of x element-wise." 
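# Illustrative sketch (not part of the original source): the reshape
# shape-inference block above resolves a -1 placeholder dimension from the
# input's element count via TensorShape.fix_inferred_elements:
#
#   ts = TensorStream
#   x = ts.constant([[1, 2, 3], [4, 5, 6]]) # 6 elements
#   y = ts.reshape(x, [3, -1])              # inferred static shape: [3, 2]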
3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | # Returns -0.5 * grad * conj(y)^3. 9 | i_op(:rsqrt_grad, node, grad) 10 | end 11 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/shape.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :shape do |op| 2 | op.what_it_does "This operation returns a 1-D integer tensor representing the shape of input" 3 | 4 | op.parameter :input, "A tensor" 5 | op.option :name, "Optional name", :nil 6 | op.option :out_type, "Optional output type", ":int32" 7 | 8 | op.add_custom 'return constant(shape_eval(input, out_type), dtype: out_type, name: "Shape/#{name}") if input.is_a?(Array) && !input[0].is_a?(Tensor)' 9 | op.add_custom 'return constant(input.shape.shape, dtype: out_type, name: "Shape/#{input.name}_c") if shape_full_specified(input)' 10 | 11 | op.define_shape do |tensor| 12 | tensor.inputs[0].shape.shape ? [tensor.inputs[0].shape.shape.size] : nil 13 | end 14 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/sigmoid.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :sigmoid do |op| 2 | op.what_it_does "Computes sigmoid of x element-wise." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, _node, params| 8 | i_op(:sigmoid_grad, params[0], grad) 9 | end 10 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/sign.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :sign do |op| 2 | op.what_it_does "Computes sign of input element-wise." 3 | op.what_it_does_code "y = sign(x) = -1 if x < 0; 0 if x == 0 or tf.is_nan(x); 1 if x > 0." 4 | op.what_it_does "Zero is returned for NaN inputs." 5 | 6 | op.parameter :input_a, "tensor X" 7 | op.option :name, "Optional name", :nil 8 | 9 | op.define_gradient do |grad, node, params| 10 | ts.zeros(ts.shape(params[0]), dtype: params[0].data_type) 11 | end 12 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/sin.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :sin do |op| 2 | op.what_it_does "Computes sin of input element-wise." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | grad * ts.cos(params[0]) 9 | end 10 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/size.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :size do |op| 2 | op.what_it_does "Returns the size of a tensor." 3 | op.what_it_does "Returns a 0-D Tensor representing the number of elements in input of type out_type. Defaults to :int32." 
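# Hedged example (not part of the original source) for the shape/size style
# ops in this group, assuming the gem's session API:
#
#   ts = TensorStream
#   x = ts.constant([[1.0, 2.0, 3.0]])
#   ts.session { |s| s.run(ts.shape(x)) } # expected => [1, 3]
#   ts.session { |s| s.run(ts.size(x)) }  # expected => 3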
4 | 5 | op.parameter :input, "A tensor" 6 | op.option :name, "Optional name", :nil 7 | op.option :out_type, "Optional output type", ":int32" 8 | 9 | op.define_gradient do |grad, node, params| 10 | nil # non differentiable 11 | end 12 | 13 | op.define_shape do |tensor| 14 | [] 15 | end 16 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/strided_slice.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :strided_slice do |op| 2 | op.what_it_does "Extracts a strided slice of a tensor " 3 | op.what_it_does "this op extracts a slice of size `(end-begin)/stride` 4 | from the given `input_` tensor. Starting at the location specified by `begin` 5 | the slice continues by adding `stride` to the index until all dimensions are 6 | not less than `end`. 7 | Note that a stride can be negative, which causes a reverse slice." 8 | 9 | op.parameter :input, "A tensor" 10 | op.parameter :_begin, "start index" 11 | op.parameter :_end, "end index" 12 | op.parameter :strides, "slice stride", :nil 13 | op.option :name, "Optional name", :nil 14 | 15 | op.define_gradient do |grad, node, params| 16 | input, b_index, e_index, strides = params 17 | x = ts.shape(input, out_type: node.inputs[0].data_type) 18 | 19 | _op(:strided_slice_grad, x, b_index, e_index, strides, grad) 20 | end 21 | 22 | op.define_shape do |tensor| 23 | end 24 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/sub.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :sub do |op| 2 | op.other_names %w(subtract) 3 | op.what_it_does "Returns x - y element-wise." 4 | 5 | op.parameter :input_a, "tensor X" 6 | op.parameter :input_b, "tensor Y" 7 | 8 | op.apply_data_type_coercion! 9 | op.supports_broadcasting! 10 | 11 | op.option :name, "Optional name", :nil 12 | 13 | op.define_gradient do |grad, node, params| 14 | x, y = params 15 | next [grad, -grad] if shapes_fully_specified_and_equal(x, y) 16 | 17 | sx = ts.shape(x, name: "sub/shape_x") 18 | sy = ts.shape(y, name: "sub/shape_y") 19 | rx, ry = _broadcast_gradient_args(sx, sy) 20 | 21 | [ts.reshape(ts.reduce_sum(grad, rx, name: "sub/reduce_sub_x"), sx), 22 | -ts.reshape(ts.reduce_sum(grad, ry, name: "sub/reduce_sub_y"), sy),] 23 | end 24 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/sum.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :sum do |op| 2 | op.other_names %w(reduce_sum) 3 | op.what_it_does "Computes the sum of elements across dimensions of a tensor." 4 | op.what_it_does "Reduces input_tensor along the dimensions given in axis. Unless keepdims is true, the rank of the" 5 | op.what_it_does "tensor is reduced by 1 for each entry in axis. If keepdims is true, the reduced dimensions are" 6 | op.what_it_does "retained with length 1." 7 | op.what_it_does "If axis has no entries, all dimensions are reduced, and a tensor with a single element is returned."
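# Usage sketch (not part of the original source) matching the axis/keepdims
# semantics described above:
#
#   ts = TensorStream
#   m = ts.constant([[1, 1, 1], [1, 1, 1]])
#   ts.session { |s| s.run(ts.reduce_sum(m)) }                    # expected => 6
#   ts.session { |s| s.run(ts.reduce_sum(m, 0)) }                 # expected => [2, 2, 2]
#   ts.session { |s| s.run(ts.reduce_sum(m, 1, keepdims: true)) } # expected => [[3], [3]]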
8 | 9 | op.parameter :input_a, "tensor X" 10 | op.parameter :axis_p, "tensor X", :nil, validate: 'INTEGER_TYPES' 11 | 12 | op.option :axis, "axis", :nil, exclude: true 13 | op.option :name, "Optional name", :nil 14 | op.option :keepdims, "If true, retains reduced dimensions with length 1.", :false 15 | 16 | op.add_custom "input_a = TensorStream.convert_to_tensor(input_a)" 17 | op.add_custom "return input_a if input_a.shape.scalar?" 18 | op.add_custom "axis_p = axis_p || axis" 19 | op.add_custom "axis_p = cast_axis(input_a, axis_p)" 20 | 21 | op.define_gradient do |grad, node, params| 22 | x, y = params 23 | _sum_grad(x, y, grad) 24 | end 25 | 26 | op.define_shape do |tensor| 27 | _infer_reduction_op_shape(tensor) 28 | end 29 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/tan.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :tan do |op| 2 | op.what_it_does "Computes tan of input element-wise." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | secx = ts.reciprocal(ts.cos(params[0])) 9 | secx2 = ts.square(secx) 10 | grad * secx2 11 | end 12 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/tanh.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :tanh do |op| 2 | op.what_it_does "Computes tanh of input element-wise." 3 | 4 | op.parameter :input_a, "tensor X", validate: 'FLOATING_POINT_TYPES' 5 | op.option :name, "Optional name", :nil 6 | 7 | op.define_gradient do |grad, node, params| 8 | grad * i_op(:tanh_grad, params[0]) 9 | end 10 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/tile.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :tile do |op| 2 | op.what_it_does "Constructs a tensor by tiling a given tensor." 3 | op.what_it_does "This operation creates a new tensor by replicating input multiples times." 4 | op.what_it_does "The output tensor's i'th dimension has input.dims(i) * multiples[i] elements," 5 | op.what_it_does "and the values of input are replicated multiples[i] times along the 'i'th dimension. For example, tiling [a b c d] by [2] produces [a b c d a b c d]." 6 | 7 | op.parameter :input, "A tensor" 8 | op.parameter :multiples, "Must be one of the following types: int32, int64. 1-D. Length must be the same as the number of dimensions in input" 9 | op.option :name, "Optional name", :nil 10 | 11 | 12 | op.define_gradient do |grad, node, params| 13 | nil # non differentiable 14 | end 15 | 16 | op.define_shape do |tensor| 17 | nil 18 | end 19 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/top_k.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :top_k do |op| 2 | op.what_it_does "Finds values and indices of the `k` largest entries for the last dimension." 3 | 4 | op.parameter :input, "1-D or higher `Tensor` with last dimension at least `k`." 5 | op.parameter :k, "0-D `int32` `Tensor`. 
Number of top elements to look for along the last dimension (along each row for matrices)", 1 6 | op.option :sorted, "If true the resulting `k` elements will be sorted by the values in descending order.", "true" 7 | op.option :name, "Optional name", :nil 8 | 9 | op.add_custom_post "[result[0], result[1]]" 10 | 11 | op.define_shape do |tensor| 12 | next nil unless tensor.inputs[0].shape.known? 13 | 14 | input_shape = tensor.inputs[0].shape.shape.dup 15 | k = tensor.options[:k] 16 | input_shape[-1] = k 17 | input_shape 18 | end 19 | 20 | op.define_gradient do |grad, node, params| 21 | #TODO 22 | end 23 | end -------------------------------------------------------------------------------- /lib/tensor_stream/ops/zeros.rb: -------------------------------------------------------------------------------- 1 | TensorStream::OpMaker.define_operation :zeros do |op| 2 | op.what_it_does "Creates a tensor with all elements set to zero" 3 | 4 | op.parameter :shape, "A 1-D integer Tensor or ruby array. The shape of the output tensor." 5 | 6 | op.option :dtype, "Optional data type", ":float32" 7 | op.option :name, "Optional name", :nil 8 | 9 | op.define_shape do |tensor| 10 | a_shape = tensor.inputs[0] ? tensor.inputs[0].const_value : tensor.options[:shape] 11 | next nil if a_shape.nil? 12 | 13 | a_shape.is_a?(Array) ? a_shape : [a_shape] 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/placeholder.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | # Class that defines a TensorStream placeholder 3 | class Placeholder < Tensor 4 | def initialize(data_type, rank, shape, options = {}) 5 | setup_initial_state(options) 6 | 7 | @data_type = DataTypeUtils.norm_dtype(data_type.to_sym) 8 | @rank = rank 9 | @shape = TensorShape.new(shape, rank) 10 | @value = nil 11 | @is_const = false 12 | 13 | @name = [@graph.get_name_scope, options[:name] || build_name].compact.reject(&:empty?).join("/") 14 | @op = Graph.get_default_graph.add_op!(:placeholder, data_type: @data_type, shape: @shape, internal_name: @name) 15 | end 16 | 17 | def inspect 18 | "Placeholder(#{@name} shape: #{@shape || "?"} data_type: #{@data_type})" 19 | end 20 | 21 | private 22 | 23 | def build_name 24 | "Placeholder#{graph.get_placeholder_counter}" 25 | end 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /lib/tensor_stream/profile/report_tool.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | ## 3 | # Utility functions for creating performance reports 4 | class ReportTool 5 | def self.profile_for(session, order_by: :slowest) 6 | context = session.last_session_context 7 | eval_times = context[:profile][:operations].map { |name, profile| 8 | [name, profile[:op], profile[:eval_time], profile[:shape]] 9 | } 10 | 11 | if order_by == :slowest 12 | eval_times.sort_by { |a| a[2] }.reverse! 13 | else 14 | eval_times.sort_by { |a| a[2] } 15 | end 16 | end 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /lib/tensor_stream/tensor_shape.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | # class that defines a shape for TensorFlow compatibility 3 | class TensorShape 4 | attr_accessor :rank, :shape 5 | 6 | def initialize(shape, rank = nil) 7 | @shape = shape 8 | @rank = rank.nil? && shape ?
shape.size : rank 9 | end 10 | 11 | def to_s 12 | return "?" if @shape.nil? 13 | 14 | dimensions = @shape.collect { |r| 15 | "Dimension(#{r})" 16 | }.join(",") 17 | "TensorShape([#{dimensions}])" 18 | end 19 | 20 | def [](index) 21 | new_shape = @shape[index] 22 | TensorShape.new(new_shape) 23 | end 24 | 25 | def ndims 26 | shape ? shape.size : nil 27 | end 28 | 29 | def scalar? 30 | known? && shape.size.zero? 31 | end 32 | 33 | def known? 34 | return false if shape.nil? 35 | 36 | a_shape = shape.is_a?(Array) ? shape : [shape] 37 | a_shape.each { |s| return false if s.nil? || s < 0 } 38 | 39 | true 40 | end 41 | 42 | def fully_defined? 43 | known? 44 | end 45 | 46 | def merge_with(other) 47 | assert_compatible_with(other) 48 | 49 | if @shape.nil? 50 | TensorShape.new(other) 51 | else 52 | TensorShape.new(@shape) 53 | end 54 | end 55 | 56 | def compatible_with?(other) 57 | other = as_dimension(other) 58 | 59 | shape.nil? || other.nil? || shape == other 60 | end 61 | 62 | def as_dimension(value) 63 | value.is_a?(TensorShape) ? value.shape : value 64 | end 65 | 66 | def value 67 | shape 68 | end 69 | 70 | ## 71 | # Raises an exception if `other` is not compatible with this shape. 72 | def assert_compatible_with(other) 73 | raise TensorStream::ValueError, "Dimensions #{self} and #{other} are not compatible" unless compatible_with?(other) 74 | end 75 | 76 | def self.infer_shape(shape_a, shape_b) 77 | return nil if shape_a.nil? || shape_b.nil? 78 | return shape_a if shape_b.empty? 79 | return shape_b if shape_a.empty? 80 | return shape_a if shape_a == shape_b 81 | return shape_b if shape_b.size > shape_a.size 82 | return shape_a if shape_a.size > shape_b.size 83 | 84 | reversed_a = shape_a.reverse 85 | reversed_b = shape_b.reverse 86 | 87 | reversed_a.each_with_index.collect { |s, index| 88 | next s if index >= reversed_b.size 89 | next nil if s.nil? || reversed_b[index].nil? 90 | next nil if s.is_a?(Tensor) || reversed_b[index].is_a?(Tensor) 91 | next reversed_b[index] if reversed_b[index] > s 92 | 93 | s 94 | }.reverse 95 | end 96 | 97 | def self.reshape(arr, new_shape) 98 | arr = arr.is_a?(Array) ? arr.flatten : [arr] 99 | new_shape = new_shape.is_a?(TensorShape) ? new_shape.shape : new_shape 100 | new_shape = TensorShape.fix_inferred_elements(new_shape, arr.size) 101 | return arr[0] if arr.size == 1 && new_shape.empty? 102 | 103 | new_shape = new_shape.dup 104 | 105 | s = new_shape.shift 106 | 107 | if new_shape.size.zero? 108 | raise "reshape dimension mismatch #{arr.size} != #{s}" if arr.size != s 109 | 110 | return arr 111 | end 112 | 113 | dim = (arr.size / s) 114 | return arr if dim.zero? 115 | 116 | arr.each_slice(dim).collect do |slice| 117 | reshape(slice, new_shape.dup) 118 | end 119 | end 120 | 121 | def self.fix_inferred_elements(shape, total_size) 122 | return shape if shape.empty? 123 | return nil if shape[0].is_a?(Tensor) 124 | 125 | current_size = shape.inject(1) { |product, n| n > 0 ? product * n : product } 126 | inferred_size = total_size.nil? ? nil : total_size / current_size 127 | shape.map { |s| s == -1 ?
inferred_size : s } 128 | end 129 | end 130 | end 131 | -------------------------------------------------------------------------------- /lib/tensor_stream/train/adadelta_optimizer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # High Level implementation of the Adadelta algorithm 4 | class AdadeltaOptimizer < Optimizer 5 | include TensorStream::OpHelper 6 | 7 | attr_accessor :learning_rate 8 | 9 | def initialize(learning_rate = 0.001, rho = 0.95, epsilon = 1e-8, 10 | use_locking: false, name: "Adadelta") 11 | @learning_rate = learning_rate 12 | @rho = rho 13 | @epsilon = epsilon 14 | 15 | # Tensor versions of the constructor arguments, created in _prepare(). 16 | @learning_rate_tensor = nil 17 | @rho_t = nil 18 | @epsilon_t = nil 19 | super(name: name, use_locking: use_locking) 20 | end 21 | 22 | protected 23 | 24 | def create_slots(var_list) 25 | var_list.each do |v| 26 | zeros_slot(v, "accum", @name) 27 | zeros_slot(v, "accum_update", @name) 28 | end 29 | end 30 | 31 | def prepare 32 | @learning_rate_tensor = convert_to_tensor(@learning_rate, name: "lr") 33 | @rho_t = convert_to_tensor(@rho, name: "rho") 34 | @epsilon_t = convert_to_tensor(@epsilon, name: "epsilon") 35 | end 36 | 37 | def apply_dense(grad, var) 38 | accum = get_slot(var, "accum") 39 | accum_update = get_slot(var, "accum_update") 40 | _op(:apply_adadelta, 41 | var, 42 | accum, 43 | accum_update, 44 | TensorStream.cast(@learning_rate_tensor, var.data_type), 45 | TensorStream.cast(@rho_t, var.data_type), 46 | TensorStream.cast(@epsilon_t, var.data_type), 47 | grad, 48 | use_locking: @use_locking) 49 | end 50 | end 51 | end 52 | end 53 | -------------------------------------------------------------------------------- /lib/tensor_stream/train/adagrad_optimizer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # High Level implementation of the Adagrad algorithm 4 | class AdagradOptimizer < Optimizer 5 | include TensorStream::OpHelper 6 | 7 | attr_accessor :learning_rate 8 | 9 | def initialize(learning_rate, initial_accumulator_value = 0.1, 10 | use_locking: false, name: "Adagrad") 11 | @learning_rate = learning_rate 12 | @initial_accumulator_value = initial_accumulator_value 13 | @learning_rate_tensor = nil 14 | super(name: name, use_locking: use_locking) 15 | end 16 | 17 | protected 18 | 19 | def create_slots(var_list) 20 | var_list.each do |v| 21 | dtype = v.data_type 22 | init = nil 23 | if v.shape.known? 
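# When the variable's static shape is known, Adagrad can build a constant
# initializer for its accumulator slot up front; otherwise the accumulator is
# filled at run time from the variable's runtime shape (see the branch below).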
24 | init = TensorStream.constant_initializer(@initial_accumulator_value, dtype: dtype) 25 | else 26 | init_constant = TensorStream.fill(TensorStream.shape(v), @initial_accumulator_value) 27 | init = TensorStream.cast(init_constant, dtype) 28 | end 29 | get_or_make_slot_with_initializer(v, init, v.shape, dtype, "accumulator", @name) 30 | end 31 | end 32 | 33 | def prepare 34 | learning_rate = call_if_callable(@learning_rate) 35 | @learning_rate_tensor = TensorStream.convert_to_tensor(learning_rate, name: "learning_rate") 36 | end 37 | 38 | def apply_dense(grad, var) 39 | acc = get_slot(var, "accumulator") 40 | _op(:apply_adagrad, 41 | var, acc, TensorStream.cast(@learning_rate_tensor, var.data_type), 42 | grad, use_locking: @use_locking) 43 | end 44 | end 45 | end 46 | end 47 | --------------------------------------------------------------------------------
/lib/tensor_stream/train/gradient_descent_optimizer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # High Level implementation of the gradient descent algorithm 4 | class GradientDescentOptimizer < Optimizer 5 | include TensorStream::OpHelper 6 | 7 | attr_accessor :learning_rate 8 | 9 | def initialize(learning_rate, use_locking: false, name: "GradientDescent") 10 | @learning_rate = learning_rate 11 | @learning_rate_tensor = nil 12 | super(name: name, use_locking: use_locking) 13 | end 14 | 15 | protected 16 | 17 | def prepare 18 | learning_rate = call_if_callable(@learning_rate) 19 | @learning_rate_tensor = convert_to_tensor(learning_rate, name: "learning_rate") 20 | end 21 | 22 | def apply_dense(grad, var) 23 | i_op(:apply_gradient_descent, var, TensorStream.cast(@learning_rate_tensor, grad.data_type), grad) 24 | end 25 | end 26 | end 27 | end 28 | --------------------------------------------------------------------------------
/lib/tensor_stream/train/learning_rate_decay.rb: -------------------------------------------------------------------------------- 1 | ## 2 | # Ruby port of TensorFlow's learning rate decay functions 3 | module TensorStream 4 | module Train 5 | module LearningRateDecay 6 | include TensorStream::Utils 7 | include TensorStream::OpHelper 8 | include TensorStream::Ops 9 | 10 | ## 11 | # Applies exponential decay to the learning rate: decayed_rate = learning_rate * decay_rate ** (global_step / decay_steps) 12 | def exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase: false, name: nil) 13 | raise TensorStream::ValueError, "global_step is required for exponential_decay." if global_step.nil? 14 | 15 | name_scope(name, default: "ExponentialDecay", values: [learning_rate, global_step, decay_steps, decay_rate]) do 16 | learning_rate = convert_to_tensor(learning_rate, name: "learning_rate") 17 | data_type = learning_rate.data_type 18 | decay_steps = cast(decay_steps, data_type) 19 | decay_rate = cast(decay_rate, data_type) 20 | 21 | global_step_recomp = cast(global_step, data_type) 22 | p = global_step_recomp / decay_steps 23 | p = floor(p) if staircase 24 | multiply(learning_rate, pow(decay_rate, p), name: name) 25 | end 26 | end 27 | end 28 | end 29 | end 30 | --------------------------------------------------------------------------------
/lib/tensor_stream/train/momentum_optimizer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # Optimizer that implements the Momentum algorithm. Loosely based on the TensorFlow implementation.
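# Usage sketch (mirrors the commented example in samples/regression/linear_regression.rb;
# `cost` stands in for any scalar loss tensor):
#
#   optimizer = TensorStream::Train::MomentumOptimizer.new(0.01, 0.9, use_nesterov: true)
#   train_op  = optimizer.minimize(cost)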
4 | class MomentumOptimizer < Optimizer 5 | include OpHelper 6 | 7 | ## 8 | # Construct a new Momentum optimizer. 9 | # 10 | # Args: 11 | # learning_rate: A Tensor or a floating point value that indicates the learning rate 12 | # momentum: A Tensor or a floating point value for the momentum 13 | # name: Optional name prefix 14 | # use_nesterov: boolean - Flag that indicates if Nesterov momentum is to be used. http://jmlr.org/proceedings/papers/v28/sutskever13.pdf 15 | # use_locking: boolean - filler argument for compatibility, not used at the moment 16 | def initialize(learning_rate, momentum, name: "momentum", use_nesterov: false, use_locking: false) 17 | @learning_rate = learning_rate 18 | @momentum = momentum 19 | @use_nesterov = use_nesterov 20 | super(name: name, use_locking: use_locking) 21 | end 22 | 23 | protected 24 | 25 | def prepare 26 | @learning_rate_tensor = TensorStream.convert_to_tensor(@learning_rate, name: "learning_rate") 27 | @momentum_tensor = TensorStream.convert_to_tensor(@momentum, name: "momentum") 28 | end 29 | 30 | def create_slots(var_list) 31 | var_list.each do |v| 32 | zeros_slot(v, "momentum", @name) 33 | end 34 | end 35 | 36 | def apply_dense(grad, var) 37 | mom = get_slot(var, "momentum") 38 | 39 | _op(:apply_momentum, var, mom, 40 | TensorStream.cast(@learning_rate_tensor, var.data_type), 41 | grad, 42 | TensorStream.cast(@momentum_tensor, var.data_type), 43 | use_locking: @use_locking, 44 | use_nesterov: @use_nesterov) 45 | end 46 | end 47 | end 48 | end 49 | --------------------------------------------------------------------------------
/lib/tensor_stream/train/rmsprop_optimizer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # High Level implementation of the RMSProp algorithm 4 | # This is a straight port from TensorFlow's rmsprop.py 5 | class RMSPropOptimizer < Optimizer 6 | include TensorStream::OpHelper 7 | 8 | attr_accessor :learning_rate 9 | 10 | ## 11 | # Optimizer that implements the RMSProp algorithm. 12 | # 13 | # [paper](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf). 14 | def initialize(learning_rate, decay = 0.9, momentum = 0.0, epsilon = 1e-10, centered: false, 15 | use_locking: false, name: "RMSProp") 16 | @learning_rate = learning_rate 17 | @decay = decay 18 | @momentum = momentum 19 | @epsilon = epsilon 20 | @centered = centered 21 | 22 | # Tensor versions of the constructor arguments, created in _prepare(). 23 | @learning_rate_tensor = nil 24 | @decay_tensor = nil 25 | @momentum_tensor = nil 26 | @epsilon_tensor = nil 27 | 28 | super(name: name, use_locking: use_locking) 29 | end 30 | 31 | protected 32 | 33 | def prepare 34 | lr = call_if_callable(@learning_rate) 35 | decay = call_if_callable(@decay) 36 | momentum = call_if_callable(@momentum) 37 | epsilon = call_if_callable(@epsilon) 38 | 39 | @learning_rate_tensor = TensorStream.convert_to_tensor(lr, name: "learning_rate") 40 | @decay_tensor = TensorStream.convert_to_tensor(decay, name: "decay") 41 | @momentum_tensor = TensorStream.convert_to_tensor(momentum, name: "momentum") 42 | @epsilon_tensor = TensorStream.convert_to_tensor(epsilon, name: "epsilon") 43 | end 44 | 45 | def create_slots(var_list) 46 | # Create slots for the first and second moments. 47 | var_list.each do |v| 48 | init_rms = if v.shape.known?
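# A statically known shape lets the "rms" slot start from a ones initializer;
# otherwise ones_like defers shape resolution to run time.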
49 | TensorStream.ones_initializer(dtype: v.data_type) 50 | else 51 | TensorStream.ones_like(v) 52 | end 53 | 54 | get_or_make_slot_with_initializer(v, init_rms, v.shape, v.data_type, "rms", @name) 55 | 56 | zeros_slot(v, "mg", @name) if @centered 57 | zeros_slot(v, "momentum", @name) 58 | end 59 | end 60 | 61 | def apply_dense(grad, var) 62 | rms = get_slot(var, "rms") 63 | mom = get_slot(var, "momentum") 64 | 65 | if @centered 66 | mg = get_slot(var, "mg") 67 | _op(:apply_centered_rms_prop, var, mg, rms, mom, 68 | TensorStream.cast(@learning_rate_tensor, var.data_type), 69 | TensorStream.cast(@decay_tensor, var.data_type), 70 | TensorStream.cast(@momentum_tensor, var.data_type), 71 | TensorStream.cast(@epsilon_tensor, var.data_type), 72 | grad, use_locking: @use_locking) 73 | else 74 | _op(:apply_rms_prop, var, rms, mom, 75 | TensorStream.cast(@learning_rate_tensor, var.data_type), 76 | TensorStream.cast(@decay_tensor, var.data_type), 77 | TensorStream.cast(@momentum_tensor, var.data_type), 78 | TensorStream.cast(@epsilon_tensor, var.data_type), 79 | grad, use_locking: @use_locking) 80 | end 81 | end 82 | end 83 | end 84 | end 85 | --------------------------------------------------------------------------------
/lib/tensor_stream/train/slot_creator.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | module SlotCreator 4 | include TensorStream::Utils 5 | 6 | ## 7 | # Helper function for creating a slot variable. 8 | def create_slot_var(_primary, val, scope, shape) 9 | slot = get_variable(scope, initializer: val, trainable: false, shape: shape, 10 | validate_shape: val.shape && val.shape.known?) 11 | slot 12 | end 13 | 14 | ## 15 | # Create a slot initialized to the given value 16 | # 17 | # Args: 18 | # primary: Variable - The primary 'Variable' or 'Tensor' 19 | # val: Tensor - A `Tensor` specifying the initial value of the slot 20 | # name: String - Name to use for the slot variable 21 | # colocate_with_primary: Boolean - If true the slot is located 22 | # on the same device as `primary` 23 | # 24 | # Returns: A `Variable` object 25 | def create_slot(primary, val, name, colocate_with_primary: true) 26 | TensorStream.variable_scope(nil, primary.op.name + "/" + name) do 27 | return create_slot_var(primary, val, "", nil) if colocate_with_primary 28 | 29 | TensorStream.colocate_with(primary) do 30 | return create_slot_var(primary, val, "", nil) 31 | end 32 | end 33 | end 34 | 35 | def create_slot_with_initializer(primary, initializer, shape, dtype, name, colocate_with_primary: true) 36 | prefix = primary.op.name 37 | TensorStream.variable_scope(nil, prefix + "/" + name) do 38 | create_slot_var(primary, initializer, "", shape.shape) 39 | end 40 | end 41 | 42 | ## 43 | # Create a slot initialized to 0 with same shape as the primary object. 44 | # 45 | # Args: 46 | # primary: The primary variable or Tensor 47 | # name: String - Name to use for the slot variable 48 | # dtype: Symbol - Type of the slot variable 49 | # colocate_with_primary: boolean - If true the slot is located on the same device as primary 50 | # 51 | # Returns: 52 | # A `Variable` object 53 | def create_zeros_slot(primary, name, dtype: nil, colocate_with_primary: true) 54 | dtype = primary.data_type if dtype.nil? 55 | slot_shape = primary.shape 56 | slot_shape = if slot_shape.fully_defined?
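# Use the static shape directly when it is fully defined; otherwise derive the
# shape from the variable's initialized value at run time.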
57 | slot_shape.shape 58 | else 59 | TensorStream.shape(primary.initialized_value) 60 | end 61 | val = TensorStream.zeros(slot_shape, dtype: dtype) 62 | create_slot(primary, val, name, 63 | colocate_with_primary: colocate_with_primary) 64 | end 65 | end 66 | end 67 | end 68 | -------------------------------------------------------------------------------- /lib/tensor_stream/train/utils.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module Train 3 | # convenience methods used for training 4 | module Utils 5 | def create_global_step(graph = nil) 6 | target_graph = graph || TensorStream.get_default_graph 7 | raise TensorStream::ValueError, '"global_step" already exists.' unless get_global_step(target_graph).nil? 8 | 9 | TensorStream.variable_scope.get_variable(TensorStream::GraphKeys::GLOBAL_STEP, shape: [], 10 | dtype: :int64, 11 | initializer: TensorStream.zeros_initializer, 12 | trainable: false, 13 | collections: [TensorStream::GraphKeys::GLOBAL_VARIABLES, 14 | TensorStream::GraphKeys::GLOBAL_STEP,]) 15 | end 16 | 17 | def get_global_step(graph = nil) 18 | target_graph = graph || TensorStream.get_default_graph 19 | global_step_tensors = target_graph.get_collection(TensorStream::GraphKeys::GLOBAL_STEP) 20 | global_step_tensor = if global_step_tensors.nil? || global_step_tensors.empty? 21 | begin 22 | target_graph.get_tensor_by_name("global_step:0") 23 | rescue TensorStream::KeyError 24 | nil 25 | end 26 | elsif global_step_tensors.size == 1 27 | global_step_tensors[0] 28 | else 29 | TensorStream.logger.error("Multiple tensors in global_step collection.") 30 | nil 31 | end 32 | global_step_tensor 33 | end 34 | end 35 | end 36 | end 37 | -------------------------------------------------------------------------------- /lib/tensor_stream/trainer.rb: -------------------------------------------------------------------------------- 1 | require "tensor_stream/train/slot_creator" 2 | require "tensor_stream/train/optimizer" 3 | require "tensor_stream/train/gradient_descent_optimizer" 4 | require "tensor_stream/train/momentum_optimizer" 5 | require "tensor_stream/train/adam_optimizer" 6 | require "tensor_stream/train/adadelta_optimizer" 7 | require "tensor_stream/train/adagrad_optimizer" 8 | require "tensor_stream/train/rmsprop_optimizer" 9 | require "tensor_stream/train/saver" 10 | require "tensor_stream/train/learning_rate_decay" 11 | 12 | module TensorStream 13 | module Trainer 14 | extend TensorStream::Train::Utils 15 | extend TensorStream::Train::LearningRateDecay 16 | extend TensorStream::StringHelper 17 | 18 | def self.write_graph(graph, path, filename, as_text: true, serializer: :yaml) 19 | raise "only supports as_text=true for now" unless as_text 20 | 21 | serializer = constantize("TensorStream::#{camelize(serializer.to_s)}") if serializer.is_a?(Symbol) 22 | 23 | new_filename = File.join(path, filename) 24 | serializer.new.get_string(graph).tap do |str| 25 | File.write(new_filename, str) 26 | end 27 | end 28 | end 29 | end 30 | -------------------------------------------------------------------------------- /lib/tensor_stream/types.rb: -------------------------------------------------------------------------------- 1 | require "ostruct" 2 | 3 | module TensorStream 4 | # Convenience class for specifying valid data_types 5 | module Types 6 | def self.int16 7 | :int16 8 | end 9 | 10 | def self.uint16 11 | :uint16 12 | end 13 | 14 | def self.float16 15 | :float16 16 | end 17 | 18 | def self.float32 19 | :float32 20 | end 21 | 22 | 
def self.int32 23 | :int32 24 | end 25 | 26 | def self.uint32 27 | :uint32 28 | end 29 | 30 | def self.uint8 31 | :uint8 32 | end 33 | 34 | def self.float64 35 | :float64 36 | end 37 | 38 | def self.string 39 | :string 40 | end 41 | 42 | def self.boolean 43 | :boolean 44 | end 45 | end 46 | end 47 | -------------------------------------------------------------------------------- /lib/tensor_stream/utils/data_type_utils.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class DataTypeUtils 3 | def self.norm_dtype(dtype) 4 | dtype = dtype.to_sym 5 | case dtype 6 | when :int 7 | :int32 8 | when :float 9 | :float32 10 | else 11 | dtype 12 | end 13 | end 14 | end 15 | end -------------------------------------------------------------------------------- /lib/tensor_stream/utils/freezer.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class Freezer 3 | include TensorStream::OpHelper 4 | 5 | ## 6 | # Utility class to convert variables to constants for production deployment 7 | # 8 | def convert(session, checkpoint_folder, output_file) 9 | model_file = File.join(checkpoint_folder, "model.yaml") 10 | TensorStream.graph.as_default do |current_graph| 11 | YamlLoader.new.load_from_string(File.read(model_file)) 12 | saver = TensorStream::Train::Saver.new 13 | saver.restore(session, checkpoint_folder) 14 | 15 | # collect all assign ops and remove them from the graph 16 | remove_nodes = Set.new(current_graph.nodes.values.select { |op| op.is_a?(TensorStream::Operation) && op.operation == :assign }.map { |op| op.consumers.to_a }.flatten.uniq) 17 | 18 | output_buffer = TensorStream::Yaml.new.get_string(current_graph) { |graph, node_key| 19 | node = graph.get_tensor_by_name(node_key) 20 | case node.operation 21 | when :variable_v2 22 | value = Evaluator.read_variable(node.graph, node.options[:var_name]) 23 | if value.nil? 24 | raise "#{node.options[:var_name]} has no value" 25 | end 26 | 27 | options = { 28 | value: value, 29 | data_type: node.data_type, 30 | shape: shape_eval(value), 31 | } 32 | const_op = TensorStream::Operation.new(current_graph, inputs: [], options: options) 33 | const_op.name = node.name 34 | const_op.operation = :const 35 | const_op.data_type = node.data_type 36 | const_op.shape = TensorShape.new(shape_eval(value)) 37 | 38 | const_op 39 | when :assign 40 | nil 41 | else 42 | remove_nodes.include?(node.name) ? 
nil : node 43 | end 44 | } 45 | File.write(output_file, output_buffer) 46 | end 47 | end 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /lib/tensor_stream/utils/py_ports.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | module PyPorts 3 | def floor_div(a, b) 4 | if (a.is_a?(Float)) 5 | (a.to_i / b.to_i).to_f 6 | else 7 | a / b 8 | end 9 | end 10 | end 11 | end -------------------------------------------------------------------------------- /lib/tensor_stream/variable.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | # Class that defines a TensorStream variable 3 | class Variable < Tensor 4 | attr_accessor :trainable, :options, :buffer, :op 5 | attr_writer :value 6 | 7 | def initialize(data_type) 8 | @data_type = data_type 9 | @options = {} 10 | @is_const = false 11 | end 12 | 13 | def prepare(rank, shape, variable_scope, options = {}) 14 | setup_initial_state(options) 15 | 16 | @rank = rank 17 | @value = nil 18 | 19 | scope_name = variable_scope ? variable_scope.name : nil 20 | variable_scope_initializer = variable_scope ? variable_scope.initializer : nil 21 | @name = [scope_name, options[:name] || build_name].compact.reject(&:empty?).join("/") 22 | @initalizer_tensor = options[:initializer] || variable_scope_initializer || TensorStream.glorot_uniform_initializer 23 | shape = @initalizer_tensor.shape.shape if shape.nil? && @initalizer_tensor && @initalizer_tensor.shape 24 | 25 | @shape = TensorShape.new(shape, rank) 26 | @trainable = options.fetch(:trainable, true) 27 | end 28 | 29 | def trainable? 30 | @trainable 31 | end 32 | 33 | def initializer 34 | init_op = @initalizer_tensor.op 35 | init_op.shape = @shape || init_op.shape 36 | init_op.data_type = @data_type || init_op.data_type 37 | assign(init_op) 38 | end 39 | 40 | def initialized_value 41 | init_op = @initalizer_tensor.op 42 | init_op.shape = @shape || init_op.shape 43 | init_op.data_type = @data_type || init_op.data_type 44 | init_op 45 | end 46 | 47 | def assign(value, name: nil, use_locking: false) 48 | TensorStream.check_data_types(self, value) 49 | _op(:assign, value, name: name, var_name: @name) 50 | end 51 | 52 | def read_value 53 | Evaluator.read_variable(@graph, @name) 54 | end 55 | 56 | def assign_add(value, name: nil) 57 | TensorStream.check_data_types(self, value) 58 | _op(:assign_add, value, data_type: data_type, name: name, var_name: @name) 59 | end 60 | 61 | def to_math(_tensor, _name_only = false, _max_depth = 99, _unused = 0) 62 | @name 63 | end 64 | 65 | def assign_sub(value) 66 | TensorStream.check_data_types(self, value) 67 | _op(:assign_sub, value, data_type: data_type, name: name, var_name: @name) 68 | end 69 | 70 | def self.variables_initializer(collection) 71 | global_variables_ops = TensorStream.get_default_graph.get_collection(collection).map do |variable| 72 | _op(:assign, variable.initializer, var_name: variable.name) 73 | end 74 | 75 | TensorStream.group(global_variables_ops) 76 | end 77 | 78 | def self.global_variables_initializer 79 | variables_initializer(TensorStream::GraphKeys::GLOBAL_VARIABLES) 80 | end 81 | 82 | def inspect 83 | "Variable(#{@name} shape: #{@shape || "?"} data_type: #{@data_type})" 84 | end 85 | 86 | protected 87 | 88 | def build_name 89 | "Variable#{graph.get_var_counter}:#{@rank}" 90 | end 91 | end 92 | end 93 | -------------------------------------------------------------------------------- 
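A minimal sketch of the variable lifecycle defined by the Variable class above, using the `.var` and `global_variables_initializer` helpers that appear in the samples later in this dump (shapes, names, and values here are illustrative only, not taken from the repo):

  tf = TensorStream
  w = tf.zeros([2, 2]).var name: "w"      # wrap a tensor in a Variable
  init = tf.global_variables_initializer  # groups an assign(initializer) op per variable
  sess = tf.session
  sess.run(init)                          # materializes the initial values
  sess.run(w.assign_add(tf.constant([[1.0, 1.0], [1.0, 1.0]])))
  puts w.read_value.inspect               # reads the stored value through the evaluator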
/lib/tensor_stream/variable_scope.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class VariableScope 3 | include OpHelper 4 | attr_accessor :name, :reuse, :initializer 5 | attr_reader :used_names 6 | 7 | def initialize(name: nil, reuse: nil, initializer: nil) 8 | @name = name 9 | @reuse = reuse 10 | @initializer = initializer 11 | @used_names = [] 12 | end 13 | 14 | def get_variable(name, dtype: nil, shape: nil, initializer: nil, trainable: true, collections: nil, validate_shape: false) 15 | raise TensorStream::ValueError, "validate_shape=true and initializer does not have a defined shape" if validate_shape && shape.nil? && initializer.is_a?(Tensor) 16 | 17 | i_var(dtype || :float32, nil, shape, self, collections: collections, name: name, initializer: initializer, trainable: trainable) 18 | end 19 | 20 | def register_name(name) 21 | @used_names << name unless @used_names.include?(name) 22 | end 23 | end 24 | end 25 | --------------------------------------------------------------------------------
/lib/tensor_stream/version.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | VERSION = "1.0.9".freeze 3 | 4 | def self.version 5 | VERSION 6 | end 7 | end 8 | --------------------------------------------------------------------------------
/samples/neural_networks/iris.rb: -------------------------------------------------------------------------------- 1 | require "bundler/setup" 2 | require "tensor_stream" 3 | # require 'tensor_stream/opencl' 4 | 5 | # This neural network will predict the species of an iris based on sepal and petal size 6 | # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set 7 | tf = TensorStream 8 | rows = File.readlines("samples/datasets/iris.data").map {|l| l.chomp.split(",") } 9 | 10 | rows.shuffle! 11 | 12 | label_encodings = { 13 | "Iris-setosa" => [1, 0, 0], 14 | "Iris-versicolor" => [0, 1, 0], 15 | "Iris-virginica" => [0, 0, 1], 16 | } 17 | 18 | x_data = rows.map {|row| row[0, 4].map(&:to_f) } 19 | y_data = rows.map {|row| label_encodings[row[4]] } 20 | 21 | # Normalize data values before feeding into network 22 | normalize = ->(val, high, low) { (val - low) / (high - low) } # maps input to float between 0 and 1 23 | 24 | columns = (0..3).map { |i| 25 | x_data.map {|row| row[i] } 26 | } 27 | 28 | x_data.map! do |row| 29 | row.map.with_index do |val, j| 30 | max, min = columns[j].max, columns[j].min 31 | normalize.call(val, max, min) 32 | end 33 | end 34 | 35 | x_train = x_data.slice(0, 100) 36 | y_train = y_data.slice(0, 100) 37 | 38 | x_test = x_data.slice(100, 50) 39 | y_test = y_data.slice(100, 50) 40 | 41 | test_cases = [] 42 | x_train.each_with_index do |x, index| 43 | test_cases << [x, y_train[index]] 44 | end 45 | 46 | validation_cases = [] 47 | x_test.each_with_index do |x, index| 48 | validation_cases << [x, y_test[index]] 49 | end 50 | 51 | def init_weights(shape) 52 | # Weight initialization 53 | TensorStream.random_normal(shape, stddev: 0.1).var 54 | end 55 | 56 | def forwardprop(x, w_1, w_2) 57 | # Forward-propagation. 58 | # IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
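# In matrix form: h = sigmoid(x · w_1) and yhat = h · w_2; the softmax over
# yhat is applied later, inside the loss op.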
59 | h = TensorStream.nn.sigmoid(x.matmul(w_1)) # The \sigma function 60 | h.matmul w_2 # The \varphi function 61 | end 62 | 63 | x_size = x_train[0].size 64 | y_size = y_train[0].size 65 | h_size = 256 66 | X = tf.placeholder(:float32, shape: [nil, x_size]) 67 | y = tf.placeholder(:float32, shape: [nil, y_size]) 68 | 69 | # Weight initializations 70 | w_1 = init_weights([x_size, h_size]) 71 | w_2 = init_weights([h_size, y_size]) 72 | 73 | # Forward propagation 74 | yhat = forwardprop(X, w_1, w_2) 75 | predict = tf.argmax(yhat, 1) 76 | 77 | # Backward propagation 78 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: yhat)) 79 | 80 | # updates = TensorStream::Train::GradientDescentOptimizer.new(0.01).minimize(cost) 81 | # updates = TensorStream::Train::MomentumOptimizer.new(0.01, 0.5, use_nesterov: true).minimize(cost) 82 | updates = TensorStream::Train::RMSPropOptimizer.new(0.01).minimize(cost) 83 | 84 | # Run SGD 85 | sess = tf.session 86 | init = tf.global_variables_initializer 87 | sess.run(init) 88 | loss = sess.run(cost, feed_dict: {X => x_test, y => y_test}) 89 | puts "loss test data set #{loss}" 90 | loss = sess.run(cost, feed_dict: {X => x_train, y => y_train}) 91 | puts "Testing the untrained network..." 92 | puts loss 93 | start_time = Time.now 94 | (0..100).each do |epoch| 95 | x_train.size.times do |i| 96 | sess.run(updates, feed_dict: {X => [x_train[i]], y => [y_train[i]]}) 97 | end 98 | 99 | loss = sess.run(cost, feed_dict: {X => x_train, y => y_train}) 100 | puts "epoch: #{epoch}, loss #{loss}" 101 | end 102 | 103 | loss = sess.run(cost, feed_dict: {X => x_train, y => y_train}) 104 | puts "loss after training #{loss}" 105 | loss = sess.run(cost, feed_dict: {X => x_test, y => y_test}) 106 | puts "loss test data set #{loss}" 107 | puts("time elapsed ", Time.now.to_i - start_time.to_i) 108 | --------------------------------------------------------------------------------
/samples/neural_networks/mnist_data.rb: -------------------------------------------------------------------------------- 1 | # A Ruby port of the example code discussed by Martin Gorner in 2 | # "TensorFlow and Deep Learning without a PhD, Part 1 (Google Cloud Next '17)" 3 | # 4 | # https://www.youtube.com/watch?v=u4alGiomYP4 5 | # 6 | # Requirements: 7 | # mnist-learn gem 8 | # opencl_ruby_ffi gem 9 | require "bundler/setup" 10 | require "tensor_stream" 11 | require "mnist-learn" 12 | 13 | # Enable OpenCL hardware accelerated computation, not using OpenCL can be very slow 14 | # gem install tensor_stream-opencl 15 | require 'tensor_stream/opencl' 16 | 17 | tf = TensorStream 18 | 19 | # Import MNIST data 20 | puts "downloading MNIST data" 21 | mnist = Mnist.read_data_sets("/tmp/data", one_hot: true) 22 | puts "download finished" 23 | 24 | x = Float.placeholder shape: [nil, 784] 25 | w = tf.zeros([784, 10]).var 26 | b = tf.zeros([10]).var 27 | 28 | # model 29 | y = tf.nn.softmax(x.reshape([-1, 784]).matmul(w) + b) 30 | 31 | y_ = Float.placeholder shape: [nil, 10] 32 | 33 | # loss function 34 | cross_entropy = -(y_ * y.log).reduce 35 | 36 | is_correct = tf.argmax(y, 1) == tf.argmax(y_, 1) 37 | accuracy = is_correct.cast.reduce :mean 38 | 39 | optimizer = TensorStream::Train::AdamOptimizer.new 40 | train_step = optimizer.minimize(cross_entropy) 41 | 42 | sess = tf.session 43 | init = tf.global_variables_initializer 44 | sess.run(init) 45 | 46 | (0...1000).each do |i| 47 | # load batch of images and correct answers 48 | batch_x, batch_y = mnist.train.next_batch(100) 49 |
train_data = {x => batch_x, y_ => batch_y} 50 | 51 | # train 52 | sess.run(train_step, feed_dict: train_data) 53 | if i % 10 == 0 54 | # success? add code to print it 55 | a, c = sess.run([accuracy, cross_entropy], feed_dict: train_data) 56 | puts "#{i} train accuracy #{a}, error #{c}" 57 | 58 | # success on test data? 59 | test_data = {x => mnist.test.images, y_ => mnist.test.labels} 60 | a, c = sess.run([accuracy, cross_entropy], feed_dict: test_data) 61 | puts " test accuracy #{a}, error #{c}" 62 | end 63 | end 64 | -------------------------------------------------------------------------------- /samples/neural_networks/rnn.rb: -------------------------------------------------------------------------------- 1 | # RNN sample 2 | # 3 | # Ruby port Example based on article by Erik Hallström 4 | # https://medium.com/@erikhallstrm/hello-world-rnn-83cd7105b767 5 | # 6 | # 7 | 8 | require "bundler/setup" 9 | require "tensor_stream" 10 | # require 'tensor_stream/opencl' 11 | # require 'pry-byebug' 12 | 13 | tf = TensorStream 14 | 15 | num_epochs = 100 16 | total_series_length = 50000 17 | truncated_backprop_length = 15 18 | state_size = 4 19 | num_classes = 2 20 | echo_step = 3 21 | batch_size = 5 22 | num_batches = total_series_length / batch_size / truncated_backprop_length 23 | randomizer = TensorStream.random_uniform([total_series_length], minval: 0, maxval: 2) 24 | 25 | def generate_data(randomizer, total_series_length, batch_size, echo_step) 26 | x = randomizer.eval 27 | y = x.rotate(-echo_step) 28 | 29 | y[echo_step] = 0 30 | 31 | x = TensorStream::TensorShape.reshape(x, [batch_size, -1]) # The first index changing slowest, subseries as rows 32 | y = TensorStream::TensorShape.reshape(y, [batch_size, -1]) 33 | [x, y] 34 | end 35 | 36 | batchX_placeholder = tf.placeholder(:float32, shape: [batch_size, truncated_backprop_length], name: "batch_x") 37 | batchY_placeholder = tf.placeholder(:int32, shape: [batch_size, truncated_backprop_length], name: "batch_y") 38 | 39 | init_state = tf.placeholder(:float32, shape: [batch_size, state_size], name: "init_state") 40 | 41 | W = tf.variable(tf.random_uniform([state_size + 1, state_size]), dtype: :float32, name: "W") 42 | b = tf.variable(tf.zeros([state_size]), dtype: :float32, name: "b") 43 | 44 | W2 = tf.variable(tf.random_uniform([state_size, num_classes]), dtype: :float32, name: "W2") 45 | b2 = tf.variable(tf.zeros([num_classes]), dtype: :float32, name: "b2") 46 | 47 | inputs_series = tf.unpack(batchX_placeholder, axis: 1) 48 | labels_series = tf.unpack(batchY_placeholder, axis: 1) 49 | 50 | current_state = init_state 51 | states_series = [] 52 | 53 | inputs_series.each do |current_input| 54 | current_input = tf.reshape(current_input, [batch_size, 1]) 55 | input_and_state_concatenated = tf.concat([current_input, current_state], 1) # Increasing number of columns 56 | next_state = tf.tanh(tf.matmul(input_and_state_concatenated, W) + b) # Broadcasted addition 57 | states_series << next_state 58 | current_state = next_state 59 | end 60 | 61 | logits_series = states_series.collect { |state| 62 | tf.matmul(state, W2) + b2 63 | } 64 | 65 | predictions_series = logits_series.collect { |logits| 66 | tf.nn.softmax(logits) 67 | } 68 | 69 | losses = logits_series.zip(labels_series).collect { |logits, labels| 70 | tf.nn.sparse_softmax_cross_entropy_with_logits(logits: logits, labels: labels) 71 | } 72 | total_loss = tf.reduce_mean(losses) 73 | 74 | train_step = TensorStream::Train::AdagradOptimizer.new(0.1).minimize(total_loss) 75 | 76 | puts 
"#{tf.get_default_graph.nodes.keys.size} nodes created" 77 | zeros_state = tf.zeros([batch_size, state_size]).eval 78 | tf.session do |sess| 79 | sess.run(tf.global_variables_initializer) 80 | (0..num_epochs).each do |epoch_idx| 81 | x, y = generate_data(randomizer, total_series_length, batch_size, echo_step) 82 | _current_state = zeros_state 83 | print("New data, epoch", epoch_idx, "\n") 84 | (0..num_batches - 1).each do |batch_idx| 85 | start_idx = batch_idx * truncated_backprop_length 86 | end_idx = start_idx + truncated_backprop_length 87 | 88 | batchX = x.map { |x| x[start_idx...end_idx] } 89 | batchY = y.map { |y| y[start_idx...end_idx] } 90 | 91 | _total_loss, _train_step, _current_state, _predictions_series = sess.run( 92 | [total_loss, train_step, current_state, predictions_series], 93 | feed_dict: { 94 | batchX_placeholder => batchX, 95 | batchY_placeholder => batchY, 96 | init_state => _current_state, 97 | } 98 | ) 99 | 100 | if batch_idx % 10 == 0 101 | print("Step", batch_idx, " Loss ", _total_loss, "\n") 102 | end 103 | end 104 | end 105 | end 106 | -------------------------------------------------------------------------------- /samples/others/nearest_neighbor.rb: -------------------------------------------------------------------------------- 1 | ""' 2 | A nearest neighbor learning algorithm example using TensorFlow library. 3 | This example is using the MNIST database of handwritten digits 4 | (http://yann.lecun.com/exdb/mnist/) 5 | 6 | Author: Aymeric Damien 7 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 8 | 9 | Make sure to install the mnist-learn gem !! 10 | '"" 11 | require "bundler/setup" 12 | require "tensor_stream" 13 | require "mnist-learn" 14 | 15 | tf = TensorStream 16 | 17 | # Import MNIST data 18 | mnist = Mnist.read_data_sets("/tmp/data", one_hot: true) 19 | 20 | # In this example, we limit mnist data 21 | Xtr, Ytr = mnist.train.next_batch(5000) # 5000 for training (nn candidates) 22 | Xte, Yte = mnist.test.next_batch(200) # 200 for testing 23 | 24 | # tf Graph Input 25 | xtr = tf.placeholder(:float, shape: [nil, 784]) 26 | xte = tf.placeholder(:float, shape: [784]) 27 | 28 | # Nearest Neighbor calculation using L1 Distance 29 | # Calculate L1 Distance 30 | distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), 1) 31 | # Prediction: Get min distance index (Nearest neighbor) 32 | pred = tf.argmin(distance, 0) 33 | 34 | accuracy = 0.0 35 | 36 | # Initialize the variables (i.e. 
assign their default value) 37 | init = tf.global_variables_initializer 38 | 39 | # Start training 40 | tf.session do |sess| 41 | # Run the initializer 42 | sess.run(init) 43 | Xte.size.times do |i| 44 | # Get nearest neighbor 45 | nn_index = sess.run(pred, feed_dict: {xtr => Xtr, xte => Xte[i]}) 46 | print("Test ", i, "Prediction: ", Ytr[nn_index].max, \ 47 | "True Class: ", Yte[i].max, "\n") 48 | if Ytr[nn_index].max == Yte[i].max 49 | accuracy += 1.0 / Xte.size 50 | end 51 | end 52 | 53 | print("Done!") 54 | print("Accuracy:", accuracy) 55 | end 56 | -------------------------------------------------------------------------------- /samples/regression/linear_regression.rb: -------------------------------------------------------------------------------- 1 | require "tensor_stream" 2 | 3 | tf = TensorStream 4 | 5 | learning_rate = 0.01 6 | training_epochs = 1000 7 | display_step = 50 8 | 9 | train_x = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 10 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1,] 11 | 12 | train_y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 13 | 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3,] 14 | 15 | n_samples = train_x.size 16 | 17 | x_value = Float.placeholder 18 | y_value = Float.placeholder 19 | 20 | # Set model weights 21 | weight = rand.t.var name: "weight" 22 | 23 | bias = rand.t.var name: "bias" 24 | 25 | # Construct a linear model 26 | pred = x_value * weight + bias 27 | 28 | # Mean squared error 29 | cost = ((pred - y_value)**2).reduce / (2 * n_samples) 30 | 31 | # Other optimizers -- 32 | # 33 | # optimizer = TensorStream::Train::MomentumOptimizer.new(learning_rate, momentum, use_nesterov: true).minimize(cost) 34 | # optimizer = TensorStream::Train::AdamOptimizer.new(learning_rate).minimize(cost) 35 | # optimizer = TensorStream::Train::AdadeltaOptimizer.new(1.0).minimize(cost) 36 | # optimizer = TensorStream::Train::AdagradOptimizer.new(0.01).minimize(cost) 37 | # optimizer = TensorStream::Train::RMSPropOptimizer.new(0.01, centered: true).minimize(cost) 38 | optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost) 39 | 40 | # Initialize the variables (i.e. assign their default value) 41 | init = tf.global_variables_initializer 42 | 43 | tf.session do |sess| 44 | start_time = Time.now 45 | sess.run(init) 46 | 47 | (0..training_epochs).each do |epoch| 48 | train_x.zip(train_y).each do |x, y| 49 | sess.run(optimizer, feed_dict: {x_value => x, y_value => y}) 50 | end 51 | 52 | if (epoch + 1) % display_step == 0 53 | c = sess.run(cost, feed_dict: {x_value => train_x, y_value => train_y}) 54 | puts("Epoch:", "%04d" % (epoch + 1), "cost=", c, \ 55 | "W=", sess.run(weight), "b=", sess.run(bias)) 56 | end 57 | end 58 | 59 | puts "Optimization Finished!" 
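# Report the final mean-squared-error cost (sum((pred - y)^2) / 2n, as defined
# above) on the full training set, together with the learned weight and bias.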
60 | training_cost = sess.run(cost, feed_dict: {x_value => train_x, y_value => train_y}) 61 | puts "Training cost=", training_cost, "W=", sess.run(weight), "b=", sess.run(bias), "\n" 62 | puts "time elapsed ", Time.now.to_i - start_time.to_i 63 | end 64 | --------------------------------------------------------------------------------
/samples/regression/logistic_regression.rb: -------------------------------------------------------------------------------- 1 | # Model based on https://www.kaggle.com/autuanliuyc/logistic-regression-with-tensorflow 2 | 3 | require "bundler/setup" 4 | require "tensor_stream" 5 | 6 | tf = TensorStream 7 | 8 | rows = File.readlines(File.join("samples", "datasets", "iris.data")).map {|l| l.chomp.split(",") } 9 | 10 | iris = rows[0...100].shuffle! 11 | 12 | transformed_data = iris.collect { |row| 13 | row[0, 4].map(&:to_f) 14 | } 15 | 16 | columns = (0..3).map { |i| 17 | transformed_data.map { |row| row[i] } 18 | } 19 | 20 | # Normalize data values before feeding into network 21 | normalize = ->(val, high, low) { (val - low) / (high - low) } # maps input to float between 0 and 1 22 | 23 | transformed_data.map! do |row| 24 | row.map.with_index do |val, j| 25 | max, min = columns[j].max, columns[j].min 26 | normalize.call(val, max, min) 27 | end 28 | end 29 | 30 | srand(5) 31 | seed = 5 32 | tf.set_random_seed(seed) 33 | 34 | train_x = transformed_data[0..50].map { |x| x[0..3].map(&:to_f) } 35 | train_y = iris[0..50].map { |x| x[4] == "Iris-setosa" ? 0.0 : 1.0 } 36 | 37 | test_x = transformed_data[51..100].map { |x| x[0..3].map(&:to_f) } 38 | test_y = iris[51..100].map { |x| x[4] == "Iris-setosa" ? 0.0 : 1.0 } 39 | 40 | A = tf.random_normal([4, 1]).var 41 | b = tf.random_normal([1, 1]).var 42 | 43 | init = tf.global_variables_initializer 44 | sess = tf.session 45 | sess.run(init) 46 | 47 | data = Float.placeholder shape: [nil, 4] 48 | target = Float.placeholder shape: [nil, 1] 49 | 50 | mod = data.matmul(A) + b 51 | 52 | loss = tf.nn.sigmoid_cross_entropy_with_logits(logits: mod, labels: target).reduce :mean 53 | 54 | learning_rate = 0.003 55 | batch_size = 30 56 | iter_num = 1500 57 | 58 | optimizer = TensorStream::Train::GradientDescentOptimizer.new(learning_rate) 59 | goal = optimizer.minimize(loss) 60 | prediction = tf.sigmoid(mod).round 61 | 62 | # Bool into float32 type 63 | correct = (prediction == target).cast 64 | 65 | # Average 66 | accuracy = correct.reduce :mean 67 | 68 | loss_trace = [] 69 | train_acc = [] 70 | test_acc = [] 71 | 72 | (0..iter_num).each do |epoch| 73 | batch_train_X = train_x 74 | batch_train_y = [train_y].transpose 75 | sess.run(goal, feed_dict: {data => batch_train_X, target => batch_train_y}) 76 | 77 | if epoch % 50 == 0 78 | temp_loss = sess.run(loss, feed_dict: {data => batch_train_X, target => batch_train_y}) 79 | temp_train_acc = sess.run(accuracy, feed_dict: {data => batch_train_X, target => batch_train_y}) 80 | temp_test_acc = sess.run(accuracy, feed_dict: {data => test_x, target => [test_y].transpose}) 81 | puts "epoch #{epoch}, loss #{temp_loss} train acc: #{temp_train_acc}, test acc: #{temp_test_acc}" 82 | end 83 | end 84 | --------------------------------------------------------------------------------
/spec/fixtures/0_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/0_image.png --------------------------------------------------------------------------------
/spec/fixtures/1_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/1_image.png -------------------------------------------------------------------------------- /spec/fixtures/compare.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/compare.png -------------------------------------------------------------------------------- /spec/fixtures/expected_grad_2.json: -------------------------------------------------------------------------------- 1 | [[[[17854.0, 17854.0, 17854.0, 17854.0]], [[17854.0, 17854.0, 17854.0, 17854.0]], [[17854.0, 17854.0, 17854.0, 17854.0]], [[17854.0, 17854.0, 17854.0, 17854.0]], [[17854.0, 17854.0, 17854.0, 17854.0]]], [[[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]]], [[[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]]], [[[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]]], [[[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]], [[18454.0, 18454.0, 18454.0, 18454.0]]]] -------------------------------------------------------------------------------- /spec/fixtures/grayscale_image.json: -------------------------------------------------------------------------------- 1 | 
[[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[84],[185],[159],[151],[60],[36],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[222],[254],[254],[254],[254],[241],[198],[198],[198],[198],[198],[198],[198],[198],[170],[52],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[67],[114],[72],[114],[163],[227],[254],[225],[254],[254],[254],[250],[229],[254],[254],[140],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[17],[66],[14],[67],[67],[67],[59],[21],[236],[254],[106],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[83],[253],[209],[18],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[22],[233],[255],[83],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[129],[254],[238],[44],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[59],[249],[254],[62],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[133],[254],[187],[5],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[9],[205],[248],[58],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[126],[254],[182],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[75],[251],[240],[57],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[19],[221],[254],[166],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[3],[203],[254],[219],[35],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[38],[254],[254],[77],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[31],[224],[254],[115],[1],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[133],[254],[254],[52],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[61],[242],[254],[254],[52],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[121],[254],[254],[219],[40],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[121],[254],[207],[18],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],[[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]]] -------------------------------------------------------------------------------- /spec/fixtures/lg_model.ckpt: 
-------------------------------------------------------------------------------- 1 | --- 2 | variables: 3 | weight: 4 | shape: [] 5 | data: eJwz/t1gBwAE/wHt 6 | bias: 7 | shape: [] 8 | data: eJzb/9TDHgAGfgIs 9 | global_step: 10 | -------------------------------------------------------------------------------- /spec/fixtures/lg_model/model-.ckpt: -------------------------------------------------------------------------------- 1 | --- 2 | variables: 3 | weight: 4 | shape: [] 5 | data: eJyrmlpvBwAE5wHN 6 | bias: 7 | shape: [] 8 | data: eJx7rONrDwAE7QGc 9 | global_step: 10 | -------------------------------------------------------------------------------- /spec/fixtures/lg_model/model.meta: -------------------------------------------------------------------------------- 1 | {"gs":null} -------------------------------------------------------------------------------- /spec/fixtures/linear_regression.pb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/linear_regression.pb -------------------------------------------------------------------------------- /spec/fixtures/matmul_graph.pbtxt: -------------------------------------------------------------------------------- 1 | node { 2 | name: "c" 3 | op: "Const" 4 | attr { 5 | key: "T" 6 | value { 7 | type: DT_FLOAT 8 | } 9 | } 10 | attr { 11 | key: "value" 12 | value { 13 | tensor { 14 | dtype: DT_FLOAT 15 | tensor_shape { 16 | } 17 | float_val: 5.0 18 | } 19 | } 20 | } 21 | } 22 | node { 23 | name: "c_1" 24 | op: "Const" 25 | attr { 26 | key: "T" 27 | value { 28 | type: DT_FLOAT 29 | } 30 | } 31 | attr { 32 | key: "value" 33 | value { 34 | tensor { 35 | dtype: DT_FLOAT 36 | tensor_shape { 37 | } 38 | float_val: 6.0 39 | } 40 | } 41 | } 42 | } 43 | node { 44 | name: "nested/c" 45 | op: "Const" 46 | attr { 47 | key: "T" 48 | value { 49 | type: DT_FLOAT 50 | } 51 | } 52 | attr { 53 | key: "value" 54 | value { 55 | tensor { 56 | dtype: DT_FLOAT 57 | tensor_shape { 58 | } 59 | float_val: 10.0 60 | } 61 | } 62 | } 63 | } 64 | node { 65 | name: "nested/inner/c" 66 | op: "Const" 67 | attr { 68 | key: "T" 69 | value { 70 | type: DT_FLOAT 71 | } 72 | } 73 | attr { 74 | key: "value" 75 | value { 76 | tensor { 77 | dtype: DT_FLOAT 78 | tensor_shape { 79 | } 80 | float_val: 20.0 81 | } 82 | } 83 | } 84 | } 85 | node { 86 | name: "nested/d" 87 | op: "Const" 88 | attr { 89 | key: "T" 90 | value { 91 | type: DT_FLOAT 92 | } 93 | } 94 | attr { 95 | key: "value" 96 | value { 97 | tensor { 98 | dtype: DT_FLOAT 99 | tensor_shape { 100 | } 101 | float_val: 10.0 102 | } 103 | } 104 | } 105 | } 106 | node { 107 | name: "Const" 108 | op: "Const" 109 | attr { 110 | key: "T" 111 | value { 112 | type: DT_FLOAT 113 | } 114 | } 115 | attr { 116 | key: "value" 117 | value { 118 | tensor { 119 | dtype: DT_FLOAT 120 | tensor_shape { 121 | dim { 122 | size: 2 123 | } 124 | dim { 125 | size: 2 126 | } 127 | } 128 | tensor_content: "\000\000\200?\000\000\000@\315\314\014@\000\000@@" 129 | } 130 | } 131 | } 132 | } 133 | node { 134 | name: "Const_1" 135 | op: "Const" 136 | attr { 137 | key: "T" 138 | value { 139 | type: DT_FLOAT 140 | } 141 | } 142 | attr { 143 | key: "value" 144 | value { 145 | tensor { 146 | dtype: DT_FLOAT 147 | tensor_shape { 148 | dim { 149 | size: 2 150 | } 151 | dim { 152 | size: 2 153 | } 154 | } 155 | tensor_content: "\000\000\300?\315\314\014@\315\314\014@\000\000@@" 156 | } 157 | } 158 | } 159 | } 160 | node { 161 | 
name: "Const_2" 162 | op: "Const" 163 | attr { 164 | key: "T" 165 | value { 166 | type: DT_FLOAT 167 | } 168 | } 169 | attr { 170 | key: "value" 171 | value { 172 | tensor { 173 | dtype: DT_FLOAT 174 | tensor_shape { 175 | } 176 | float_val: 1.0 177 | } 178 | } 179 | } 180 | } 181 | node { 182 | name: "matmul:0" 183 | op: "MatMul" 184 | input: "Const" 185 | input: "Const_1" 186 | attr { 187 | key: "T" 188 | value { 189 | type: DT_FLOAT 190 | } 191 | } 192 | } 193 | node { 194 | name: "add_1:0" 195 | op: "Add" 196 | input: "matmul:0" 197 | input: "Const_2" 198 | attr { 199 | key: "T" 200 | value { 201 | type: DT_FLOAT 202 | } 203 | } 204 | } 205 | node { 206 | name: "tanh_2:0" 207 | op: "Tanh" 208 | input: "add_1:0" 209 | attr { 210 | key: "T" 211 | value { 212 | type: DT_FLOAT 213 | } 214 | } 215 | } 216 | versions { 217 | producer: 26 218 | } -------------------------------------------------------------------------------- /spec/fixtures/neural_network_raw.py: -------------------------------------------------------------------------------- 1 | """ Neural Network. 2 | 3 | A 2-Hidden Layers Fully Connected Neural Network (a.k.a Multilayer Perceptron) 4 | implementation with TensorFlow. This example is using the MNIST database 5 | of handwritten digits (http://yann.lecun.com/exdb/mnist/). 6 | 7 | Links: 8 | [MNIST Dataset](http://yann.lecun.com/exdb/mnist/). 9 | 10 | Author: Aymeric Damien 11 | Project: https://github.com/aymericdamien/TensorFlow-Examples/ 12 | """ 13 | 14 | from __future__ import print_function 15 | 16 | # Import MNIST data 17 | from tensorflow.examples.tutorials.mnist import input_data 18 | mnist = input_data.read_data_sets("/tmp/data/", one_hot=True) 19 | 20 | import tensorflow as tf 21 | 22 | # Parameters 23 | learning_rate = 0.1 24 | num_steps = 500 25 | batch_size = 128 26 | display_step = 100 27 | 28 | # Network Parameters 29 | n_hidden_1 = 256 # 1st layer number of neurons 30 | n_hidden_2 = 256 # 2nd layer number of neurons 31 | num_input = 784 # MNIST data input (img shape: 28*28) 32 | num_classes = 10 # MNIST total classes (0-9 digits) 33 | 34 | # tf Graph input 35 | X = tf.placeholder("float", [None, num_input]) 36 | Y = tf.placeholder("float", [None, num_classes]) 37 | 38 | # Store layers weight & bias 39 | weights = { 40 | 'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])), 41 | 'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])), 42 | 'out': tf.Variable(tf.random_normal([n_hidden_2, num_classes])) 43 | } 44 | biases = { 45 | 'b1': tf.Variable(tf.random_normal([n_hidden_1])), 46 | 'b2': tf.Variable(tf.random_normal([n_hidden_2])), 47 | 'out': tf.Variable(tf.random_normal([num_classes])) 48 | } 49 | 50 | 51 | # Create model 52 | def neural_net(x): 53 | # Hidden fully connected layer with 256 neurons 54 | layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1']) 55 | # Hidden fully connected layer with 256 neurons 56 | layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2']) 57 | # Output fully connected layer with a neuron for each class 58 | out_layer = tf.matmul(layer_2, weights['out']) + biases['out'] 59 | return out_layer 60 | 61 | # Construct model 62 | logits = neural_net(X) 63 | prediction = tf.nn.softmax(logits) 64 | 65 | # Define loss and optimizer 66 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits( 67 | logits=logits, labels=Y)) 68 | optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate) 69 | train_op = optimizer.minimize(loss_op) 70 | 71 | # Evaluate model 72 | correct_pred = 
tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1)) 73 | accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) 74 | 75 | # Initialize the variables (i.e. assign their default value) 76 | init = tf.global_variables_initializer() 77 | 78 | # Start training 79 | with tf.Session() as sess: 80 | 81 | # Run the initializer 82 | sess.run(init) 83 | 84 | for step in range(1, num_steps+1): 85 | batch_x, batch_y = mnist.train.next_batch(batch_size) 86 | # Run optimization op (backprop) 87 | sess.run(train_op, feed_dict={X: batch_x, Y: batch_y}) 88 | if step % display_step == 0 or step == 1: 89 | # Calculate batch loss and accuracy 90 | loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, 91 | Y: batch_y}) 92 | print("Step " + str(step) + ", Minibatch Loss= " + \ 93 | "{:.4f}".format(loss) + ", Training Accuracy= " + \ 94 | "{:.3f}".format(acc)) 95 | 96 | print("Optimization Finished!") 97 | 98 | # Calculate accuracy for MNIST test images 99 | print("Testing Accuracy:", \ 100 | sess.run(accuracy, feed_dict={X: mnist.test.images, 101 | Y: mnist.test.labels})) 102 | -------------------------------------------------------------------------------- /spec/fixtures/ruby_16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/ruby_16.png -------------------------------------------------------------------------------- /spec/fixtures/ruby_512.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/ruby_512.png -------------------------------------------------------------------------------- /spec/fixtures/sample_jpeg.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/sample_jpeg.jpg -------------------------------------------------------------------------------- /spec/fixtures/sample_jpeg_bw.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jedld/tensor_stream/f0566610267f7bee3d5e618d441dc5d04e31d794/spec/fixtures/sample_jpeg_bw.jpg -------------------------------------------------------------------------------- /spec/fixtures/tensorflow.proto: -------------------------------------------------------------------------------- 1 | node { 2 | name: "Const" 3 | op: "Const" 4 | attr { 5 | key: "dtype" 6 | value { 7 | type: DT_FLOAT 8 | } 9 | } 10 | attr { 11 | key: "value" 12 | value { 13 | tensor { 14 | dtype: DT_FLOAT 15 | tensor_shape { 16 | dim { 17 | size: 2 18 | } 19 | dim { 20 | size: 4 21 | } 22 | } 23 | tensor_content: "\000\000\200?\315\314\214?\315\314\014@33S@\315\314\214?\315\314\014@33S@\000\000\200@" 24 | } 25 | } 26 | } 27 | } 28 | node { 29 | name: "Const_1" 30 | op: "Const" 31 | attr { 32 | key: "dtype" 33 | value { 34 | type: DT_INT32 35 | } 36 | } 37 | attr { 38 | key: "value" 39 | value { 40 | tensor { 41 | dtype: DT_INT32 42 | tensor_shape { 43 | } 44 | int_val: 2 45 | } 46 | } 47 | } 48 | } 49 | node { 50 | name: "v1/initial_value" 51 | op: "Const" 52 | attr { 53 | key: "dtype" 54 | value { 55 | type: DT_FLOAT 56 | } 57 | } 58 | attr { 59 | key: "value" 60 | value { 61 | tensor { 62 | dtype: DT_FLOAT 63 | tensor_shape { 64 | } 65 | float_val: 1.0 66 | } 67 | } 68 | } 69 | } 70 | node { 71 | 
name: "v1" 72 | op: "VariableV2" 73 | attr { 74 | key: "container" 75 | value { 76 | s: "" 77 | } 78 | } 79 | attr { 80 | key: "dtype" 81 | value { 82 | type: DT_FLOAT 83 | } 84 | } 85 | attr { 86 | key: "shape" 87 | value { 88 | shape { 89 | } 90 | } 91 | } 92 | attr { 93 | key: "shared_name" 94 | value { 95 | s: "" 96 | } 97 | } 98 | } 99 | node { 100 | name: "v1/Assign" 101 | op: "Assign" 102 | input: "v1" 103 | input: "v1/initial_value" 104 | attr { 105 | key: "T" 106 | value { 107 | type: DT_FLOAT 108 | } 109 | } 110 | attr { 111 | key: "_class" 112 | value { 113 | list { 114 | s: "loc:@v1" 115 | } 116 | } 117 | } 118 | attr { 119 | key: "use_locking" 120 | value { 121 | b: true 122 | } 123 | } 124 | attr { 125 | key: "validate_shape" 126 | value { 127 | b: true 128 | } 129 | } 130 | } 131 | node { 132 | name: "v1/read" 133 | op: "Identity" 134 | input: "v1" 135 | attr { 136 | key: "T" 137 | value { 138 | type: DT_FLOAT 139 | } 140 | } 141 | attr { 142 | key: "_class" 143 | value { 144 | list { 145 | s: "loc:@v1" 146 | } 147 | } 148 | } 149 | } 150 | node { 151 | name: "Const_2" 152 | op: "Const" 153 | attr { 154 | key: "dtype" 155 | value { 156 | type: DT_FLOAT 157 | } 158 | } 159 | attr { 160 | key: "value" 161 | value { 162 | tensor { 163 | dtype: DT_FLOAT 164 | tensor_shape { 165 | } 166 | float_val: 2.0 167 | } 168 | } 169 | } 170 | } 171 | node { 172 | name: "mul_1" 173 | op: "Mul" 174 | input: "Const" 175 | input: "Const_2" 176 | attr { 177 | key: "T" 178 | value { 179 | type: DT_FLOAT 180 | } 181 | } 182 | } 183 | node { 184 | name: "add" 185 | op: "Add" 186 | input: "mul_1" 187 | input: "v1/read" 188 | attr { 189 | key: "T" 190 | value { 191 | type: DT_FLOAT 192 | } 193 | } 194 | } 195 | versions { 196 | producer: 26 197 | } -------------------------------------------------------------------------------- /spec/fixtures/test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - :op: const 3 | :name: Const 4 | :inputs: [] 5 | :attrs: 6 | value: !binary |- 7 | AACAP83MjD/NzAxAMzNTQM3MjD/NzAxAMzNTQAAAgEA= 8 | data_type: :float32 9 | shape: 10 | - 2 11 | - 4 12 | - :op: const 13 | :name: Const_1 14 | :inputs: [] 15 | :attrs: 16 | value: 2.0 17 | data_type: :float32 18 | shape: [] 19 | - :op: const 20 | :name: Const_2 21 | :inputs: [] 22 | :attrs: 23 | value: 1.0 24 | data_type: :float32 25 | shape: [] 26 | - :op: variable_v2 27 | :name: v1 28 | :inputs: [] 29 | :attrs: 30 | container: 31 | :name: v1 32 | :options: {} 33 | :shape: [] 34 | data_type: :float32 35 | - :op: assign 36 | :name: assign 37 | :inputs: 38 | - v1 39 | - Const_2 40 | :attrs: {} 41 | - :op: const 42 | :name: Const_3 43 | :inputs: [] 44 | :attrs: 45 | value: "\x01\0\0\0\x02\0\0\0\x03\0\0\0\x04\0\0\0" 46 | data_type: :int32 47 | shape: 48 | - 4 49 | - :op: const 50 | :name: Const_4 51 | :inputs: [] 52 | :attrs: 53 | value: Hello World 54 | data_type: :string 55 | shape: [] 56 | - :op: placeholder 57 | :name: Placeholder 58 | :inputs: [] 59 | :attrs: 60 | data_type: :float32 61 | shape: 62 | - :op: const 63 | :name: Const_5 64 | :inputs: [] 65 | :attrs: 66 | value: | 67 | --- 68 | - h 69 | - e 70 | - l 71 | - l 72 | - o 73 | data_type: :string 74 | shape: 75 | - 5 76 | - :op: mul 77 | :name: mul 78 | :inputs: 79 | - Const 80 | - Const_1 81 | :attrs: {} 82 | - :op: add 83 | :name: add 84 | :inputs: 85 | - mul 86 | - v1 87 | :attrs: {} 88 | - :op: const 89 | :name: Const_6 90 | :inputs: [] 91 | :attrs: 92 | value: "\0\0\0\0" 93 | data_type: :int32 94 | shape: 95 | - 1 
96 | - :op: sum 97 | :name: sum 98 | :inputs: 99 | - Const 100 | - Const_6 101 | :attrs: 102 | keepdims: false 103 | -------------------------------------------------------------------------------- /spec/fixtures/test_samples/test.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | test_inputs = [ 4 | [0.5937, 0.2343, 1.4332, 0.4395], 5 | [-1.0227, -0.6915, 1.2367, 0.3452], 6 | [-0.5675, 1.0374, 1.0429, 0.8839], 7 | [-0.1066, -0.0469, -1.6317, -1.4836], 8 | [0.7835, -3.0105, 1.713, -0.4536], 9 | [-0.3076, 1.3662, -0.6537, 0.0905], 10 | [-0.2459, 0.2243, -2.7048, 0.848], 11 | ] 12 | 13 | num_inputs = 4 14 | num_neurons = 5 15 | inputs = tf.placeholder("float", shape=(None, num_inputs)) 16 | biases = tf.constant([0.5012, 1.302, -1.6217, 0.669, 0.1494], name='b1') 17 | biases2 = tf.constant([0.2012, 1.102, -1.5217, 0.469, 0.0494], name='b2') 18 | 19 | weights = tf.constant([ 20 | [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255], 21 | [-0.5921, -1.4081, 1.0614, -0.5283, 1.1832], 22 | [0.7285, -0.7844, 0.1793, -0.5275, -0.4426], 23 | [-1.4976, 0.4433, 2.2317, -2.0479, 0.7791]], name='w') 24 | 25 | weights_layer2 = tf.constant([ 26 | [-1.0465, -0.8766, 1.6849, -0.6625, 0.7928], 27 | [2.0412, 1.3564, 0.7905, 0.6434, -2.5495], 28 | [2.4276, -0.6893, -1.5917, 0.0911, 0.9112], 29 | [-0.012, 0.0794, 1.3829, -1.018, -0.9328], 30 | [0.061, 0.9791, -2.1727, -0.9553, -1.434]], name='w2') 31 | 32 | 33 | sess = tf.Session() 34 | 35 | layer_1 = tf.matmul(inputs, weights) + biases 36 | neural_net = tf.matmul(layer_1, weights_layer2) + biases2 37 | 38 | output = sess.run(neural_net, feed_dict={ inputs: test_inputs }) 39 | 40 | g0 = tf.gradients(layer_1, [weights, biases]) 41 | g = tf.gradients(neural_net, [weights, biases]) 42 | g2 = tf.gradients(neural_net, [weights_layer2, biases2]) 43 | 44 | weight_gradient0, biases_gradient0 = sess.run(g0, feed_dict = { inputs: test_inputs }) 45 | weight_gradient, biases_gradient = sess.run(g, feed_dict = { inputs: test_inputs }) 46 | weight_gradient2, biases_gradient2 = sess.run(g2, feed_dict = { inputs: test_inputs }) 47 | -------------------------------------------------------------------------------- /spec/fixtures/test_samples/test2.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | batch_x = [ 4 | [0.686274, 0.10196, 0.6509, 1.0, 0.9686, 0.49803, 0.0, 0.0, 0.0, 0.0], 5 | [0.543244, 0.10123, 0.4509, 0.0, 0.6986, 0.39803, 1.0, 0.0, 0.0, 0.0]] 6 | 7 | batch_y = [ 8 | [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], 9 | [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] 10 | ] 11 | 12 | num_input = 10 13 | num_classes = 10 14 | n_hidden_1 = 4 # 1st layer number of neurons 15 | n_hidden_2 = 4 # 2nd layer number of neurons 16 | 17 | X = batch_x #tf.placeholder(tf.float32, shape=[None, num_input]) 18 | Y = batch_y # tf.placeholder(tf.float32, shape=[None, num_classes]) 19 | 20 | h1_init = tf.constant([[0.5937, 0.2343, 1.4332, 0.4395], 21 | [-1.0227, -0.6915, 1.2367, 0.3452], 22 | [-0.5675, 1.0374, 1.0429, 0.8839], 23 | [-0.1066, -0.0469, -1.6317, -1.4836], 24 | [0.7835, -3.0105, 1.713, -0.4536], 25 | [-0.3076, 1.3662, -0.6537, 0.0905], 26 | [-0.2459, 0.2243, -2.7048, 0.848], 27 | [0.3589, 0.3542, -0.0959, -1.327], 28 | [-0.4685, 0.0844, 0.2794, 2.1275], 29 | [-1.0733, 0.6189, 0.845, 0.033]]) 30 | 31 | h2_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669], [0.1494, -0.7837, -0.2978, 1.7745], [1.9727, -0.5312, -0.7391, 0.9187], [-0.6412,
-1.4434, -0.8801, 0.9343]]) 32 | h3_init = tf.constant([[0.5012, 1.302, -1.6217, 0.669, 0.1494, -0.7837, -0.2978, 1.7745, 1.9727, -0.5312], 33 | [-0.7391, 0.9187, -0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488], 34 | [-0.9135, 1.0376, 0.8537, 0.4376, 1.3255, -0.5921, -1.4081, 1.0614, -0.5283, 1.1832], 35 | [0.7285, -0.7844, 0.1793, -0.5275, -0.4426, -1.4976, 0.4433, 2.2317, -2.0479, 0.7791]]) 36 | 37 | 38 | b1_init = tf.constant([0.1494, -0.7837, -0.2978, 1.7745]) 39 | 40 | b2_init = tf.constant([1.9727, -0.5312, -0.7391, 0.9187]) 41 | out_init = tf.constant([-0.6412, -1.4434, -0.8801, 0.9343, -0.1665, -0.0032, 0.2959, -2.0488, -0.9135, 1.0376]) 42 | 43 | h1 = tf.Variable(h1_init, dtype=tf.float32, name='h1') 44 | h2 = tf.Variable(h2_init, dtype=tf.float32, name='h2') 45 | h3 = tf.Variable(h3_init, dtype=tf.float32, name='out') 46 | 47 | b1 = tf.Variable(b1_init, dtype=tf.float32, name='b1') 48 | b2 = tf.Variable(b2_init, dtype=tf.float32, name='b2') 49 | out = tf.Variable(out_init, dtype=tf.float32, name='out2') 50 | 51 | layer_1 = tf.add(tf.matmul(X, h1), b1) 52 | # Hidden fully connected layer with 256 neurons 53 | layer_2 = tf.add(tf.matmul(layer_1, h2), b2) 54 | # Output fully connected layer with a neuron for each class 55 | 56 | sess = tf.Session() 57 | 58 | logits = tf.matmul(layer_2, h3) + out 59 | prediction = tf.nn.softmax(logits) 60 | 61 | loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y)) 62 | 63 | optimizer = tf.train.GradientDescentOptimizer(0.01) 64 | train_op = optimizer.minimize(loss_op) 65 | init = tf.global_variables_initializer() 66 | 67 | sess.run(init) 68 | # print(sess.run(layer_1)) 69 | tf.gradients(loss_op, [logits]) 70 | print("------------") 71 | 72 | print("H1: ", sess.run(h1)) 73 | print("------------ Running train 1") 74 | # sess.run(train_op, feed_dict={ X: batch_x, Y: batch_y }) 75 | sess.run(train_op) 76 | print("H1:", sess.run(h1)) 77 | print("H2:", sess.run(h2)) 78 | print("H3:", sess.run(h3)) 79 | 80 | print(sess.run(b1)) 81 | print(sess.run(b2)) 82 | print(sess.run(out)) 83 | 84 | # sess.run(train_op, feed_dict={ X: batch_x, Y: batch_y }) 85 | print("------------- Running train 2") 86 | sess.run(train_op) 87 | print("H1:", sess.run(h1)) -------------------------------------------------------------------------------- /spec/fixtures/tf.case.pbtext: -------------------------------------------------------------------------------- 1 | node { 2 | name: "Const" 3 | op: "Const" 4 | attr { 5 | key: "dtype" 6 | value { 7 | type: DT_FLOAT 8 | } 9 | } 10 | attr { 11 | key: "value" 12 | value { 13 | tensor { 14 | dtype: DT_FLOAT 15 | tensor_shape { 16 | } 17 | float_val: 1.0 18 | } 19 | } 20 | } 21 | } 22 | node { 23 | name: "Const_1" 24 | op: "Const" 25 | attr { 26 | key: "dtype" 27 | value { 28 | type: DT_FLOAT 29 | } 30 | } 31 | attr { 32 | key: "value" 33 | value { 34 | tensor { 35 | dtype: DT_FLOAT 36 | tensor_shape { 37 | } 38 | float_val: 2.0 39 | } 40 | } 41 | } 42 | } 43 | node { 44 | name: "Less" 45 | op: "Less" 46 | input: "Const" 47 | input: "Const_1" 48 | attr { 49 | key: "T" 50 | value { 51 | type: DT_FLOAT 52 | } 53 | } 54 | } 55 | node { 56 | name: "case/cond/Switch" 57 | op: "Switch" 58 | input: "Less" 59 | input: "Less" 60 | attr { 61 | key: "T" 62 | value { 63 | type: DT_BOOL 64 | } 65 | } 66 | } 67 | node { 68 | name: "case/cond/switch_t" 69 | op: "Identity" 70 | input: "case/cond/Switch:1" 71 | attr { 72 | key: "T" 73 | value { 74 | type: DT_BOOL 75 | } 76 | } 77 | } 78 | node { 
79 | name: "case/cond/switch_f" 80 | op: "Identity" 81 | input: "case/cond/Switch" 82 | attr { 83 | key: "T" 84 | value { 85 | type: DT_BOOL 86 | } 87 | } 88 | } 89 | node { 90 | name: "case/cond/pred_id" 91 | op: "Identity" 92 | input: "Less" 93 | attr { 94 | key: "T" 95 | value { 96 | type: DT_BOOL 97 | } 98 | } 99 | } 100 | node { 101 | name: "case/cond/Const" 102 | op: "Const" 103 | input: "^case/cond/switch_t" 104 | attr { 105 | key: "dtype" 106 | value { 107 | type: DT_INT32 108 | } 109 | } 110 | attr { 111 | key: "value" 112 | value { 113 | tensor { 114 | dtype: DT_INT32 115 | tensor_shape { 116 | } 117 | int_val: 17 118 | } 119 | } 120 | } 121 | } 122 | node { 123 | name: "case/cond/Const_1" 124 | op: "Const" 125 | input: "^case/cond/switch_f" 126 | attr { 127 | key: "dtype" 128 | value { 129 | type: DT_INT32 130 | } 131 | } 132 | attr { 133 | key: "value" 134 | value { 135 | tensor { 136 | dtype: DT_INT32 137 | tensor_shape { 138 | } 139 | int_val: 23 140 | } 141 | } 142 | } 143 | } 144 | node { 145 | name: "case/cond/Merge" 146 | op: "Merge" 147 | input: "case/cond/Const_1" 148 | input: "case/cond/Const" 149 | attr { 150 | key: "N" 151 | value { 152 | i: 2 153 | } 154 | } 155 | attr { 156 | key: "T" 157 | value { 158 | type: DT_INT32 159 | } 160 | } 161 | } 162 | versions { 163 | producer: 27 164 | } -------------------------------------------------------------------------------- /spec/fixtures/ts_test_graph_simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - :op: const 3 | :name: Const 4 | :data_type: :float32 5 | :inputs: [] 6 | :attrs: 7 | :value: 8 | - - 1.0 9 | - 1.1 10 | - 2.2 11 | - 3.3 12 | - - 1.1 13 | - 2.2 14 | - 3.3 15 | - 4.0 16 | :data_type: :float32 17 | :shape: 18 | - 2 19 | - 4 20 | - :op: const 21 | :name: Const_1 22 | :data_type: :float32 23 | :inputs: [] 24 | :attrs: 25 | :value: 2.0 26 | :data_type: :float32 27 | :shape: [] 28 | - :op: const 29 | :name: Const_2 30 | :data_type: :float32 31 | :inputs: [] 32 | :attrs: 33 | :value: 1.0 34 | :data_type: :float32 35 | :shape: [] 36 | - :op: variable_v2 37 | :name: variable_v2 38 | :data_type: :float32 39 | :inputs: [] 40 | :attrs: 41 | :var_name: v1 42 | :data_type: :float32 43 | - :op: const 44 | :name: Const_3 45 | :data_type: :int32 46 | :inputs: [] 47 | :attrs: 48 | :value: 49 | - 1 50 | - 2 51 | - 3 52 | - 4 53 | :data_type: :int32 54 | :shape: 55 | - 4 56 | - :op: const 57 | :name: Const_4 58 | :data_type: :string 59 | :inputs: [] 60 | :attrs: 61 | :value: Hello World 62 | :data_type: :string 63 | :shape: [] 64 | - :op: placeholder 65 | :name: Placeholder 66 | :data_type: :float32 67 | :inputs: [] 68 | :attrs: 69 | :data_type: :float32 70 | :shape: 71 | - :op: const 72 | :name: Const_5 73 | :data_type: :string 74 | :inputs: [] 75 | :attrs: 76 | :value: 77 | - h 78 | - e 79 | - l 80 | - l 81 | - o 82 | :data_type: :string 83 | :shape: 84 | - 5 85 | - :op: mul 86 | :name: mul 87 | :data_type: :float32 88 | :inputs: 89 | - Const 90 | - Const_1 91 | :attrs: {} 92 | - :op: add 93 | :name: add 94 | :data_type: :float32 95 | :inputs: 96 | - mul 97 | - variable_v2 98 | :attrs: {} 99 | - :op: const 100 | :name: Const_6 101 | :data_type: :int32 102 | :inputs: [] 103 | :attrs: 104 | :value: 105 | - 0 106 | :data_type: :int32 107 | :shape: 108 | - 1 109 | - :op: sum 110 | :name: sum 111 | :data_type: :float32 112 | :inputs: 113 | - Const 114 | - Const_6 115 | :attrs: 116 | :keepdims: false 117 | 
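The YAML fixtures above (test.yaml and ts_test_graph_simple.yaml) are serialized graphs that the specs deserialize with TensorStream::YamlLoader. A minimal sketch of loading one and evaluating a node, assuming only the load_from_file and lookup-by-node-name behavior exercised in freezer_spec.rb further down; the fixture path and variable names here are illustrative:

require "tensor_stream"

# Deserialize a graph from a YAML fixture; nodes are addressable by their serialized :name.
graph = TensorStream::YamlLoader.new.load_from_file("spec/fixtures/ts_test_graph_simple.yaml")

sess = TensorStream.session(:ruby_evaluator)
sum_node = graph["sum"] # the reduce-sum node declared at the end of the fixture
puts sess.run(sum_node) # depends only on the two constants, so no variable initialization is needed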
-------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require "bundler/setup" 2 | require "simplecov" 3 | SimpleCov.start 4 | require 'pry-byebug' 5 | require "tensor_stream" 6 | require "awesome_print" 7 | 8 | 9 | Dir["./spec/support/**/*.rb"].sort.each { |f| require f } 10 | 11 | RSpec.configure do |config| 12 | # Enable flags like --only-failures and --next-failure 13 | config.example_status_persistence_file_path = ".rspec_status" 14 | 15 | config.expect_with :rspec do |c| 16 | c.syntax = :expect 17 | end 18 | 19 | config.before(:each) do 20 | TensorStream::Tensor.reset_counters 21 | # TensorStream::Operation.reset_counters 22 | TensorStream.reset_default_graph 23 | end 24 | end 25 | 26 | # Helper function to truncate floating point values (for testing) 27 | # truncation is done in tests since different machines return the last digits of 28 | # fp values differently 29 | def tr(t, places = 4) 30 | if t.is_a?(Array) 31 | return t.collect do |v| 32 | tr(v, places) 33 | end 34 | end 35 | 36 | return t unless t.is_a?(Float) 37 | 38 | t.round(places) 39 | end 40 | 41 | def trf(t, places) 42 | if t.is_a?(Array) 43 | return t.collect do |v| 44 | trf(v, places) 45 | end 46 | end 47 | 48 | return t unless t.is_a?(Float) 49 | t.truncate(places) 50 | end 51 | -------------------------------------------------------------------------------- /spec/support/freezer_spec.rb: -------------------------------------------------------------------------------- 1 | ## 2 | # Tests that deal with model serialization and deserialization 3 | # 4 | RSpec.shared_examples "freezer ops" do 5 | require "tensor_stream/utils/freezer" 6 | let(:ts) { TensorStream } 7 | let(:freezer) { TensorStream::Freezer.new } 8 | let(:output_file_location) { "/tmp/lg_model_freezed.yaml" } 9 | 10 | before do 11 | File.delete(output_file_location) if File.exist?(output_file_location) 12 | end 13 | 14 | before(:each) do 15 | TensorStream::Tensor.reset_counters 16 | TensorStream::Operation.reset_counters 17 | tf.reset_default_graph 18 | sess.clear_session_cache 19 | end 20 | 21 | specify "convert a checkpoint to a freezed model" do 22 | checkpoint_file = File.join("spec", "fixtures", "lg_model") 23 | 24 | y1 = freezer.convert(sess, checkpoint_file, output_file_location) 25 | actual = File.read(output_file_location) 26 | expected = File.read(File.join("spec", "fixtures", "lg_model_freezed.yaml")) 27 | expect(actual).to eq(expected) 28 | 29 | # check if model works 30 | target_graph = TensorStream::YamlLoader.new.load_from_file(output_file_location) 31 | 32 | X = target_graph["X"] 33 | pred = target_graph["add"] 34 | expect(tr(sess.run(pred, feed_dict: {X => 0.2}))).to eq(0.8514) 35 | end 36 | 37 | specify "load a freezed model and eval it" do 38 | target_graph = TensorStream::YamlLoader.new.load_from_file(File.join("spec", "fixtures", "mnist.yaml")) 39 | # Load test images 40 | image_1 = File.open(File.join("spec", "fixtures", "0_image.png"), 'rb') { |io| io.read } 41 | image_2 = File.open(File.join("spec", "fixtures", "1_image.png"), 'rb') { |io| io.read } 42 | decoded_image_1 = TensorStream.image.decode_png(image_1, channels: 1) 43 | decoded_image_2 = TensorStream.image.decode_png(image_2, channels: 1) 44 | 45 | input = target_graph["Placeholder"] 46 | output = TensorStream.argmax(target_graph["out"], 1) 47 | 48 | reshaped_images = 255.0.t - [decoded_image_1, decoded_image_2].t.cast(:float32) 49 | 50 | 
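# Note: `.t` above comes from TensorStream's monkey patches (see
# lib/tensor_stream/monkey_patches), which turn plain Floats and Arrays into
# tensors; the 255.0 - pixel subtraction presumably inverts the PNGs to the
# white-on-black convention the MNIST model was trained on.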
result = sess.run(output, feed_dict: {input => reshaped_images}) 51 | expect(result).to eq([7, 2]) 52 | end 53 | end 54 | -------------------------------------------------------------------------------- /spec/support/saver_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.shared_examples "TensorStream::Train::Saver" do 2 | before(:each) do 3 | TensorStream::Tensor.reset_counters 4 | TensorStream::Operation.reset_counters 5 | tf.reset_default_graph 6 | sess.clear_session_cache 7 | end 8 | 9 | let(:tf) { TensorStream } 10 | 11 | it "saves models using the saver" do 12 | v1 = tf.get_variable("v1", shape: [3], initializer: tf.zeros_initializer) 13 | v2 = tf.get_variable("v2", shape: [5], initializer: tf.zeros_initializer) 14 | 15 | inc_v1 = v1.assign(v1 + 1.0) 16 | dec_v2 = v2.assign(v2 - 1.0) 17 | 18 | # Add an op to initialize the variables. 19 | init_op = tf.global_variables_initializer 20 | 21 | # Add ops to save and restore all the variables. 22 | saver = tf::Train::Saver.new 23 | 24 | # Later, launch the model, initialize the variables, do some work, and save the 25 | # variables to disk. 26 | sess.run(init_op) 27 | # Do some work with the model. 28 | sess.run(inc_v1) 29 | sess.run(dec_v2) 30 | # Save the variables to disk. 31 | save_path = saver.save(sess, "/tmp/model") 32 | print("Model saved in path: %s" % save_path) 33 | end 34 | 35 | it "restores variables using the saver" do 36 | tf.reset_default_graph 37 | 38 | # Create some variables. 39 | v1 = tf.get_variable("v1", shape: [3]) 40 | v2 = tf.get_variable("v2", shape: [5]) 41 | 42 | # Add ops to save and restore all the variables. 43 | saver = tf::Train::Saver.new 44 | 45 | # Later, launch the model, use the saver to restore variables from disk, and 46 | # do some work with the model. 47 | 48 | # Restore variables from disk. 49 | saver.restore(sess, "/tmp/model") 50 | 51 | print("Model restored.") 52 | # Check the values of the variables 53 | 54 | expect(sess.run(v1)).to eq([1.0, 1.0, 1.0]) 55 | expect(sess.run(v2)).to eq([-1.0, -1.0, -1.0, -1.0, -1.0]) 56 | end 57 | end 58 | -------------------------------------------------------------------------------- /spec/support/supported_op.rb: -------------------------------------------------------------------------------- 1 | module SupportedOp 2 | def supported_op(op, &block) 3 | op = op.is_a?(Symbol) ? 
op.to_s : op 4 | op = op.delete(".") 5 | if described_class.ops.key?(op.to_sym) 6 | context(".#{op}", &block) 7 | end 8 | end 9 | end 10 | -------------------------------------------------------------------------------- /spec/support/variable_op_spec.rb: -------------------------------------------------------------------------------- 1 | RSpec.shared_examples "supported variable behaviors" do 2 | extend SupportedOp 3 | let(:ts) { TensorStream } 4 | before(:each) do 5 | TensorStream::Tensor.reset_counters 6 | TensorStream::Operation.reset_counters 7 | ts.reset_default_graph 8 | end 9 | 10 | it "can use variables in placeholders" do 11 | v1 = tf.variable([1.0, 1.0]) 12 | init = tf.global_variables_initializer 13 | puts "init variables" 14 | sess.run(init) 15 | 16 | x = Float.placeholder 17 | f = x + 2 18 | expect(sess.run(f, feed_dict: { x => v1 })).to eq([3.0, 3.0]) 19 | end 20 | end -------------------------------------------------------------------------------- /spec/tensor_stream/debugging/debugging_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | RSpec.describe TensorStream::Debugging do 4 | before(:each) do 5 | TensorStream::Tensor.reset_counters 6 | TensorStream::Operation.reset_counters 7 | ts.reset_default_graph 8 | sess.clear_session_cache 9 | end 10 | 11 | let(:ts) { TensorStream } 12 | let(:sess) { ts.session } 13 | context "#add_check_numerics_ops" do 14 | specify do 15 | a = ts.constant([1.0, 1.0, 1.0]) 16 | b = ts.constant([0.0, 0.0, 0.0]) 17 | f = a / b + 1 18 | 19 | ts.add_check_numerics_ops 20 | expect { sess.run(f) }.to raise_error 21 | end 22 | end 23 | end 24 | -------------------------------------------------------------------------------- /spec/tensor_stream/device_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | RSpec.describe TensorStream::Graph do 4 | let(:tf) { TensorStream } 5 | context ".list_local_devices" do 6 | specify do 7 | expect(tf.list_local_devices).to include "job:localhost/ts:ruby:cpu" 8 | end 9 | end 10 | 11 | context ".device placement" do 12 | specify do 13 | v = tf.device("/device:GPU:1") { 14 | tf.get_variable("v", shape: [1]) 15 | } 16 | expect(v.device).to eq("/device:GPU:1") 17 | end 18 | end 19 | end 20 | -------------------------------------------------------------------------------- /spec/tensor_stream/examples/basic_operation_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | require "matrix" 4 | 5 | RSpec.describe "Illustrates basic tensorstream operations" do 6 | it "performs matrix multiplication" do 7 | # Basic constant operations 8 | # The value returned by the constructor represents the output 9 | # of the Constant op. 10 | a = TensorStream.constant(2) 11 | b = TensorStream.constant(3) 12 | 13 | # Launch the default graph. 14 | TensorStream.session do |sess| 15 | puts("a=2, b=3") 16 | puts("Addition with constants: %i" % sess.run(a + b)) 17 | expect(sess.run(a + b)).to eq(5.0) 18 | puts("Multiplication with constants: %i" % sess.run(a * b)) 19 | expect(sess.run(a * b)).to eq(6.0) 20 | 21 | # Basic Operations with variable as graph input 22 | # The value returned by the constructor represents the output 23 | # of the Variable op. 
(define as input when running session) 24 | # tf Graph input 25 | a = TensorStream.placeholder(:int16) 26 | b = TensorStream.placeholder(:int16) 27 | 28 | # Define some operations 29 | add = TensorStream.add(a, b) 30 | mul = TensorStream.multiply(a, b) 31 | 32 | # Launch the default graph. 33 | TensorStream.session do |sess| 34 | # Run every operation with variable input 35 | puts("Addition with variables: %i" % sess.run(add, feed_dict: {a => 2, b => 3})) 36 | puts("Multiplication with variables: %i" % sess.run(mul, feed_dict: {a => 2, b => 3})) 37 | 38 | # ---------------- 39 | # In more detail: 40 | # Matrix Multiplication from TensorFlow official tutorial 41 | 42 | # Create a Constant op that produces a 1x2 matrix. The op is 43 | # added as a node to the default graph. 44 | # 45 | # The value returned by the constructor represents the output 46 | # of the Constant op. 47 | matrix1 = TensorStream.constant([[3.0, 3.0]]) 48 | 49 | # Create another Constant that produces a 2x1 matrix. 50 | matrix2 = TensorStream.constant([[2.0], [2.0]]) 51 | 52 | # Create a Matmul op that takes 'matrix1' and 'matrix2' as inputs. 53 | # The returned value, 'product', represents the result of the matrix 54 | # multiplication. 55 | product = TensorStream.matmul(matrix1, matrix2) 56 | 57 | # To run the matmul op we call the session 'run()' method, passing 'product' 58 | # which represents the output of the matmul op. This indicates to the call 59 | # that we want to get the output of the matmul op back. 60 | # 61 | # All inputs needed by the op are run automatically by the session. They 62 | # typically are run in parallel. 63 | # 64 | # The call 'run(product)' thus causes the execution of three ops in the 65 | # graph: the two constants and matmul. 66 | # 67 | # The output of the op is returned in 'result' as a Ruby `Array` object. 68 | TensorStream.session do |sess| 69 | result = sess.run(product) 70 | expect(result).to eq([[12.0]]) 71 | end 72 | end 73 | end 74 | end 75 | end 76 | -------------------------------------------------------------------------------- /spec/tensor_stream/examples/hello_world_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | require "matrix" 4 | 5 | RSpec.describe "Hello world sample" do 6 | it "prints hello world" do 7 | # Simple hello world using TensorStream 8 | 9 | # Create a Constant op 10 | hello = TensorStream.constant("Hello, TensorStream!") 11 | 12 | # Start the TensorStream session 13 | sess = TensorStream.session(:ruby_evaluator) 14 | 15 | expect(sess.run(hello)).to eq("Hello, TensorStream!") 16 | puts(sess.run(hello)) 17 | end 18 | end 19 | -------------------------------------------------------------------------------- /spec/tensor_stream/examples/iris.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require "brains" 4 | 5 | # This neural network will predict the species of an iris based on sepal and petal size 6 | # Dataset: http://en.wikipedia.org/wiki/Iris_flower_data_set 7 | 8 | rows = File.readlines("iris.data").map {|l| l.chomp.split(",") } 9 | 10 | rows.shuffle!
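# The shuffle above matters: iris.data lists the samples grouped by species,
# so without it the train/test split below would be dominated by one class.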
11 | 12 | label_encodings = { 13 | "Iris-setosa" => [1, 0, 0], 14 | "Iris-versicolor" => [0, 1, 0], 15 | "Iris-virginica" => [0, 0, 1], 16 | } 17 | 18 | x_data = rows.map {|row| row[0, 4].map(&:to_f) } 19 | y_data = rows.map {|row| label_encodings[row[4]] } 20 | 21 | # Normalize data values before feeding into network 22 | normalize = ->(val, high, low) { (val - low) / (high - low) } # maps input to float between 0 and 1 23 | 24 | columns = (0..3).map { |i| 25 | x_data.map {|row| row[i] } 26 | } 27 | 28 | x_data.map! do |row| 29 | row.map.with_index do |val, j| 30 | max, min = columns[j].max, columns[j].min 31 | normalize.call(val, max, min) 32 | end 33 | end 34 | 35 | x_train = x_data.slice(0, 100) 36 | y_train = y_data.slice(0, 100) 37 | 38 | x_test = x_data.slice(100, 50) 39 | y_test = y_data.slice(100, 50) 40 | 41 | test_cases = [] 42 | x_train.each_with_index do |x, index| 43 | test_cases << [x, y_train[index]] 44 | end 45 | 46 | validation_cases = [] 47 | x_test.each_with_index do |x, index| 48 | validation_cases << [x, y_test[index]] 49 | end 50 | 51 | # Build a 3 layer network: 4 input neurons, 4 hidden neurons, 3 output neurons 52 | # Bias neurons are automatically added to input + hidden layers; no need to specify these 53 | nn = Brains::Net.create(4, 3, 1, {neurons_per_layer: 4}) 54 | nn.randomize_weights 55 | 56 | prediction_success = ->(actual, ideal) { 57 | predicted = (0..2).max_by {|i| actual[i] } 58 | ideal[predicted] == 1 59 | } 60 | 61 | mse = ->(actual, ideal) { 62 | errors = actual.zip(ideal).map {|a, i| a - i } 63 | (errors.inject(0) {|sum, err| sum + err**2}) / errors.length.to_f 64 | } 65 | 66 | error_rate = ->(errors, total) { ((errors / total.to_f) * 100).round } 67 | 68 | run_test = ->(nn, inputs, expected_outputs) { 69 | success, failure, errsum = 0, 0, 0 70 | inputs.each.with_index do |input, i| 71 | output = nn.feed input 72 | prediction_success.call(output, expected_outputs[i]) ? success += 1 : failure += 1 73 | errsum += mse.call(output, expected_outputs[i]) 74 | end 75 | [success, failure, errsum / inputs.length.to_f] 76 | } 77 | 78 | puts "Testing the untrained network..." 79 | 80 | success, failure, avg_mse = run_test.call(nn, x_test, y_test) 81 | 82 | puts "Untrained classification success: #{success}, failure: #{failure} (classification error: #{error_rate.call(failure, x_test.length)}%, mse: #{(avg_mse * 100).round(2)}%)" 83 | 84 | puts "\nTraining the network...\n\n" 85 | 86 | t1 = Time.now 87 | result = nn.optimize(test_cases, 0.01, 1_000) { |i, error| 88 | puts "#{i} #{error}" 89 | } 90 | 91 | # puts result 92 | puts "\nDone training the network: #{result[:iterations]} iterations, #{(result[:error] * 100).round(2)}% mse, #{(Time.now - t1).round(1)}s" 93 | 94 | puts "\nTesting the trained network..."
95 | 96 | success, failure, avg_mse = run_test.call(nn, x_test, y_test) 97 | 98 | puts "Trained classification success: #{success}, failure: #{failure} (classification error: #{error_rate.call(failure, x_test.length)}%, mse: #{(avg_mse * 100).round(2)}%)" 99 | -------------------------------------------------------------------------------- /spec/tensor_stream/examples/layers_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | require "matrix" 4 | 5 | RSpec.describe "create layers sample" do 6 | xit "creates a dense layer that accepts input vectors" do 7 | x = TensorStream.placeholder(:float32, shape: [nil, 3]) 8 | linear_model = TensorStream.layers.dense(units: 1) 9 | y = linear_model(x) 10 | puts(y) 11 | expect(y.to_s).to eq("") 12 | end 13 | end 14 | -------------------------------------------------------------------------------- /spec/tensor_stream/libraries/layers.rb: -------------------------------------------------------------------------------- 1 | module TensorStream 2 | class Layers 3 | class Dense 4 | def initialize(inputs, units, activation: nil, use_bias: true, 5 | kernel_initializer: nil, 6 | bias_initializer: -> { TensorStream.zeros_initializer }, 7 | trainable: true, 8 | name: nil) 9 | end 10 | end 11 | end 12 | end 13 | -------------------------------------------------------------------------------- /spec/tensor_stream/nn_ops_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::NN do 5 | let(:tf) { TensorStream } # Tensorflow compatibility alias 6 | let(:sess) { TensorStream.session(:ruby_evaluator) } 7 | 8 | context ".relu" do 9 | it "Computes rectified linear: max(features, 0)."
do 10 | outputs = tf.constant([-1.0, -1.1, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0]) 11 | f = tf.nn.relu(outputs) 12 | expect(sess.run(f)).to eq([0, 0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0]) 13 | end 14 | 15 | specify "gradients" do 16 | outputs = tf.constant([-1.0, -1.1, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0]) 17 | f = tf.nn.relu(outputs) 18 | g = tf.gradients(f, [outputs]) 19 | expect(sess.run(g)).to eq([[0.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]) 20 | end 21 | end 22 | end 23 | -------------------------------------------------------------------------------- /spec/tensor_stream/operation_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::Operation do 5 | let(:tf) { TensorStream } # allow calls to look like tensorflow 6 | let(:sess) { tf.session } 7 | 8 | before(:each) do 9 | TensorStream::Tensor.reset_counters 10 | TensorStream::Operation.reset_counters 11 | tf.reset_default_graph 12 | sess.clear_session_cache 13 | end 14 | 15 | # determine constant operations to aid evaluator in optimizing the graph 16 | specify "constant inference" do 17 | x = tf.placeholder(:float32) 18 | y = tf.variable(2.0, name: "y") 19 | a = tf.constant(1.0) 20 | b = tf.constant(2.0) 21 | c = a + b 22 | d = tf.sin(c) 23 | f = d + x 24 | g = a + y 25 | h = tf.cos(g) 26 | 27 | expect(d.is_const).to equal(true) 28 | expect(c.is_const).to equal(true) 29 | expect(f.is_const).to equal(false) 30 | expect(g.is_const).to equal(false) 31 | expect(h.is_const).to equal(false) 32 | end 33 | 34 | xcontext ".to_math" do 35 | specify "generates a math string for debugging" do 36 | a = tf.constant(1.0) 37 | b = tf.constant(2.0) 38 | c = tf.constant(1.0) 39 | 40 | f = tf.sin(tf.cos((a * b % tf.ones_like(c) - c)**2)) / 1 41 | expect(f.to_math(true).delete("\n").delete(" ")).to eq("(sin(cos(((mod(Const_1,ones_like(Const_2))-Const_2)^Const:0)))/Const_1:0)") 42 | end 43 | end 44 | end 45 | -------------------------------------------------------------------------------- /spec/tensor_stream/profile/report_tool_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | RSpec.describe TensorStream::ReportTool do 4 | let(:ts) { TensorStream } 5 | let(:session) { ts.session(profile_enabled: true) } 6 | 7 | before(:each) do 8 | TensorStream::Tensor.reset_counters 9 | TensorStream::Operation.reset_counters 10 | ts.reset_default_graph 11 | session.clear_session_cache 12 | end 13 | 14 | context ".profile_for" do 15 | specify "Generates profile report information" do 16 | shapes = [32, 32] 17 | a = ts.constant(session.run(ts.random_uniform(shapes))) 18 | b = ts.constant(session.run(ts.random_uniform(shapes))) 19 | a1 = a.dot(b) 20 | a2 = a1 + b 21 | a3 = ts.sigmoid(a2) 22 | a4 = ts.reduce_sum(a3) 23 | session.run(a4) 24 | profile = TensorStream::ReportTool.profile_for(session) 25 | name, op, _elapsed, shape, _source = profile.first 26 | expect(name).to eq "sum" 27 | expect(shape).to eq [] 28 | end 29 | end 30 | end 31 | -------------------------------------------------------------------------------- /spec/tensor_stream/serializers/graphml_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::Graphml do 5 | let(:tf) { TensorStream } 6 | let(:sess) { tf.session } 7 | let(:fixture) { File.join("spec", "fixtures", "test.graphml") } 8 |
let(:serializer) do 9 | TensorStream::Graphml.new 10 | end 11 | 12 | before do 13 | @fixture = File.read(fixture) 14 | tf.reset_default_graph 15 | end 16 | 17 | xit "saves a graph into graphml format" do 18 | # construct a graph 19 | a = tf.constant([[1.0, 1.1, 2.2, 3.3], [1.1, 2.2, 3.3, 4.0]]) 20 | b = tf.constant(2) 21 | c = tf.variable(1.0, name: "v1") 22 | d = tf.constant([1, 2, 3, 4]) 23 | e = tf.constant("Hello World") 24 | f = tf.placeholder(:float32, shape: [2, 2]) 25 | g = tf.constant(["h", "e", "l", "l", "o"]) 26 | func = a * b + c * f 27 | func2 = tf.reduce_sum(func, [0]) 28 | grad = tf.gradients(func2, [a, d]) 29 | tf.train.write_graph(grad, "/tmp", "ts_test_graph.graphml", serializer: described_class) 30 | expected_content = File.read(fixture) 31 | test_content = File.read(File.join("/tmp", "ts_test_graph.graphml")) 32 | expect(test_content).to eq(expected_content) 33 | end 34 | end 35 | -------------------------------------------------------------------------------- /spec/tensor_stream/serializers/pbtext_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::Pbtext do 5 | let(:tf) { TensorStream } 6 | let(:sess) { tf.session } 7 | let(:serializer) do 8 | TensorStream::Pbtext.new 9 | end 10 | 11 | before do 12 | @fixture = File.read(File.join("spec", "fixtures", "test.pbtxt.proto")) 13 | tf.reset_default_graph 14 | end 15 | 16 | xit "saves a graph into pbtext format" do 17 | # construct a graph 18 | a = tf.constant([[1.0, 1.1, 2.2, 3.3], [1.1, 2.2, 3.3, 4.0]]) 19 | b = tf.constant(2.0) 20 | c = tf.variable(1.0, name: "v1") 21 | d = tf.constant([1, 2, 3, 4]) 22 | e = tf.constant("Hello World") 23 | f = tf.placeholder(:float32) 24 | g = tf.constant(["h", "e", "l", "l", "o"]) 25 | func = a * b + c 26 | func2 = tf.reduce_sum(a, [0]) 27 | 28 | tf.train.write_graph(tf.get_default_graph, "/tmp", "ts_test_graph.pbtext") 29 | expected_content = File.read(File.join("spec", "fixtures", "test.pbtxt.proto")) 30 | test_content = File.read(File.join("/tmp", "ts_test_graph.pbtext")) 31 | expect(test_content).to eq(expected_content) 32 | end 33 | end 34 | -------------------------------------------------------------------------------- /spec/tensor_stream/session_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::Session do 5 | let(:tf) { TensorStream } 6 | before(:each) do 7 | TensorStream::Tensor.reset_counters 8 | TensorStream::Operation.reset_counters 9 | TensorStream::Graph.create_default 10 | end 11 | 12 | context "#run" do 13 | it "can execute operations" do 14 | a = TensorStream.constant(3.0, dtype: TensorStream::Types.float32) 15 | b = TensorStream.constant(4.0) # also tf.float32 implicitly 16 | c = TensorStream.constant(5.0) 17 | total = a + b 18 | product = a * c 19 | sess = TensorStream.session 20 | expect(sess.run(total)).to eq(7.0) 21 | expect(sess.run(product)).to eq(15.0) 22 | end 23 | 24 | it "different values on multiple runs" do 25 | tf.set_random_seed(1234) 26 | vec = tf.random_uniform([3]) 27 | out1 = vec + 1 28 | out2 = vec + 2 29 | 30 | sess = tf.session(:ruby_evaluator) 31 | expect(sess.run(out1)).to eq([1.1915194503788924, 1.6221087710398319, 1.4377277390071144]) 32 | expect(sess.run(out2)).to eq([2.7853585837137693, 2.7799758081188033, 2.2725926052826417]) 33 | expect(sess.run(vec)).to eq([0.2764642551430967, 
0.8018721775350193, 0.9581393536837052]) 34 | end 35 | 36 | it "uniform values on a single run" do 37 | vec = TensorStream.random_uniform([3], seed: 1234) 38 | out1 = vec + 1 39 | out2 = vec + 2 40 | 41 | sess = TensorStream.session(:ruby_evaluator) 42 | expect(sess.run(out1, out2)).to eq([[1.1915194503788924, 1.6221087710398319, 1.4377277390071144], [2.1915194503788924, 2.622108771039832, 2.4377277390071144]]) 43 | end 44 | 45 | it "can assign placeholders" do 46 | x = TensorStream.placeholder(TensorStream::Types.float32) 47 | y = TensorStream.placeholder(TensorStream::Types.float32) 48 | p = TensorStream.placeholder(TensorStream::Types.float32, name: "my_placeholder") 49 | z = x + y + p 50 | sess = TensorStream.session(:ruby_evaluator) 51 | 52 | expect(sess.run(z, feed_dict: {x => 3, y => 4.5, "my_placeholder" => 1})).to eql(8.5) 53 | expect(sess.run(z, feed_dict: {x => [1, 3], y => [2, 4], "my_placeholder" => [1, 1]})).to eql([4.0, 8.0]) 54 | end 55 | 56 | context "#close" do 57 | it "closes session and releases resources" do 58 | sess = TensorStream.session 59 | sess.close 60 | expect(sess.closed?).to be 61 | end 62 | end 63 | 64 | context "#list_devices" do 65 | let(:sess) { TensorStream.session } 66 | it "lists available devices in this session" do 67 | expect(sess.list_devices.map(&:name)).to include "cpu" 68 | end 69 | end 70 | 71 | xit "evaluates all while retaining some variables" do 72 | session = TensorStream::Session.default_session 73 | x = TensorStream.variable(1.0, :float32) 74 | y = TensorStream.variable(2.0, :float32) 75 | 76 | expression = TensorStream.sin(x) + TensorStream.cos(y) 77 | session.run(TensorStream.global_variables_initializer) 78 | partial_eval = session.run(expression, retain: [x]) 79 | expect(partial_eval.to_math).to eq("(sin(Variable:0) + -0.4161468365471424)") 80 | end 81 | 82 | context "exceptions" do 83 | specify "checks for missing placeholder exceptions" do 84 | session = TensorStream::Session.default_session 85 | x = tf.placeholder(:float32) 86 | a = tf.constant(1.0) + x 87 | session.run(a, feed_dict: {x => 1}) 88 | expect { 89 | session.run(a) 90 | }.to raise_error TensorStream::ValueError 91 | end 92 | end 93 | end 94 | end 95 | -------------------------------------------------------------------------------- /spec/tensor_stream/tensor_shape_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::TensorShape do 5 | context ".infer_shape" do 6 | let(:shape) { TensorStream::TensorShape } 7 | it "returns the resulting shape, given two shapes" do 8 | expect(shape.infer_shape([], [])).to eq([]) 9 | expect(shape.infer_shape([1], [])).to eq([1]) 10 | expect(shape.infer_shape([2, 2], [])).to eq([2, 2]) 11 | expect(shape.infer_shape([nil, 2], [])).to eq([nil, 2]) 12 | expect(shape.infer_shape([nil, 2], [5, 2])).to eq([nil, 2]) 13 | expect(shape.infer_shape([5, 5], [5, 1])).to eq([5, 5]) 14 | end 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /spec/tensor_stream/tensor_stream_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream do 5 | let(:tf) { TensorStream } 6 | 7 | before do 8 | TensorStream.disable_eager_execution 9 | end 10 | 11 | describe ".VERSION" do 12 | it "returns the version" do 13 | expect(TensorStream.version).to eq("1.0.9") 14 | end 15 | end 16 | 17 
| xdescribe ".enable_eager_execution" do 18 | it "enables eager execution" do 19 | TensorStream.enable_eager_execution 20 | expect(TensorStream.executing_eagerly?).to be 21 | a = TensorStream.constant(2) 22 | b = TensorStream.constant(3) 23 | print("a = %i" % a) 24 | print("b = %i" % b) 25 | 26 | x = [[2.0]] 27 | m = TensorStream.matmul(x, x) 28 | expect(tr(m.to_a)).to eq([[4.0]]) 29 | 30 | d = TensorStream.constant(3.1) 31 | expect(tr(d.to_f)).to eq(3.1) 32 | end 33 | end 34 | 35 | describe ".trainable_variables" do 36 | it "Retrieves trainable variables for the current graph" do 37 | a = tf.variable(1, dtype: :float) 38 | b = tf.variable(2, dtype: :int32) 39 | c = tf.variable(2, dtype: :float32, trainable: false) 40 | 41 | expect(TensorStream.trainable_variables.map(&:name)).to eq([a, b].map(&:name)) 42 | end 43 | end 44 | 45 | context ".variable_scope" do 46 | it "allows to define prefixes" do 47 | tf.variable_scope("foo") do 48 | tf.variable_scope("bar") do 49 | x = tf.zeros([], name: "qux") 50 | expect(x.name).to eq("foo/bar/qux") 51 | end 52 | end 53 | end 54 | 55 | specify "set default initializer" do 56 | tf.set_random_seed(1234) 57 | tf.variable_scope(initializer: tf.random_normal([2, 2])) do 58 | v1 = tf.get_variable("test", shape: [2, 2]) 59 | expect(v1.name).to eq("test") 60 | sess = tf.session(:ruby_evaluator) 61 | sess.run(tf.global_variables_initializer) 62 | expect(sess.run(v1)).to eq([[0.5011628459350929, 1.301972948852967], [-1.621722019401658, 0.6690221526288901]]) 63 | end 64 | end 65 | end 66 | end 67 | -------------------------------------------------------------------------------- /spec/tensor_stream/train/learning_rate_decay_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | RSpec.describe TensorStream::Train::LearningRateDecay do 4 | let(:ts) { TensorStream } 5 | 6 | context ".exponential_decay" do 7 | specify "Applies exponential decay to the learning rate" do 8 | n_samples = 5 9 | 10 | m = ts.variable(0.0, dtype: :float32) 11 | b = ts.variable(0.0, dtype: :float32) 12 | global_step = ts.variable(0, trainable: false) 13 | x = ts.placeholder(:float32) 14 | y = ts.placeholder(:float32) 15 | 16 | pred = m * x + b 17 | 18 | cost = ((pred - y)**2).reduce(:+) / (2 * n_samples) 19 | 20 | init = ts.global_variables_initializer 21 | sess = ts.session 22 | sess.run(init) 23 | 24 | starter_learning_rate = 0.1 25 | learning_rate = ts.train.exponential_decay(starter_learning_rate, global_step, 26 | 3, 0.96, staircase: true) 27 | 28 | learning_step = TensorStream::Train::GradientDescentOptimizer.new(learning_rate).minimize(cost, global_step: global_step) 29 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 30 | expect(sess.run(learning_rate)).to eq(0.1) 31 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 32 | expect(sess.run(learning_rate)).to eq(0.1) 33 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 34 | expect(sess.run(learning_rate)).to eq(0.096) 35 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 36 | expect(sess.run(learning_rate)).to eq(0.096) 37 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 38 | expect(sess.run(learning_rate)).to eq(0.096) 39 | sess.run(learning_step, feed_dict: {x => 6.2, y => 26.3}) 40 | expect(sess.run(learning_rate)).to eq(0.09216) 41 | end 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /spec/tensor_stream/train/utils_spec.rb: 
-------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | 3 | RSpec.describe TensorStream::Train::Utils do 4 | before(:each) do 5 | TensorStream::Tensor.reset_counters 6 | TensorStream::Operation.reset_counters 7 | TensorStream.reset_default_graph 8 | end 9 | 10 | context "#get_global_step" do 11 | it "gets the current global step variable" do 12 | expect(TensorStream.train.get_global_step).to be_nil 13 | end 14 | 15 | it "gets the current global step variable when there is one" do 16 | TensorStream.train.create_global_step 17 | expect(TensorStream.train.get_global_step).to be 18 | end 19 | end 20 | 21 | context "#create_global_step" do 22 | it "creates a global step if there is none" do 23 | global_step_var = TensorStream.train.create_global_step 24 | expect(global_step_var).to be 25 | 26 | # should be in GLOBAL_STEP collection 27 | expect(TensorStream.get_collection(TensorStream::GraphKeys::GLOBAL_STEP)).to eq([global_step_var]) 28 | end 29 | 30 | it "raises an error if there is already one" do 31 | TensorStream.train.create_global_step 32 | expect { 33 | TensorStream.train.create_global_step 34 | }.to raise_error TensorStream::ValueError 35 | end 36 | end 37 | end 38 | -------------------------------------------------------------------------------- /spec/tensor_stream/variable_spec.rb: -------------------------------------------------------------------------------- 1 | require "spec_helper" 2 | require "benchmark" 3 | 4 | RSpec.describe TensorStream::Variable do 5 | let(:tf) { TensorStream } 6 | before(:each) do 7 | described_class.reset_counters 8 | TensorStream::Operation.reset_counters 9 | tf.reset_default_graph 10 | TensorStream::Session.default_session.clear_session_cache 11 | end 12 | 13 | let(:sess) { tf.session(:ruby_evaluator) } 14 | 15 | context "Variable" do 16 | it "defines a variable" do 17 | # Set model weights 18 | w = TensorStream.variable(rand, name: "weight") 19 | expect(TensorStream.get_collection(TensorStream::GraphKeys::GLOBAL_VARIABLES)).to include(w) 20 | end 21 | 22 | it "can specify initializer" do 23 | mammal = TensorStream.variable("Elephant", dtype: :string) 24 | expect { mammal.eval }.to raise_exception 25 | end 26 | 27 | it "can access after initialized" do 28 | mammal = TensorStream.variable("Elephant", dtype: :string) 29 | expect(TensorStream.get_collection(TensorStream::GraphKeys::TRAINABLE_VARIABLES)).to include(mammal) 30 | sess.run(TensorStream.global_variables_initializer) 31 | expect(sess.run(mammal)).to eq("Elephant") 32 | end 33 | 34 | specify "has a default data type" do 35 | w = TensorStream.variable(rand, name: "weight") 36 | expect(w.dtype).to eq(:float32) 37 | end 38 | end 39 | 40 | context ".get_variable" do 41 | let!(:variable) { 42 | tf.get_variable("other_variable", dtype: TensorStream::Types.int32, 43 | initializer: TensorStream.constant([23, 42])) 44 | } 45 | 46 | it "creates a variable and adds it to the graph" do 47 | expect(TensorStream.get_collection(TensorStream::GraphKeys::GLOBAL_VARIABLES)).to include(variable) 48 | end 49 | 50 | it "cannot access variable unless it is initialized" do 51 | expect { variable.eval }.to raise_exception 52 | end 53 | 54 | it "can access after initialized" do 55 | sess.run(TensorStream.global_variables_initializer) 56 | expect(variable.eval).to eq([23, 42]) 57 | end 58 | 59 | it "retrieves an existing variable" do 60 | w = tf.variable(rand, name: "weight") 61 | tf.variable_scope("foo", reuse: true) do |scope| 62 | e = tf.variable(rand, name: "weight")
63 | expect(e.name).to eq("foo/weight") 64 | expect(e).to eq(w) 65 | end 66 | end 67 | 68 | it "adds to a collection" do 69 | w = tf.get_variable("weight", dtype: :int32, shape: [5, 5], collections: ["test"]) 70 | expect(tf.get_default_graph.get_collection("test")).to include(w) 71 | end 72 | end 73 | end 74 | -------------------------------------------------------------------------------- /tensor_stream.gemspec: -------------------------------------------------------------------------------- 1 | lib = File.expand_path("../lib", __FILE__) 2 | $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) 3 | require "tensor_stream/version" 4 | 5 | Gem::Specification.new do |spec| 6 | spec.name = "tensor_stream" 7 | spec.version = TensorStream::VERSION 8 | spec.authors = ["Joseph Emmanuel Dayo"] 9 | spec.email = ["joseph.dayo@gmail.com"] 10 | 11 | spec.summary = "A pure Ruby TensorFlow implementation" 12 | spec.description = "A reimplementation of TensorFlow for Ruby. This is a ground-up implementation with no dependency on TensorFlow. Effort has been made to keep the programming style as close to TensorFlow as possible; it ships with a pure Ruby evaluator by default, with support for an OpenCL evaluator." 13 | spec.homepage = "http://www.github.com/jedld/tensor_stream" 14 | spec.license = "MIT" 15 | 16 | # Prevent pushing this gem to RubyGems.org. To allow pushes either set the 'allowed_push_host' 17 | # to allow pushing to a single host or delete this section to allow pushing to any host. 18 | if spec.respond_to?(:metadata) 19 | spec.metadata["allowed_push_host"] = "https://rubygems.org" 20 | spec.metadata["source_code_uri"] = "https://github.com/jedld/tensor_stream" 21 | spec.metadata["changelog_uri"] = "https://github.com/jedld/tensor_stream/blob/master/CHANGELOG.md" 22 | else 23 | raise "RubyGems 2.0 or newer is required to protect against " \ 24 | "public gem pushes." 25 | end 26 | 27 | spec.files = `git ls-files -z`.split("\x0").reject do |f| 28 | f.match(%r{^(test|spec|features)/}) 29 | end 30 | spec.bindir = "exe" 31 | spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) } 32 | spec.require_paths = ["lib"] 33 | 34 | spec.add_development_dependency "bundler" 35 | spec.add_development_dependency "rake", "~> 12.3" 36 | spec.add_development_dependency "rspec", "~> 3.0" 37 | spec.add_development_dependency "awesome_print" 38 | spec.add_development_dependency "rubocop" 39 | if RUBY_ENGINE == "ruby" 40 | spec.add_development_dependency "pry-byebug" 41 | spec.add_development_dependency "byepry" 42 | spec.add_development_dependency "tensor_stream-opencl" 43 | spec.add_dependency "jpeg" 44 | end 45 | spec.add_development_dependency "colorize" 46 | spec.add_development_dependency "rspec_junit_formatter" 47 | spec.add_development_dependency "mnist-learn" 48 | spec.add_development_dependency "chakin-rb" 49 | spec.add_development_dependency "simplecov" 50 | spec.add_development_dependency "standard" 51 | spec.add_development_dependency "rubyzip" 52 | spec.add_dependency "deep_merge" 53 | spec.add_dependency "concurrent-ruby" 54 | spec.add_dependency "chunky_png" 55 | end 56 | --------------------------------------------------------------------------------
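Taken together, the specs above double as usage documentation for the gem. A minimal end-to-end sketch of the API they exercise; this is a hypothetical composite, not a file from the repository, though every call in it appears in the specs above:

require "tensor_stream"

tf = TensorStream

# Build a tiny graph: a placeholder fed at run time, a variable, and a constant.
x = tf.placeholder(:float32)
w = tf.variable(2.0, name: "w")
b = tf.constant(1.0)
f = w * x + b

# Variables must be initialized before the graph can be evaluated.
sess = tf.session(:ruby_evaluator)
sess.run(tf.global_variables_initializer)
puts sess.run(f, feed_dict: { x => 3.0 }) # => 7.0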