├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── .travis.yml ├── CONTRIBUTING.md ├── Examples ├── ExampleCommon │ ├── CatalogUtil.cs │ ├── ExampleCommon.csproj │ ├── ImageUtil.cs │ └── Properties │ │ └── AssemblyInfo.cs ├── ExampleInceptionInference │ ├── ExampleInceptionInference.csproj │ ├── Program.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── README.md │ └── packages.config ├── ExampleObjectDetection │ ├── App.config │ ├── ExampleObjectDetection.csproj │ ├── ImageEditor.cs │ ├── Program.cs │ ├── Properties │ │ └── AssemblyInfo.cs │ ├── README.md │ ├── demo-picture.jpg │ ├── packages.config │ └── test_images │ │ └── input.jpg ├── FExampleInceptionInference │ ├── AssemblyInfo.fs │ ├── FSharpExampleInceptionInference.fsproj │ ├── Program.fs │ └── README.md ├── ImageCompression │ ├── ImageCompression.fs │ ├── ImageCompression.fsproj │ └── example.png └── README.md ├── LICENSE ├── Learn ├── DataConverter.cs ├── Datasets │ ├── Helper.cs │ └── MNIST.cs ├── Learn.csproj ├── Properties │ └── AssemblyInfo.cs ├── README.md └── packages.config ├── Makefile ├── OpGenerator ├── OpGenerator.cs ├── OpGenerator.csproj ├── Opdefs.cs ├── Properties │ └── AssemblyInfo.cs ├── README.md ├── api_def.cs └── packages.config ├── README.md ├── SampleTest ├── LowLevelTests.cs ├── Properties │ └── AssemblyInfo.cs ├── README.md ├── SampleTest.cs ├── SampleTest.csproj └── packages.config ├── TensorFlowSharp.sln ├── TensorFlowSharp ├── Buffer.cs ├── Operations.g.cs ├── OperationsExtras.cs ├── Ops │ ├── ArrayOps.cs │ └── RandomOps.cs ├── Optimizer.cs ├── Queue.cs ├── README.md ├── Tensor.cs ├── TensorFlowSharp.csproj ├── Tensorflow.cs ├── Variable.cs ├── brute.pl └── nuget │ └── build │ └── net45 │ └── TensorFlowSharp.targets ├── azure-pipelines.yml ├── docfx ├── api │ ├── .gitignore │ ├── TensorFlow.yml │ ├── TensorFlow │ │ ├── TensorFlow.Adagrad.yml │ │ ├── TensorFlow.AdaptiveOptimizer.yml │ │ ├── TensorFlow.DeviceAttributes.yml │ │ ├── TensorFlow.DeviceType.yml │ │ ├── TensorFlow.MonoPInvokeCallbackAttribute.yml │ │ ├── TensorFlow.Optimizer.yml │ │ ├── TensorFlow.PaddingFIFOQueue.yml │ │ ├── TensorFlow.QueueBase.yml │ │ ├── TensorFlow.RMSProp.yml │ │ ├── TensorFlow.SGD.yml │ │ ├── TensorFlow.TFAttributeMetadata.yml │ │ ├── TensorFlow.TFAttributeType.yml │ │ ├── TensorFlow.TFBuffer.BufferReleaseFunc.yml │ │ ├── TensorFlow.TFBuffer.yml │ │ ├── TensorFlow.TFCode.yml │ │ ├── TensorFlow.TFCore.yml │ │ ├── TensorFlow.TFDataType.yml │ │ ├── TensorFlow.TFDependencies.yml │ │ ├── TensorFlow.TFDevice.yml │ │ ├── TensorFlow.TFDisposable.yml │ │ ├── TensorFlow.TFDisposableThreadSafe.yml │ │ ├── TensorFlow.TFException.yml │ │ ├── TensorFlow.TFFunction.yml │ │ ├── TensorFlow.TFGraph.WhileConstructor.yml │ │ ├── TensorFlow.TFGraph.yml │ │ ├── TensorFlow.TFImportGraphDefOptions.yml │ │ ├── TensorFlow.TFInput.yml │ │ ├── TensorFlow.TFLibrary.yml │ │ ├── TensorFlow.TFOperation.yml │ │ ├── TensorFlow.TFOperationDesc.yml │ │ ├── TensorFlow.TFOutput.yml │ │ ├── TensorFlow.TFScope.yml │ │ ├── TensorFlow.TFSession.PartialRunToken.yml │ │ ├── TensorFlow.TFSession.Runner.yml │ │ ├── TensorFlow.TFSession.yml │ │ ├── TensorFlow.TFSessionOptions.yml │ │ ├── TensorFlow.TFShape.yml │ │ ├── TensorFlow.TFStatus.yml │ │ ├── TensorFlow.TFTensor.Deallocator.yml │ │ ├── TensorFlow.TFTensor.yml │ │ └── TensorFlow.Variable.yml │ ├── index.md │ └── toc.yml ├── articles │ ├── intro.md │ ├── start.md │ └── toc.yml ├── docfx.json ├── index.md └── toc.yml ├── docs ├── api │ ├── TensorFlow.html │ ├── 
TensorFlow │ │ ├── TensorFlow.Adagrad.html │ │ ├── TensorFlow.AdaptiveOptimizer.html │ │ ├── TensorFlow.DeviceAttributes.html │ │ ├── TensorFlow.DeviceType.html │ │ ├── TensorFlow.MonoPInvokeCallbackAttribute.html │ │ ├── TensorFlow.Optimizer.html │ │ ├── TensorFlow.PaddingFIFOQueue.html │ │ ├── TensorFlow.QueueBase.html │ │ ├── TensorFlow.RMSProp.html │ │ ├── TensorFlow.SGD.html │ │ ├── TensorFlow.TFAttributeMetadata.html │ │ ├── TensorFlow.TFAttributeType.html │ │ ├── TensorFlow.TFBuffer.BufferReleaseFunc.html │ │ ├── TensorFlow.TFBuffer.html │ │ ├── TensorFlow.TFCode.html │ │ ├── TensorFlow.TFCore.html │ │ ├── TensorFlow.TFDataType.html │ │ ├── TensorFlow.TFDependencies.html │ │ ├── TensorFlow.TFDevice.html │ │ ├── TensorFlow.TFDisposable.html │ │ ├── TensorFlow.TFDisposableThreadSafe.html │ │ ├── TensorFlow.TFException.html │ │ ├── TensorFlow.TFFunction.html │ │ ├── TensorFlow.TFGraph.WhileConstructor.html │ │ ├── TensorFlow.TFGraph.html │ │ ├── TensorFlow.TFImportGraphDefOptions.html │ │ ├── TensorFlow.TFInput.html │ │ ├── TensorFlow.TFLibrary.html │ │ ├── TensorFlow.TFOperation.html │ │ ├── TensorFlow.TFOperationDesc.html │ │ ├── TensorFlow.TFOutput.html │ │ ├── TensorFlow.TFScope.html │ │ ├── TensorFlow.TFSession.PartialRunToken.html │ │ ├── TensorFlow.TFSession.Runner.html │ │ ├── TensorFlow.TFSession.html │ │ ├── TensorFlow.TFSessionOptions.html │ │ ├── TensorFlow.TFShape.html │ │ ├── TensorFlow.TFStatus.html │ │ ├── TensorFlow.TFTensor.Deallocator.html │ │ ├── TensorFlow.TFTensor.html │ │ └── TensorFlow.Variable.html │ ├── index.html │ └── toc.html ├── articles │ ├── intro.html │ ├── start.html │ └── toc.html ├── favicon.ico ├── fonts │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.svg │ ├── glyphicons-halflings-regular.ttf │ ├── glyphicons-halflings-regular.woff │ └── glyphicons-halflings-regular.woff2 ├── index.html ├── logo.svg ├── manifest.json ├── search-stopwords.json ├── styles │ ├── docfx.css │ ├── docfx.js │ ├── docfx.vendor.css │ ├── docfx.vendor.js │ ├── lunr.min.js │ ├── main.css │ ├── main.js │ └── search-worker.js ├── toc.html └── xrefmap.yml ├── ecmadocs └── en │ ├── TensorFlow │ ├── Adagrad.xml │ ├── AdaptiveOptimizer.xml │ ├── DeviceAttributes.xml │ ├── DeviceType.xml │ ├── MonoPInvokeCallbackAttribute.xml │ ├── Optimizer.xml │ ├── PaddingFIFOQueue.xml │ ├── QueueBase.xml │ ├── RMSProp.xml │ ├── SGD.xml │ ├── TFAttributeMetadata.xml │ ├── TFAttributeType.xml │ ├── TFBuffer+BufferReleaseFunc.xml │ ├── TFBuffer.xml │ ├── TFCode.xml │ ├── TFCore.xml │ ├── TFDataType.xml │ ├── TFDependencies.xml │ ├── TFDevice.xml │ ├── TFDisposable.xml │ ├── TFDisposableThreadSafe.xml │ ├── TFException.xml │ ├── TFFunction.xml │ ├── TFGraph+WhileConstructor.xml │ ├── TFGraph.xml │ ├── TFImportGraphDefOptions.xml │ ├── TFInput.xml │ ├── TFLibrary.xml │ ├── TFOperation.xml │ ├── TFOperationDesc.xml │ ├── TFOutput.xml │ ├── TFScope.xml │ ├── TFSession+PartialRunToken.xml │ ├── TFSession+Runner.xml │ ├── TFSession.xml │ ├── TFSessionOptions.xml │ ├── TFShape.xml │ ├── TFStatus.xml │ ├── TFTensor+Deallocator.xml │ ├── TFTensor.xml │ └── Variable.xml │ ├── index.xml │ └── ns-TensorFlow.xml └── tests ├── TensorFlowSharp.Tests.CSharp ├── ArrayTests.cs ├── BitwiseOperationTests.cs ├── ClipTests.cs ├── CondTests.cs ├── GettingStarted.cs ├── GradientTests.cs ├── MathTests.cs ├── OptimizerTests.cs ├── PaddingFIFOQueueTests.cs ├── PartialRunTests.cs ├── Properties │ └── AssemblyInfo.cs ├── SessionTests.cs ├── ShapeTests.cs ├── TensorFlowSharp.Tests.CSharp.csproj ├── 
TensorTests.cs ├── TestData │ ├── Adagrad │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── AdagradTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── Adam │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── AdamTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── Momentum │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── MomentumNesterov │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── MomentumNesterovTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── MomentumTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── RMSProp │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── RMSPropTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── SGD │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── SGDMnist │ │ ├── expected.txt │ │ └── optimizer_lr_test.py │ ├── SGDMnistGPU │ │ └── expected.txt │ └── SGDTimeDecay │ │ ├── expected.txt │ │ └── optimizer_lr_test.py ├── TestUtils.cs ├── VariableTests.cs └── packages.config └── TensorFlowSharp.Tests ├── ArithmeticOperationTests.fs ├── AssemblyInfo.fs ├── NeuralNetOperationTests.fs ├── TensorFlowSharp.Tests.fsproj └── packages.config /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | **Describe the bug** 8 | A clear and concise description of what the bug is. 9 | 10 | **To Reproduce** 11 | Include steps to reproduce the issue locally, include it as a zip file. 12 | 13 | Note: I do not have time to create models, train models, create samples from scratch and then adding your 3-4 lines of code to a sample to debug your problem. You need to provide a complete test case. 14 | 15 | **Expected behavior** 16 | A clear and concise description of what you expected to happen. 17 | 18 | **Screenshots** 19 | If applicable, add screenshots to help explain your problem. 20 | 21 | **Desktop (please complete the following information):** 22 | - OS: [e.g. iOS] 23 | - Browser [e.g. chrome, safari] 24 | - Version [e.g. 22] 25 | 26 | **Smartphone (please complete the following information):** 27 | - Device: [e.g. iPhone6] 28 | - OS: [e.g. iOS8.1] 29 | - Browser [e.g. stock browser, safari] 30 | - Version [e.g. 22] 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | **Is your feature request related to a problem? Please describe.** 8 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 9 | 10 | **Describe the solution you'd like** 11 | A clear and concise description of what you want to happen. 12 | 13 | **Describe alternatives you've considered** 14 | A clear and concise description of any alternative solutions or features you've considered. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 
18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Autosave files 2 | *~ 3 | 4 | # build 5 | [Oo]bj/ 6 | [Bb]in/ 7 | packages/ 8 | TestResults/ 9 | 10 | # globs 11 | Makefile.in 12 | *.DS_Store 13 | *.sln.cache 14 | *.suo 15 | *.cache 16 | *.pidb 17 | *.userprefs 18 | *.usertasks 19 | config.log 20 | config.make 21 | config.status 22 | aclocal.m4 23 | install-sh 24 | autom4te.cache/ 25 | *.user 26 | *.tar.gz 27 | tarballs/ 28 | test-results/ 29 | Thumbs.db 30 | 31 | # Mac bundle stuff 32 | *.dmg 33 | *.app 34 | 35 | # resharper 36 | *_Resharper.* 37 | *.Resharper 38 | 39 | # dotCover 40 | *.dotCover 41 | 42 | .vs 43 | *.GhostDoc.xml -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: csharp 2 | solution: TensorFlowSharp.sln 3 | script: 4 | - tsh_version=1.13.0 5 | - wget "https://www.nuget.org/api/v2/package/TensorFlowSharp/$tsh_version" 6 | - mkdir native 7 | - tar xzvf $tsh_version runtimes/linux/native 8 | - cp runtimes/linux/native/* native 9 | - tar xzvf $tsh_version runtimes/win7-x64/native 10 | - cp runtimes/win7-x64/native/* native 11 | - tar xzvf $tsh_version runtimes/osx/native 12 | - cp runtimes/osx/native/* native 13 | - msbuild /t:Restore $TRAVIS_BUILD_DIR/TensorFlowSharp.sln 14 | - cd $TRAVIS_BUILD_DIR/ 15 | - msbuild /p:Configuration=Release TensorFlowSharp.sln 16 | - cp -R $TRAVIS_BUILD_DIR/native/*.dylib $TRAVIS_BUILD_DIR/tests/TensorFlowSharp.Tests/bin/Release/ 17 | - cp -R $TRAVIS_BUILD_DIR/native/*.dylib $TRAVIS_BUILD_DIR/tests/TensorFlowSharp.Tests.CSharp/bin/Release/ 18 | - cd $TRAVIS_BUILD_DIR/packages/xunit.runner.console.2.2.0/tools 19 | - cp "$TRAVIS_BUILD_DIR/tests/TensorFlowSharp.Tests/obj/Release/FSharp.Core.dll" . 20 | - mono --arch=64 xunit.console.exe "$TRAVIS_BUILD_DIR/tests/TensorFlowSharp.Tests/bin/Release/TensorFlowSharp.Tests.dll" "$TRAVIS_BUILD_DIR/tests/TensorFlowSharp.Tests.CSharp/bin/Release/TensorFlowSharp.Tests.CSharp.dll" 21 | mono: "5.10.0" 22 | dotnet: "2.1.4" 23 | os: "osx" 24 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | TensorFlowSharp are bindings to the native TensorFlow library. 2 | 3 | You can either use the TensorFlow C-library release binaries, or build 4 | your own from source. Here are some pre-built TensorFlow binaries you 5 | can use for each platform: 6 | 7 | - Linux 8 | - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-1.1.0.tar.gz 9 | - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-1.1.0.tar.gz 10 | - Mac: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.1.0.tar.gz 11 | - Windows: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.2.0-rc0.zip 12 | 13 | Unpack the above .tar.gz suitable for your system on a prefix that your 14 | system's dynamic linker can use, for example, go to `/usr/local` and unpack there. 15 | 16 | Mac note: the package contains a `.so` file, you will need to rename this to `.dylib` for 17 | it to work. 18 | 19 | Once you do that, you need to open the solution file on the top 20 | level directory and build. 
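As a concrete sketch, the download-and-install step on macOS might look like the
following (the URL is the Mac link listed above; `/usr/local` is just the assumed
prefix — any directory your dynamic linker searches will do):

```bash
# Sketch only: adjust the URL/version to match the links above.
curl -L -O https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-1.1.0.tar.gz
sudo tar -xzf libtensorflow-cpu-darwin-x86_64-1.1.0.tar.gz -C /usr/local

# Mac only: the archive ships lib/libtensorflow.so; rename it so the runtime can load it.
sudo mv /usr/local/lib/libtensorflow.so /usr/local/lib/libtensorflow.dylib
```

With the native library in place, open and build the solution as just described.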
This will produce both the TensorFlowSharp 21 | library as well as compile the tests and samples. 22 | 23 | ## Building your own native TensorFlow library 24 | 25 | You will wan to use Visual Studio 2017 or Visual Studio for Mac to build. 26 | 27 | To build the TensorFlow C library from source, 28 | [follow these instructions](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/go/README.md#building-the-tensorflow-c-library-from-source). 29 | 30 | This includes checking out the Tensorflow sources, installing Bazel, 31 | and building the core. 32 | 33 | Once you do that, you will need to build the shared library. 34 | First, in the tensorflow directory, run: 35 | 36 | ```bash 37 | ./configure 38 | ``` 39 | 40 | and answer the various prompts about your build. Important: 41 | building with CUDA support provides better runtime performance 42 | but has additional dependencies as discussed in the Tensorflow 43 | installation Web page. 44 | 45 | Once configured, run: 46 | 47 | ```bash 48 | bazel build -c opt //tensorflow:libtensorflow.so 49 | ``` 50 | 51 | If you want debug symbols for Tensorflow, while debugging the binding: 52 | 53 | ```bash 54 | bazel build -c dbg --strip=never //tensorflow:libtensorflow.so 55 | ``` 56 | 57 | You will need the generated library (`libtensorflow.so`) to be installed in a 58 | system accessible location like `/usr/local/lib` 59 | 60 | On Linux: 61 | 62 | ```bash 63 | sudo cp bazel-bin/tensorflow/libtensorflow.so /usr/local/lib/ 64 | ``` 65 | 66 | On MacOS: 67 | 68 | ```bash 69 | sudo cp bazel-bin/tensorflow/libtensorflow.so /usr/local/lib/libtensorflow.dylib 70 | ``` 71 | 72 | ## Running the test 73 | 74 | I am currently using Visual Studio for Mac to do the development, but this 75 | should work on Windows with VS and Linux with MonoDevelop. 76 | 77 | Before the solution will run you will need the shared library generated to 78 | be on a location accessibly by the Mono runtime (for example /usr/local/lib). 79 | 80 | While Tensorflow builds a library with the extension .so, you will need 81 | to make sure that it has the proper name for your platform (tensorflow.dll on Windows, 82 | tensorflow.dylib on Mac) and copy that there. 83 | 84 | Tensorflow is a 64-bit library, so you will need to use a 64-bit Mono to run, 85 | at home (where I am doing this work), I have a copy of 64-bit Mono on /mono, 86 | so you will want to set that in your project configuration, to do this: 87 | 88 | Ensure that your Build/Compiler settings set "Platform Target" to "x64". 89 | 90 | Open the solution file in the top directory, and when you hit run, this will 91 | run the API test. 
92 | -------------------------------------------------------------------------------- /Examples/ExampleCommon/CatalogUtil.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Globalization; 4 | using System.IO; 5 | using System.Text.RegularExpressions; 6 | 7 | namespace ExampleCommon 8 | { 9 | public static class CatalogUtil 10 | { 11 | // regexes with different new line symbols 12 | private static string CATALOG_ITEM_PATTERN = @"item {{{0} name: ""(?.*)""{0} id: (?\d+){0} display_name: ""(?.*)""{0}}}"; 13 | private static string CATALOG_ITEM_PATTERN_ENV = string.Format(CultureInfo.InvariantCulture, CATALOG_ITEM_PATTERN, Environment.NewLine); 14 | private static string CATALOG_ITEM_PATTERN_UNIX = string.Format (CultureInfo.InvariantCulture, CATALOG_ITEM_PATTERN, "\n"); 15 | 16 | /// 17 | /// Reads catalog of well-known objects from text file. 18 | /// 19 | /// path to the text file 20 | /// collection of items 21 | public static IEnumerable ReadCatalogItems (string file) 22 | { 23 | using (FileStream stream = File.OpenRead (file)) 24 | using (StreamReader reader = new StreamReader (stream)) { 25 | string text = reader.ReadToEnd (); 26 | if (string.IsNullOrWhiteSpace (text)) { 27 | yield break; 28 | } 29 | 30 | Regex regex = new Regex (CATALOG_ITEM_PATTERN_ENV); 31 | var matches = regex.Matches (text); 32 | if(matches.Count == 0) { 33 | regex = new Regex (CATALOG_ITEM_PATTERN_UNIX); 34 | matches = regex.Matches (text); 35 | } 36 | 37 | foreach (Match match in matches) { 38 | var name = match.Groups [1].Value; 39 | var id = int.Parse (match.Groups [2].Value); 40 | var displayName = match.Groups [3].Value; 41 | 42 | yield return new CatalogItem () { 43 | Id = id, 44 | Name = name, 45 | DisplayName = displayName 46 | }; 47 | } 48 | } 49 | } 50 | } 51 | 52 | public class CatalogItem 53 | { 54 | public int Id { get; set; } 55 | public string Name { get; set; } 56 | public string DisplayName { get; set; } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /Examples/ExampleCommon/ExampleCommon.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | Debug 6 | AnyCPU 7 | {116BA176-F67C-4066-8685-C080705BAA16} 8 | Library 9 | Properties 10 | ExampleCommon 11 | ExampleCommon 12 | v4.7.1 13 | 512 14 | 15 | 0.2 16 | 17 | 18 | true 19 | full 20 | false 21 | bin\Debug\ 22 | DEBUG;TRACE 23 | prompt 24 | 4 25 | true 26 | 27 | 28 | pdbonly 29 | true 30 | bin\Release\ 31 | TRACE 32 | prompt 33 | 4 34 | true 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | {0264c321-34f4-46af-819e-168d1e597232} 55 | TensorFlowSharp 56 | 57 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /Examples/ExampleCommon/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 
8 | [assembly: AssemblyTitle("ExampleCommon")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("ExampleCommon")] 13 | [assembly: AssemblyCopyright("Copyright © 2017")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("116ba176-f67c-4066-8685-c080705baa16")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /Examples/ExampleInceptionInference/ExampleInceptionInference.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Debug 5 | AnyCPU 6 | {069A6736-7711-4805-8660-A267E713BC54} 7 | Exe 8 | ExampleInceptionInference 9 | ExampleInceptionInference 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG; 19 | prompt 20 | 4 21 | true 22 | x64 23 | 24 | 25 | true 26 | bin\Release 27 | prompt 28 | 4 29 | true 30 | x64 31 | 32 | 33 | 34 | 35 | ..\..\packages\Mono.Options.4.4.0.0\lib\net4-client\Mono.Options.dll 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | {0264C321-34F4-46AF-819E-168D1E597232} 48 | TensorFlowSharp 49 | 50 | 51 | {116BA176-F67C-4066-8685-C080705BAA16} 52 | ExampleCommon 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /Examples/ExampleInceptionInference/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | 4 | // Information about this assembly is defined by the following attributes. 5 | // Change them to the values specific to your project. 6 | 7 | [assembly: AssemblyTitle ("ExampleInceptionInference")] 8 | [assembly: AssemblyDescription ("")] 9 | [assembly: AssemblyConfiguration ("")] 10 | [assembly: AssemblyCompany ("")] 11 | [assembly: AssemblyProduct ("")] 12 | [assembly: AssemblyCopyright ("miguel")] 13 | [assembly: AssemblyTrademark ("")] 14 | [assembly: AssemblyCulture ("")] 15 | 16 | // The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}". 17 | // The form "{Major}.{Minor}.*" will automatically update the build and revision, 18 | // and "{Major}.{Minor}.{Build}.*" will update just the revision. 19 | 20 | [assembly: AssemblyVersion ("1.0.*")] 21 | 22 | // The following attributes are used to specify the signing key for the assembly, 23 | // if desired. See the Mono documentation for more information about signing. 
24 | 25 | //[assembly: AssemblyDelaySign(false)] 26 | //[assembly: AssemblyKeyFile("")] 27 | -------------------------------------------------------------------------------- /Examples/ExampleInceptionInference/README.md: -------------------------------------------------------------------------------- 1 | An example for using the TensorFlow C# API for image recognition 2 | using a pre-trained inception model (http://arxiv.org/abs/1512.00567). 3 | 4 | Sample usage: 5 | 6 | ``` 7 | mono ExampleInceptionInference.exe [--dir=/tmp/modeldir] imagefile... 8 | 9 | The pre-trained model takes input in the form of a 4-dimensional 10 | tensor with shape [ BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3 ], 11 | where: 12 | 13 | - BATCH_SIZE allows for inference of multiple images in one pass through the graph 14 | - IMAGE_HEIGHT is the height of the images on which the model was trained 15 | - IMAGE_WIDTH is the width of the images on which the model was trained 16 | - 3 is the (R, G, B) values of the pixel colors represented as a float. 17 | 18 | And produces as output a vector with shape [ NUM_LABELS ]. 19 | output[i] is the probability that the input image was recognized as 20 | having the i-th label. 21 | 22 | A separate file contains a list of string labels corresponding to the 23 | integer indices of the output. 24 | 25 | This example: 26 | - Loads the serialized representation of the pre-trained model into a Graph 27 | - Creates a Session to execute operations on the Graph 28 | - Converts an image file to a Tensor to provide as input to a Session run 29 | - Executes the Session and prints out the label with the highest probability 30 | 31 | To convert an image file to a Tensor suitable for input to the Inception model, 32 | this example: 33 | - Constructs another TensorFlow graph to normalize the image into a 34 | form suitable for the model (for example, resizing the image) 35 | - Creates an executes a Session to obtain a Tensor in this normalized form. -------------------------------------------------------------------------------- /Examples/ExampleInceptionInference/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/App.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/ImageEditor.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Drawing; 3 | 4 | namespace ExampleCommon 5 | { 6 | /// 7 | /// Allows to add graphic elements to the existing image. 
8 | /// 9 | public class ImageEditor : IDisposable 10 | { 11 | private Graphics _graphics; 12 | private Image _image; 13 | private string _fontFamily; 14 | private float _fontSize; 15 | private string _outputFile; 16 | 17 | public ImageEditor (string inputFile, string outputFile, string fontFamily = "Ariel", float fontSize = 12) 18 | { 19 | if (string.IsNullOrEmpty (inputFile)) { 20 | throw new ArgumentNullException (nameof (inputFile)); 21 | } 22 | 23 | if (string.IsNullOrEmpty (outputFile)) { 24 | throw new ArgumentNullException (nameof (outputFile)); 25 | } 26 | 27 | _fontFamily = fontFamily; 28 | _fontSize = fontSize; 29 | _outputFile = outputFile; 30 | 31 | _image = Bitmap.FromFile (inputFile); 32 | _graphics = Graphics.FromImage (_image); 33 | } 34 | 35 | /// 36 | /// Adds rectangle with a label in particular position of the image 37 | /// 38 | /// 39 | /// 40 | /// 41 | /// 42 | /// 43 | /// 44 | public void AddBox (float xmin, float xmax, float ymin, float ymax, string text = "", string colorName = "red") 45 | { 46 | var left = xmin * _image.Width; 47 | var right = xmax * _image.Width; 48 | var top = ymin * _image.Height; 49 | var bottom = ymax * _image.Height; 50 | 51 | 52 | var imageRectangle = new Rectangle (new Point (0, 0), new Size (_image.Width, _image.Height)); 53 | _graphics.DrawImage (_image, imageRectangle); 54 | 55 | Color color = Color.FromName(colorName); 56 | Brush brush = new SolidBrush (color); 57 | Pen pen = new Pen (brush); 58 | 59 | _graphics.DrawRectangle (pen, left, top, right - left, bottom - top); 60 | var font = new Font (_fontFamily, _fontSize); 61 | SizeF size = _graphics.MeasureString (text, font); 62 | _graphics.DrawString (text, font, brush, new PointF (left, top - size.Height)); 63 | } 64 | 65 | public void Dispose () 66 | { 67 | if (_image != null) { 68 | _image.Save (_outputFile); 69 | 70 | if (_graphics != null) { 71 | _graphics.Dispose (); 72 | } 73 | 74 | _image.Dispose (); 75 | } 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("ExampleObjectDetection")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("ExampleObjectDetection")] 13 | [assembly: AssemblyCopyright("Copyright © 2017")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 
20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("69471b59-576d-446b-8df3-96ec783c0b6c")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/README.md: -------------------------------------------------------------------------------- 1 | # Object Detection Example # 2 | 3 | This example uses tensorflow [object detection model API](https://github.com/tensorflow/models/tree/master/research/object_detection) and TensorFlowSharp library to identify multiple objects in a single image using .NET programming languages like C# and F#. 4 | 5 | ![alt tag](demo-picture.jpg) 6 | 7 | ## Run example ## 8 | 1. ``` git clone https://github.com/migueldeicaza/TensorFlowSharp ``` 9 | 2. build _TensorFlowSharp.sln_ 10 | 3. copy _'libtensorflow.dylib'_ (Mac OS) or _'libtensorflow.dll'_ (Windows) to the project output path (see where you can get the library under [Working on TensorFlowSharp](https://github.com/migueldeicaza/TensorFlowSharp#working-on-tensorflowsharp) section) 11 | 4. Run the ExampleObjectDetection util from command line: 12 | ``` 13 | ExampleObjectDetection 14 | ``` 15 | 16 | By default, the example downloads a pretrained model, but you can specify your own using the following options: 17 | 18 | _input_image_ - optional, the path to the image for processing (the default is 'test_images/input.jpg') 19 | _output_image_ - optional, the path where the image with detected objects will be saved (the default is 'test_images/output.jpg') 20 | _catalog_ - optional, the path to the '*.pbtxt' file (by default, 'mscoco_label_map.pbtxt' been loaded) 21 | _model_ - optional, the path to the '*.pb' file (by default, 'frozen_inference_graph.pb' model been used, but you can download any other from here https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md or train your own): 22 | ``` 23 | ExampleObjectDetection --input_image="/demo/input.jpg" --output_image="/demo/output.jpg" --catalog="/demo/mscoco_label_map.pbtxt" --model="/demo/frozen_inference_graph.pb" 24 | ``` 25 | 26 | ## I found an issue in the example ## 27 | If you want to address a bug or a question related to the object detection example - just create a new issue on github starting with [Object Detection Example] tag. 
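For orientation, here is a minimal sketch of what the detection pass looks like through
the TensorFlowSharp API (see `Program.cs` for the real implementation). The tensor names
below are the ones exported by the TensorFlow Object Detection API and should be verified
against your own model, and `imageTensor` is assumed to be built from the input image
elsewhere:

```csharp
using System.IO;
using TensorFlow;

static class DetectionSketch
{
    // Sketch only: `imageTensor` is assumed to be a TFTensor of shape
    // [1, height, width, 3] created from the input image.
    public static float [,] Detect (string modelFile, TFTensor imageTensor)
    {
        using (var graph = new TFGraph ()) {
            // Load the frozen detection graph.
            graph.Import (File.ReadAllBytes (modelFile));

            using (var session = new TFSession (graph)) {
                var output = session.GetRunner ()
                    .AddInput (graph ["image_tensor"] [0], imageTensor)
                    .Fetch (graph ["detection_boxes"] [0],
                            graph ["detection_scores"] [0],
                            graph ["detection_classes"] [0])
                    .Run ();

                // detection_scores is indexed [batch, detection]: scores [0, i] is the
                // confidence of the i-th box returned in detection_boxes.
                return (float [,]) output [1].GetValue (jagged: false);
            }
        }
    }
}
```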
28 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/demo-picture.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/Examples/ExampleObjectDetection/demo-picture.jpg -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /Examples/ExampleObjectDetection/test_images/input.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/Examples/ExampleObjectDetection/test_images/input.jpg -------------------------------------------------------------------------------- /Examples/FExampleInceptionInference/AssemblyInfo.fs: -------------------------------------------------------------------------------- 1 | namespace FExampleInceptionInference 2 | open System.Reflection 3 | open System.Runtime.CompilerServices 4 | 5 | [] 6 | [] 7 | [] 8 | [] 9 | [] 10 | [] 11 | [] 12 | 13 | // The assembly version has the format {Major}.{Minor}.{Build}.{Revision} 14 | 15 | [] 16 | 17 | //[] 18 | //[] 19 | 20 | () 21 | -------------------------------------------------------------------------------- /Examples/FExampleInceptionInference/FSharpExampleInceptionInference.fsproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Debug 5 | AnyCPU 6 | {03FB7F3A-6D24-4033-9B04-69AD8A198CCF} 7 | Exe 8 | FExampleInceptionInference 9 | FExampleInceptionInference 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG 19 | prompt 20 | true 21 | x64 22 | 23 | 24 | true 25 | bin\Release 26 | 27 | prompt 28 | true 29 | true 30 | x64 31 | 32 | 33 | $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets 34 | 35 | 36 | $(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | {0264C321-34F4-46AF-819E-168D1E597232} 54 | TensorFlowSharp 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /Examples/FExampleInceptionInference/README.md: -------------------------------------------------------------------------------- 1 | An example for using the TensorFlow F# API for image recognition 2 | using a pre-trained inception model (http://arxiv.org/abs/1512.00567). 3 | 4 | Sample usage: 5 | 6 | ``` 7 | mono ExampleInceptionInference.exe [--dir=/tmp/modeldir] imagefile... 8 | 9 | The pre-trained model takes input in the form of a 4-dimensional 10 | tensor with shape [ BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3 ], 11 | where: 12 | 13 | - BATCH_SIZE allows for inference of multiple images in one pass through the graph 14 | - IMAGE_HEIGHT is the height of the images on which the model was trained 15 | - IMAGE_WIDTH is the width of the images on which the model was trained 16 | - 3 is the (R, G, B) values of the pixel colors represented as a float. 
17 | 18 | And produces as output a vector with shape [ NUM_LABELS ]. 19 | output[i] is the probability that the input image was recognized as 20 | having the i-th label. 21 | 22 | A separate file contains a list of string labels corresponding to the 23 | integer indices of the output. 24 | 25 | This example: 26 | - Loads the serialized representation of the pre-trained model into a Graph 27 | - Creates a Session to execute operations on the Graph 28 | - Converts an image file to a Tensor to provide as input to a Session run 29 | - Executes the Session and prints out the label with the highest probability 30 | 31 | To convert an image file to a Tensor suitable for input to the Inception model, 32 | this example: 33 | - Constructs another TensorFlow graph to normalize the image into a 34 | form suitable for the model (for example, resizing the image) 35 | - Creates an executes a Session to obtain a Tensor in this normalized form. -------------------------------------------------------------------------------- /Examples/ImageCompression/ImageCompression.fs: -------------------------------------------------------------------------------- 1 | // 2 | // Image compression using neural networks 3 | // 4 | // From models/compression/encoder.py 5 | // 6 | open System 7 | open System.IO 8 | open TensorFlow 9 | 10 | let input = "example.png" 11 | let iteration = 15 12 | let output_codes = None 13 | let model = "../../compression_residual_gru/residual_gru.pb" 14 | 15 | let opt x = 16 | System.Nullable x 17 | 18 | // Convenience functiosn to create tensor constants from an integer and a float 19 | let iconst (graph:TFGraph) (v:int) (label:string) = 20 | graph.Const (TFTensor.op_Implicit (v), label) 21 | 22 | let input_tensor_names = 23 | [| for a in 0 .. 16 do yield sprintf "loop_%02d/add:0" a |] 24 | 25 | let output_tensor_names = 26 | let first = [|"GruBinarizer/SignBinarizer/Sign:0" |] 27 | Seq.append first [| for a in 0 .. 
16 do yield sprintf "GruBinarizer/SignBinarizer/Sign_%d:0" a |] |> Seq.toArray 28 | 29 | [] 30 | let main argv = 31 | use graph = new TFGraph() 32 | let outputs = [| for name in output_tensor_names do yield graph.[name] |]; 33 | let input_image = graph.Placeholder TFDataType.String 34 | let input_image_str = File.ReadAllBytes (input) |> TFTensor.CreateString 35 | 36 | let decoded_image = if Path.GetExtension (input) = ".png" then graph.DecodePng (input_image, channels = opt 3L) else graph.DecodeJpeg (input_image, channels = opt 3L) 37 | let expanded_image = graph.ExpandDims (decoded_image, iconst graph 0 "zero") 38 | 39 | use session = new TFSession (graph) 40 | let result = session.Run (runOptions = null, inputs = [| input_image |], inputValues = [| input_image_str |], outputs = [| |], targetOpers = [| expanded_image.Operation |]); 41 | 42 | 43 | // The following will fail unless you rebuild your tensorflow to remove the 64mb limitation 44 | // https://github.com/tensorflow/tensorflow/issues/582 45 | graph.Import (new TFBuffer (File.ReadAllBytes (model))) 46 | let input_tensor = graph.["Placeholder:0"] 47 | 48 | 0 // return an integer exit code 49 | 50 | -------------------------------------------------------------------------------- /Examples/ImageCompression/ImageCompression.fsproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Debug 5 | AnyCPU 6 | {5A493E1F-407D-4A3B-AF9B-A0F2930C1C18} 7 | Exe 8 | ImageCompression 9 | ImageCompression 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG 19 | prompt 20 | true 21 | x64 22 | 23 | 24 | true 25 | bin\Release 26 | 27 | prompt 28 | true 29 | true 30 | x64 31 | 32 | 33 | $(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets 34 | 35 | 36 | $(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | {0264C321-34F4-46AF-819E-168D1E597232} 48 | TensorFlowSharp 49 | 50 | 51 | 52 | 53 | 54 | PreserveNewest 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /Examples/ImageCompression/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/Examples/ImageCompression/example.png -------------------------------------------------------------------------------- /Examples/README.md: -------------------------------------------------------------------------------- 1 | This directory contains ports of some sample applications that I found interesting 2 | and I used to exercise the API in C# and F#. They are usually line-by-line ports 3 | and of exploratory nature, helping me learn what idioms work and which ones do not 4 | work for the binding. 5 | 6 | The ImageCompression sample will require you to use a TensorFlow that has been 7 | configured to support large protocol buffers, otherwise the program will crash. 
8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Permission is hereby granted, free of charge, to any person obtaining 2 | a copy of this software and associated documentation files (the 3 | "Software"), to deal in the Software without restriction, including 4 | without limitation the rights to use, copy, modify, merge, publish, 5 | distribute, sublicense, and/or sell copies of the Software, and to 6 | permit persons to whom the Software is furnished to do so, subject to 7 | the following conditions: 8 | 9 | The above copyright notice and this permission notice shall be 10 | included in all copies or substantial portions of the Software. 11 | 12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 13 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 14 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 15 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 16 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 17 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 18 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 19 | -------------------------------------------------------------------------------- /Learn/Datasets/Helper.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.IO; 3 | using System.Net; 4 | 5 | namespace Learn 6 | { 7 | public class Helper 8 | { 9 | public static Stream MaybeDownload (string urlBase, string trainDir, string file) 10 | { 11 | if (!Directory.Exists (trainDir)) 12 | Directory.CreateDirectory (trainDir); 13 | var target = Path.Combine (trainDir, file); 14 | if (!File.Exists (target)) { 15 | var wc = new WebClient (); 16 | wc.DownloadFile (urlBase + file, target); 17 | } 18 | return File.OpenRead (target); 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /Learn/Learn.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Debug 5 | AnyCPU 6 | {20567EDE-7C20-4734-B489-C6D303843105} 7 | Library 8 | Learn 9 | Learn 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG; 19 | prompt 20 | 4 21 | false 22 | true 23 | 24 | 25 | true 26 | bin\Release 27 | prompt 28 | 4 29 | false 30 | true 31 | 32 | 33 | 34 | 35 | 36 | ..\packages\System.ValueTuple.4.5.0\lib\net47\System.ValueTuple.dll 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | {0264C321-34F4-46AF-819E-168D1E597232} 52 | TensorFlowSharp 53 | 54 | 55 | 56 | 57 | 58 | 59 | -------------------------------------------------------------------------------- /Learn/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | 4 | // Information about this assembly is defined by the following attributes. 5 | // Change them to the values specific to your project. 
6 | 7 | [assembly: AssemblyTitle ("Learn")] 8 | [assembly: AssemblyDescription ("")] 9 | [assembly: AssemblyConfiguration ("")] 10 | [assembly: AssemblyCompany ("")] 11 | [assembly: AssemblyProduct ("")] 12 | [assembly: AssemblyCopyright ("miguel")] 13 | [assembly: AssemblyTrademark ("")] 14 | [assembly: AssemblyCulture ("")] 15 | 16 | // The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}". 17 | // The form "{Major}.{Minor}.*" will automatically update the build and revision, 18 | // and "{Major}.{Minor}.{Build}.*" will update just the revision. 19 | 20 | [assembly: AssemblyVersion ("1.0.*")] 21 | 22 | // The following attributes are used to specify the signing key for the assembly, 23 | // if desired. See the Mono documentation for more information about signing. 24 | 25 | //[assembly: AssemblyDelaySign(false)] 26 | //[assembly: AssemblyKeyFile("")] 27 | -------------------------------------------------------------------------------- /Learn/README.md: -------------------------------------------------------------------------------- 1 | This contains a port of parts of the TensorFlow Learn framework, as found on 2 | 3 | tensorflow/contrib/learn/python/learn 4 | 5 | -------------------------------------------------------------------------------- /Learn/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: doc-update yaml 2 | 3 | rebuild-docs: docs/template 4 | mdoc export-html --force-update -o docs --template=docs/template ecmadocs/en/ 5 | 6 | # Used to fetch XML doc updates from the C# compiler into the ECMA docs 7 | doc-update: 8 | mdoc update -i TensorFlowSharp/bin/Debug/TensorFlowSharp.xml -o ecmadocs/en TensorFlowSharp/bin/Debug/net471/TensorFlowSharp.dll 9 | 10 | yaml: 11 | -rm ecmadocs/en/ns-.xml 12 | mono /cvs/ECMA2Yaml/ECMA2Yaml/ECMA2Yaml/bin/Debug/ECMA2Yaml.exe --source=`pwd`/ecmadocs/en --output=`pwd`/docfx/api 13 | (cd docfx; mono ~/Downloads/docfx/docfx.exe build) 14 | 15 | -------------------------------------------------------------------------------- /OpGenerator/OpGenerator.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Debug 5 | AnyCPU 6 | {700E5928-3D5D-4B54-AD4D-BC48E79E389E} 7 | Exe 8 | OpGenerator 9 | OpGenerator 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG; 19 | prompt 20 | 4 21 | true 22 | true 23 | x64 24 | 25 | 26 | true 27 | bin\Release 28 | prompt 29 | 4 30 | true 31 | true 32 | x64 33 | 34 | 35 | 36 | 37 | 38 | ..\packages\Google.Protobuf.3.4.1\lib\net45\Google.Protobuf.dll 39 | 40 | 41 | ..\packages\protobuf-net.2.3.2\lib\net40\protobuf-net.dll 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /OpGenerator/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | 4 | // Information about this assembly is defined by the following attributes. 5 | // Change them to the values specific to your project. 
6 | 7 | [assembly: AssemblyTitle ("OpGenerator")] 8 | [assembly: AssemblyDescription ("")] 9 | [assembly: AssemblyConfiguration ("")] 10 | [assembly: AssemblyCompany ("")] 11 | [assembly: AssemblyProduct ("")] 12 | [assembly: AssemblyCopyright ("miguel")] 13 | [assembly: AssemblyTrademark ("")] 14 | [assembly: AssemblyCulture ("")] 15 | 16 | // The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}". 17 | // The form "{Major}.{Minor}.*" will automatically update the build and revision, 18 | // and "{Major}.{Minor}.{Build}.*" will update just the revision. 19 | 20 | [assembly: AssemblyVersion ("1.0.*")] 21 | 22 | // The following attributes are used to specify the signing key for the assembly, 23 | // if desired. See the Mono documentation for more information about signing. 24 | 25 | //[assembly: AssemblyDelaySign(false)] 26 | //[assembly: AssemblyKeyFile("")] 27 | -------------------------------------------------------------------------------- /OpGenerator/README.md: -------------------------------------------------------------------------------- 1 | 2 | This directory contains the operations generator, it is a tool that produces 3 | part of the TFGraph convenience API, based on the tensorflow metadata. 4 | 5 | This needs to be run if you update your libtensorflow shared library with a 6 | new version and you want to get access to new operations or when you want 7 | to upgrade the implementation. -------------------------------------------------------------------------------- /OpGenerator/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /SampleTest/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | 4 | // Information about this assembly is defined by the following attributes. 5 | // Change them to the values specific to your project. 6 | 7 | [assembly: AssemblyTitle ("SampleTest")] 8 | [assembly: AssemblyDescription ("")] 9 | [assembly: AssemblyConfiguration ("")] 10 | [assembly: AssemblyCompany ("")] 11 | [assembly: AssemblyProduct ("")] 12 | [assembly: AssemblyCopyright ("miguel")] 13 | [assembly: AssemblyTrademark ("")] 14 | [assembly: AssemblyCulture ("")] 15 | 16 | // The assembly version has the format "{Major}.{Minor}.{Build}.{Revision}". 17 | // The form "{Major}.{Minor}.*" will automatically update the build and revision, 18 | // and "{Major}.{Minor}.{Build}.*" will update just the revision. 19 | 20 | [assembly: AssemblyVersion ("1.0.*")] 21 | 22 | // The following attributes are used to specify the signing key for the assembly, 23 | // if desired. See the Mono documentation for more information about signing. 24 | 25 | //[assembly: AssemblyDelaySign(false)] 26 | //[assembly: AssemblyKeyFile("")] 27 | -------------------------------------------------------------------------------- /SampleTest/README.md: -------------------------------------------------------------------------------- 1 | This contains a simple test suite to exercise the low-level TensorFlowSharp API 2 | and ports of some simple examples on how to use the API. 
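For a flavor of the low-level API being exercised, here is a minimal sketch (not one of
the shipped tests) that builds a two-constant addition and evaluates it in a session:

```csharp
using System;
using TensorFlow;

class AddTwoConstants
{
    static void Main ()
    {
        using (var graph = new TFGraph ())
        using (var session = new TFSession (graph)) {
            // Build a tiny graph: c = 2 + 3, then evaluate it.
            TFOutput a = graph.Const (2);
            TFOutput b = graph.Const (3);
            TFOutput c = graph.Add (a, b);

            TFTensor result = session.GetRunner ().Run (c);
            Console.WriteLine (result.GetValue ()); // prints 5
        }
    }
}
```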
3 | 4 | The `LowLevelTests.cs` are the low-level tests, while the driver that 5 | shows how to use the API is in `SampleTest.cs` 6 | -------------------------------------------------------------------------------- /SampleTest/SampleTest.csproj: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | Debug 5 | AnyCPU 6 | {243E2C90-F465-4A6E-9063-8F2C138015C7} 7 | Exe 8 | SampleTest 9 | SampleTest 10 | v4.7.1 11 | 0.2 12 | 13 | 14 | true 15 | full 16 | false 17 | bin\Debug 18 | DEBUG; 19 | prompt 20 | 4 21 | true 22 | x64 23 | true 24 | 25 | 26 | true 27 | bin\Release 28 | prompt 29 | 4 30 | true 31 | x64 32 | true 33 | 34 | 35 | 36 | 37 | 38 | ..\packages\CsvHelper.2.16.3.0\lib\net45\CsvHelper.dll 39 | 40 | 41 | 42 | 43 | 44 | ..\packages\System.ValueTuple.4.5.0\lib\net47\System.ValueTuple.dll 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | {0264C321-34F4-46AF-819E-168D1E597232} 55 | TensorFlowSharp 56 | 57 | 58 | {20567EDE-7C20-4734-B489-C6D303843105} 59 | Learn 60 | 61 | 62 | 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /SampleTest/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /TensorFlowSharp/Ops/ArrayOps.cs: -------------------------------------------------------------------------------- 1 | // 2 | // ArrayOps: support for manipulating tensors 3 | // 4 | // Authors: 5 | // Stephanus van Staden 6 | // 7 | // This is a port of the Python code in tensorflow 8 | // 9 | // 10 | using System; 11 | namespace TensorFlow 12 | { 13 | public partial class TFGraph 14 | { 15 | /// 16 | /// Outputs Zero values based on shape of tensor 17 | /// 18 | /// Shape of the output tensor 19 | /// Optional Type of the Zero value. Default: Double 20 | /// Operation name, optional. 21 | /// 22 | public TFOutput Zeros (TFShape shape, TFDataType dtype = TFDataType.Double, string operName = null) 23 | { 24 | return Constant (0, shape, dtype, operName); 25 | } 26 | 27 | /// 28 | /// Outputs One values based on shape of tensor 29 | /// 30 | /// Shape of the output tensor 31 | /// Optional Type of the Zero value. Default: Double 32 | /// Operation name, optional. 33 | /// 34 | public TFOutput Ones (TFShape shape, TFDataType dtype = TFDataType.Double, string operName = null) 35 | { 36 | return Constant (1, shape, dtype, operName); 37 | } 38 | 39 | /// 40 | /// Create a constant tensor based on a shape 41 | /// Used by Zeros and Ones 42 | /// 43 | /// Value for tensor 44 | /// Shape of the tensor 45 | /// Optional Type of the Zero value. Default: Double 46 | /// Operation name, optional. 
47 | /// 48 | /// see https://github.com/tensorflow/tensorflow/blob/r1.1/tensorflow/python/framework/constant_op.py 49 | public TFOutput Constant (object value, TFShape tfshape, TFDataType dtype = TFDataType.Double, string operName = null) 50 | { 51 | if (tfshape.NumDimensions <= 0) 52 | { 53 | TFTensor tensor = TFTensor.Create1DTensor(dtype, value); 54 | return Const(tensor, tensor.TensorType, operName); 55 | } 56 | //convert the .net type to relevant tensorflow type 57 | object dtvalue = TFTensor.FetchSimple (dtype, value); 58 | 59 | var shape = tfshape.ToArray (); 60 | var idx = new int [shape.Length]; 61 | for (int i = 0; i < shape.Length; i++) { 62 | if (shape [i] > Int32.MaxValue) 63 | throw new ArgumentOutOfRangeException ("Shape can not be longer than 32 bits"); 64 | } 65 | 66 | Array data = null; 67 | if (tfshape.IsLongArray) data = Array.CreateInstance (dtvalue.GetType (), tfshape.ToArray ()); 68 | else data = Array.CreateInstance (dtvalue.GetType (), tfshape.ToIntArray ()); 69 | 70 | TFTensor.Set (data, dtype, shape, idx, 0, value); 71 | 72 | TFTensor tensor_value = new TFTensor (data); 73 | return Const (tensor_value, tensor_value.TensorType, operName); 74 | } 75 | 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /TensorFlowSharp/README.md: -------------------------------------------------------------------------------- 1 | This contains the .NET binding to TensorFlow. 2 | 3 | The `Operations.cs` file is generated by running the `OpGenerator` project, and needs 4 | to be done when you upgrade your TensorFlow runtime. -------------------------------------------------------------------------------- /TensorFlowSharp/TensorFlowSharp.csproj: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | net471;netstandard2.0 5 | TensorFlowSharp 6 | TensorFlowSharp 7 | true 8 | full 9 | bin\Debug\TensorFlowSharp.xml 10 | true 11 | 0.1 12 | 1.15.0 13 | 14 | 15 | 16 | true 17 | TensorFlowSharp 18 | 1.15.3 19 | Miguel de Icaza 20 | https://github.com/migueldeicaza/TensorFlowSharp/blob/master/LICENSE 21 | https://github.com/migueldeicaza/TensorFlowSharp/ 22 | machine-learning, tensorflow, xamarin, c#, f# 23 | .NET Bindings for TensorFlow 24 | Miguel de Icaza 25 | .NET API for TensorFlow, Google's Machine Intelligence framework 26 | 1.15.3: Fixed libtensorflow.dll not being copied to output directory on .NET Core; 1.15.1: Add a Runner.Run method that does not allocate - contribution from Enrico Minack; 1.15.0: Updated to TensorFlow 1.15; 1.13.1: Fixes boolean and ushort tensor construction fixes from Enrico Minack; Runner.AddInput fixes from Colin Versteeg; captainst provided a convenience function for the samples; Zeeshan Ahmed added AdamOptimizer; Kevin Malenfant fixes a few bugs; Enrico Minack added support for mutable tensors; 1.13.0: An optimization implementation now exists for C#, contributed by Zeeshan Ahmed from the Microsoft data science team (RMSProp, SGD optimizer bindings); TFSession now has a static method for loading models; New methods for loading strings into tensors (also Zeeshan) 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /TensorFlowSharp/brute.pl: -------------------------------------------------------------------------------- 1 | # 2 | # Problems: 3 | # * Can not process [MarshalAs] [Out] etc, so will need manual binding, so will have to be 
done manually 4 | # 5 | # Use like this: 6 | # perl brute.pl Tensorflow.cs > t.cs && mv t.cs Tensorflow.cs 7 | # 8 | # Will produce a version that perform either invocation depending on the global variable TFCore.UseCPU 9 | # 10 | $next = 0; 11 | while (<>){ 12 | if (/DllImport/){ 13 | $import = $_; 14 | $next = 1; 15 | } elsif ($next){ 16 | if (/MarshalAs/){ 17 | print $import; 18 | print; 19 | $next = 0; 20 | } else { 21 | chop; 22 | if (/TF_GraphImportGraphDefWithReturnOutputs/){ 23 | $debug = 1; 24 | } else { 25 | $debug = 0; 26 | } 27 | ($prefix, $func, $args) = $_ =~ /(.*) (TF_[A-Za-z0-9]*) (\(.*)/; 28 | print STDERR "Got [$prefix][$func][$args] :: FullLine: $_\n" if $debug; 29 | $res = $import; 30 | $res =~ s/Library/Library, EntryPoint="$func"/; 31 | print $res; 32 | print "$prefix _CPU_$func $args\n"; 33 | $res = $import; 34 | $res =~ s/Library/LibraryGPU, EntryPoint="$func"/; 35 | print $res; 36 | print "$prefix _GPU_$func $args\n"; 37 | 38 | # 39 | # Remove the () around the arguments 40 | $args =~ s/;$//; 41 | $cargs = $args; 42 | $cargs =~ s/\(//; 43 | $cargs =~ s/\)//; 44 | 45 | # 46 | # Split the arguments in the indivudal "type value" groups 47 | # and generate a list of value1, value2, value3 48 | print STDERR "cargs: $cargs\n" if $debug; 49 | @individual = split (/,/, $cargs); 50 | $pass = ""; 51 | foreach $n (@individual){ 52 | print STDERR "ARG: $n\n" if $debug; 53 | if (!($pass eq "")){ 54 | $pass .= ", "; 55 | } 56 | #($arg) = $n =~ /\w[\w*]*\[\]* +(\w+)/; 57 | ($ref, $out, $arg) = $n =~ / *(ref )?(out )?[\w_\+\*]* ?\[*\]* \*?(\w+)/; 58 | print STDERR "matched: $arg\n" if $debug; 59 | $pass = $pass . "$ref$out$arg"; 60 | } 61 | 62 | # Remove the extern 63 | $nprefix = $prefix; 64 | $nprefix =~ s/ extern//; 65 | if ($nprefix =~ /void/){ 66 | $ret = ""; 67 | } else { 68 | $ret = "return "; 69 | } 70 | #print STDERR "nprefix=$nprefix and $ret\n"; 71 | print "$nprefix $func $args\n"; 72 | print "\t\t{\n"; 73 | print "\t\t\tif (TFCore.UseCPU)\n"; 74 | print "\t\t\t\t${ret}_CPU_$func ($pass);\n"; 75 | print "\t\t\telse\n"; 76 | print "\t\t\t\t${ret}_GPU_$func ($pass);\n"; 77 | print "\t\t}\n"; 78 | $next = 0; 79 | } 80 | } else { 81 | print; 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /TensorFlowSharp/nuget/build/net45/TensorFlowSharp.targets: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | true 6 | true 7 | 8 | 9 | True 10 | 11 | 12 | 13 | 14 | %(Filename)%(Extension) 15 | PreserveNewest 16 | 17 | 18 | %(Filename)%(Extension) 19 | PreserveNewest 20 | 21 | 22 | %(Filename)%(Extension) 23 | PreserveNewest 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /azure-pipelines.yml: -------------------------------------------------------------------------------- 1 | pool: 2 | name: Hosted macOS 3 | demands: msbuild 4 | 5 | steps: 6 | - bash: | 7 | echo at: 8 | pwd 9 | echo env: 10 | set 11 | echo dir: 12 | ls 13 | displayName: Environment 14 | 15 | - bash: | 16 | if echo $BUILD_SOURCEBRANCH | grep /release/; then 17 | perl -pi -e "s/PackageVersion>.*${BUILD_SOURCEBRANCHNAME}\(.*\)<.*/\1/p' TensorFlowSharp/TensorFlowSharp.csproj | sed 's/-pre.*//'` 24 | echo TFVERSION is $TFVERSION 25 | mono --version 26 | ROOT=`pwd` 27 | echo Root is: $ROOT 28 | BASE=https://storage.googleapis.com/tensorflow/libtensorflow 29 | 30 | WIN=libtensorflow-cpu-windows-x86_64-${TFVERSION}.zip 31 | 
DARWIN=libtensorflow-cpu-darwin-x86_64-${TFVERSION}.tar.gz 32 | LINUX=libtensorflow-cpu-linux-x86_64-${TFVERSION}.tar.gz 33 | if test x$SKIP_DOWNLOAD = x; then 34 | curl -O $BASE/$WIN 35 | curl -O $BASE/$DARWIN 36 | curl -O $BASE/$LINUX 37 | fi 38 | 39 | echo Unzipping Windows 40 | unzip -d PAYLOAD $WIN lib/tensorflow.dll 41 | mv lib/tensorflow.dll PAYLOAD 42 | 43 | echo Unzipping Darwin 44 | tar xzvf $DARWIN lib 45 | mv -f lib/libtensorflow* PAYLOAD/ 46 | 47 | echo Unzipping Linux 48 | tar xzvf $LINUX lib 49 | mv -f lib/* PAYLOAD/ 50 | mkdir -p $ROOT/native 51 | mv PAYLOAD/lib/tensorflow.dll PAYLOAD/libtensorflow.dll 52 | rmdir PAYLOAD/lib 53 | cp PAYLOAD/* $ROOT/native 54 | displayName: 'Script - Unpack remote payload' 55 | env: 56 | TFVERSION: 1.9.0 57 | 58 | - task: NuGetCommand@2 59 | displayName: 'NuGet restore' 60 | 61 | - task: MSBuild@1 62 | displayName: 'MSBuild Restore' 63 | inputs: 64 | msbuildArguments: '/t:restore' 65 | 66 | - bash: 'ls -lR' 67 | displayName: 'List contents' 68 | 69 | - task: MSBuild@1 70 | displayName: 'Build Solution' 71 | inputs: 72 | msbuildArguments: '/p:Configuration=Release' 73 | restoreNugetPackages: true 74 | 75 | - task: CopyFiles@2 76 | displayName: 'Copy Files to: $(build.artifactstagingdirectory)' 77 | inputs: 78 | SourceFolder: TensorFlowSharp/bin/Release 79 | TargetFolder: '$(build.artifactstagingdirectory)' 80 | 81 | - task: PublishBuildArtifacts@1 82 | displayName: 'Publish Artifact: drop' 83 | 84 | -------------------------------------------------------------------------------- /docfx/api/.gitignore: -------------------------------------------------------------------------------- 1 | ############### 2 | # temp file # 3 | ############### 4 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.DeviceType.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.DeviceType 4 | id: DeviceType 5 | children: 6 | - TensorFlow.DeviceType.CPU 7 | - TensorFlow.DeviceType.GPU 8 | - TensorFlow.DeviceType.TPU 9 | langs: 10 | - csharp 11 | name: DeviceType 12 | nameWithType: DeviceType 13 | fullName: TensorFlow.DeviceType 14 | type: Enum 15 | assemblies: 16 | - TensorFlowSharp 17 | namespace: TensorFlow 18 | summary: Device type 19 | syntax: 20 | content: public enum DeviceType 21 | inheritance: 22 | - System.Enum 23 | - uid: TensorFlow.DeviceType.CPU 24 | id: CPU 25 | parent: TensorFlow.DeviceType 26 | langs: 27 | - csharp 28 | name: CPU 29 | nameWithType: DeviceType.CPU 30 | fullName: DeviceType.CPU 31 | type: Field 32 | assemblies: 33 | - TensorFlowSharp 34 | namespace: TensorFlow 35 | summary: The device is the Central Processing Unit (CPU) 36 | syntax: 37 | content: CPU 38 | return: 39 | type: TensorFlow.DeviceType 40 | description: To be added. 41 | exceptions: [] 42 | - uid: TensorFlow.DeviceType.GPU 43 | id: GPU 44 | parent: TensorFlow.DeviceType 45 | langs: 46 | - csharp 47 | name: GPU 48 | nameWithType: DeviceType.GPU 49 | fullName: DeviceType.GPU 50 | type: Field 51 | assemblies: 52 | - TensorFlowSharp 53 | namespace: TensorFlow 54 | summary: The device is a Graphics Processing Unit (GPU) 55 | syntax: 56 | content: GPU 57 | return: 58 | type: TensorFlow.DeviceType 59 | description: To be added. 
60 | exceptions: [] 61 | - uid: TensorFlow.DeviceType.TPU 62 | id: TPU 63 | parent: TensorFlow.DeviceType 64 | langs: 65 | - csharp 66 | name: TPU 67 | nameWithType: DeviceType.TPU 68 | fullName: DeviceType.TPU 69 | type: Field 70 | assemblies: 71 | - TensorFlowSharp 72 | namespace: TensorFlow 73 | summary: The device is a Tensor Processing Unit (TPU) 74 | syntax: 75 | content: TPU 76 | return: 77 | type: TensorFlow.DeviceType 78 | description: To be added. 79 | exceptions: [] 80 | references: 81 | - uid: System.Enum 82 | parent: System 83 | isExternal: true 84 | name: Enum 85 | nameWithType: Enum 86 | fullName: System.Enum 87 | - uid: TensorFlow.DeviceType.CPU 88 | parent: TensorFlow.DeviceType 89 | isExternal: false 90 | name: CPU 91 | nameWithType: DeviceType.CPU 92 | fullName: DeviceType.CPU 93 | - uid: TensorFlow.DeviceType 94 | parent: TensorFlow 95 | isExternal: false 96 | name: DeviceType 97 | nameWithType: DeviceType 98 | fullName: TensorFlow.DeviceType 99 | - uid: TensorFlow.DeviceType.GPU 100 | parent: TensorFlow.DeviceType 101 | isExternal: false 102 | name: GPU 103 | nameWithType: DeviceType.GPU 104 | fullName: DeviceType.GPU 105 | - uid: TensorFlow.DeviceType.TPU 106 | parent: TensorFlow.DeviceType 107 | isExternal: false 108 | name: TPU 109 | nameWithType: DeviceType.TPU 110 | fullName: DeviceType.TPU 111 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.MonoPInvokeCallbackAttribute.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.MonoPInvokeCallbackAttribute 4 | id: MonoPInvokeCallbackAttribute 5 | children: 6 | - TensorFlow.MonoPInvokeCallbackAttribute.#ctor(System.Type) 7 | langs: 8 | - csharp 9 | name: MonoPInvokeCallbackAttribute 10 | nameWithType: MonoPInvokeCallbackAttribute 11 | fullName: TensorFlow.MonoPInvokeCallbackAttribute 12 | type: Class 13 | assemblies: 14 | - TensorFlowSharp 15 | namespace: TensorFlow 16 | summary: >- 17 | This attribute can be applied to callback functions that will be invoked 18 | from unmanaged code to managed code. 19 | remarks: >- 20 |

21 |                 [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]
22 |                 internal static void MyFreeFunc (IntPtr data, IntPtr length){..}
23 |                 
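The snippet above shows only the annotation itself. As a rough, self-contained sketch (not taken from this repository), such an annotated callback is typically handed to a TFBuffer so TensorFlow can release the unmanaged block when it no longer needs it; the TFBuffer (IntPtr, long, BufferReleaseFunc) constructor used below is an assumption based on the TFBuffer documentation elsewhere in this document.

```csharp
using System;
using System.Runtime.InteropServices;
using TensorFlow;

static class BufferCallbackSketch
{
    // The attribute tells AOT runtimes (for example iOS) that native code calls back into this method.
    [MonoPInvokeCallback (typeof (TFBuffer.BufferReleaseFunc))]
    internal static void FreeBlock (IntPtr data, IntPtr length)
    {
        Marshal.FreeHGlobal (data);   // release the unmanaged memory allocated below
    }

    public static TFBuffer WrapCopy (byte [] payload)
    {
        IntPtr block = Marshal.AllocHGlobal (payload.Length);
        Marshal.Copy (payload, 0, block, payload.Length);
        // TensorFlow invokes FreeBlock with the data pointer and length when the buffer is released.
        return new TFBuffer (block, payload.Length, FreeBlock);
    }
}
```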
24 | syntax: 25 | content: 'public sealed class MonoPInvokeCallbackAttribute : Attribute' 26 | inheritance: 27 | - System.Attribute 28 | implements: [] 29 | inheritedMembers: [] 30 | - uid: TensorFlow.MonoPInvokeCallbackAttribute.#ctor(System.Type) 31 | id: '#ctor(System.Type)' 32 | parent: TensorFlow.MonoPInvokeCallbackAttribute 33 | langs: 34 | - csharp 35 | name: MonoPInvokeCallbackAttribute(Type) 36 | nameWithType: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute(Type) 37 | fullName: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute(Type) 38 | type: Constructor 39 | assemblies: 40 | - TensorFlowSharp 41 | namespace: TensorFlow 42 | summary: "Use this constructor to annotate the type of the callback function that \n will be invoked from unmanaged code." 43 | syntax: 44 | content: public MonoPInvokeCallbackAttribute (Type t); 45 | parameters: 46 | - id: t 47 | type: System.Type 48 | description: T. 49 | overload: TensorFlow.MonoPInvokeCallbackAttribute.#ctor* 50 | exceptions: [] 51 | references: 52 | - uid: System.Attribute 53 | parent: System 54 | isExternal: true 55 | name: Attribute 56 | nameWithType: Attribute 57 | fullName: System.Attribute 58 | - uid: TensorFlow.MonoPInvokeCallbackAttribute.#ctor(System.Type) 59 | parent: TensorFlow.MonoPInvokeCallbackAttribute 60 | isExternal: false 61 | name: MonoPInvokeCallbackAttribute(Type) 62 | nameWithType: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute(Type) 63 | fullName: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute(Type) 64 | - uid: System.Type 65 | parent: System 66 | isExternal: true 67 | name: Type 68 | nameWithType: Type 69 | fullName: System.Type 70 | - uid: TensorFlow.MonoPInvokeCallbackAttribute.#ctor* 71 | parent: TensorFlow.MonoPInvokeCallbackAttribute 72 | isExternal: false 73 | name: MonoPInvokeCallbackAttribute 74 | nameWithType: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute 75 | fullName: MonoPInvokeCallbackAttribute.MonoPInvokeCallbackAttribute 76 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFBuffer.BufferReleaseFunc.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFBuffer.BufferReleaseFunc 4 | id: TFBuffer.BufferReleaseFunc 5 | langs: 6 | - csharp 7 | name: TFBuffer.BufferReleaseFunc 8 | nameWithType: TFBuffer.BufferReleaseFunc 9 | fullName: TensorFlow.TFBuffer.BufferReleaseFunc 10 | type: Delegate 11 | assemblies: 12 | - TensorFlowSharp 13 | namespace: TensorFlow 14 | summary: Signature of the method that is invoked to release the data. 15 | remarks: "Methods of this signature are invoked with the data pointer and the\n lenght pointer when then TFBuffer no longer needs to hold on to the\n data. If you are using this on platforms with static compilation\n like iOS, you need to annotate your callback with the MonoPInvokeCallbackAttribute,\n like this:\n \n
\n            [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))]\n            internal static void MyFreeFunc (IntPtr data, IntPtr length){..}\n            
" 16 | syntax: 17 | content: public delegate void TFBuffer.BufferReleaseFunc(IntPtr data, IntPtr lenght); 18 | inheritance: 19 | - System.Delegate 20 | references: 21 | - uid: System.Delegate 22 | parent: System 23 | isExternal: true 24 | name: Delegate 25 | nameWithType: Delegate 26 | fullName: System.Delegate 27 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFDependencies.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFDependencies 4 | id: TFDependencies 5 | children: 6 | - TensorFlow.TFDependencies.Dispose 7 | langs: 8 | - csharp 9 | name: TFDependencies 10 | nameWithType: TFDependencies 11 | fullName: TensorFlow.TFDependencies 12 | type: Class 13 | assemblies: 14 | - TensorFlowSharp 15 | namespace: TensorFlow 16 | summary: TFGraph variable dependencies handle. 17 | remarks: >- 18 | Instances of this class, when disposed, restore 19 | to the value it had before the method 20 | was called. 21 | syntax: 22 | content: 'public class TFDependencies : IDisposable' 23 | inheritance: 24 | - System.Object 25 | implements: 26 | - System.IDisposable 27 | inheritedMembers: [] 28 | - uid: TensorFlow.TFDependencies.Dispose 29 | id: Dispose 30 | parent: TensorFlow.TFDependencies 31 | langs: 32 | - csharp 33 | name: Dispose() 34 | nameWithType: TFDependencies.Dispose() 35 | fullName: TFDependencies.Dispose() 36 | type: Method 37 | assemblies: 38 | - TensorFlowSharp 39 | namespace: TensorFlow 40 | summary: Pops the variable dependencies to the previous dependencies in use. 41 | remarks: >- 42 | Call when you are finished using the 43 | to restore the previous variable dependencies in use in the . 44 | syntax: 45 | content: public void Dispose (); 46 | parameters: [] 47 | overload: TensorFlow.TFDependencies.Dispose* 48 | exceptions: [] 49 | references: 50 | - uid: System.Object 51 | parent: System 52 | isExternal: true 53 | name: Object 54 | nameWithType: Object 55 | fullName: System.Object 56 | - uid: TensorFlow.TFDependencies.Dispose 57 | parent: TensorFlow.TFDependencies 58 | isExternal: false 59 | name: Dispose() 60 | nameWithType: TFDependencies.Dispose() 61 | fullName: TFDependencies.Dispose() 62 | - uid: TensorFlow.TFDependencies.Dispose* 63 | parent: TensorFlow.TFDependencies 64 | isExternal: false 65 | name: Dispose 66 | nameWithType: TFDependencies.Dispose 67 | fullName: TFDependencies.Dispose 68 | - uid: System.IDisposable 69 | parent: System 70 | isExternal: true 71 | name: IDisposable 72 | nameWithType: IDisposable 73 | fullName: System.IDisposable 74 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFDevice.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFDevice 4 | id: TFDevice 5 | children: 6 | - TensorFlow.TFDevice.Dispose 7 | langs: 8 | - csharp 9 | name: TFDevice 10 | nameWithType: TFDevice 11 | fullName: TensorFlow.TFDevice 12 | type: Class 13 | assemblies: 14 | - TensorFlowSharp 15 | namespace: TensorFlow 16 | summary: Class to unset device name in the graph within using block. 
17 | syntax: 18 | content: 'public class TFDevice : IDisposable' 19 | inheritance: 20 | - System.Object 21 | implements: 22 | - System.IDisposable 23 | inheritedMembers: [] 24 | - uid: TensorFlow.TFDevice.Dispose 25 | id: Dispose 26 | parent: TensorFlow.TFDevice 27 | langs: 28 | - csharp 29 | name: Dispose() 30 | nameWithType: TFDevice.Dispose() 31 | fullName: TFDevice.Dispose() 32 | type: Method 33 | assemblies: 34 | - TensorFlowSharp 35 | namespace: TensorFlow 36 | summary: Pops the device name back to previous device name in use. 37 | remarks: >- 38 | Call when you are finished using the 39 | to restore to the default device to be used in the . 40 | syntax: 41 | content: public void Dispose (); 42 | parameters: [] 43 | overload: TensorFlow.TFDevice.Dispose* 44 | exceptions: [] 45 | references: 46 | - uid: System.Object 47 | parent: System 48 | isExternal: true 49 | name: Object 50 | nameWithType: Object 51 | fullName: System.Object 52 | - uid: TensorFlow.TFDevice.Dispose 53 | parent: TensorFlow.TFDevice 54 | isExternal: false 55 | name: Dispose() 56 | nameWithType: TFDevice.Dispose() 57 | fullName: TFDevice.Dispose() 58 | - uid: TensorFlow.TFDevice.Dispose* 59 | parent: TensorFlow.TFDevice 60 | isExternal: false 61 | name: Dispose 62 | nameWithType: TFDevice.Dispose 63 | fullName: TFDevice.Dispose 64 | - uid: System.IDisposable 65 | parent: System 66 | isExternal: true 67 | name: IDisposable 68 | nameWithType: IDisposable 69 | fullName: System.IDisposable 70 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFException.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFException 4 | id: TFException 5 | children: 6 | - TensorFlow.TFException.#ctor(System.String) 7 | langs: 8 | - csharp 9 | name: TFException 10 | nameWithType: TFException 11 | fullName: TensorFlow.TFException 12 | type: Class 13 | assemblies: 14 | - TensorFlowSharp 15 | namespace: TensorFlow 16 | summary: TensorFlow Exception 17 | syntax: 18 | content: 'public class TFException : Exception' 19 | inheritance: 20 | - System.Exception 21 | implements: [] 22 | inheritedMembers: [] 23 | - uid: TensorFlow.TFException.#ctor(System.String) 24 | id: '#ctor(System.String)' 25 | parent: TensorFlow.TFException 26 | langs: 27 | - csharp 28 | name: TFException(String) 29 | nameWithType: TFException.TFException(String) 30 | fullName: TFException.TFException(String) 31 | type: Constructor 32 | assemblies: 33 | - TensorFlowSharp 34 | namespace: TensorFlow 35 | summary: Initializes a new instance of the class with a message. 36 | syntax: 37 | content: public TFException (string message); 38 | parameters: 39 | - id: message 40 | type: System.String 41 | description: Message. 
42 | overload: TensorFlow.TFException.#ctor* 43 | exceptions: [] 44 | references: 45 | - uid: System.Exception 46 | parent: System 47 | isExternal: true 48 | name: Exception 49 | nameWithType: Exception 50 | fullName: System.Exception 51 | - uid: TensorFlow.TFException.#ctor(System.String) 52 | parent: TensorFlow.TFException 53 | isExternal: false 54 | name: TFException(String) 55 | nameWithType: TFException.TFException(String) 56 | fullName: TFException.TFException(String) 57 | - uid: System.String 58 | parent: System 59 | isExternal: true 60 | name: String 61 | nameWithType: String 62 | fullName: System.String 63 | - uid: TensorFlow.TFException.#ctor* 64 | parent: TensorFlow.TFException 65 | isExternal: false 66 | name: TFException 67 | nameWithType: TFException.TFException 68 | fullName: TFException.TFException 69 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFGraph.WhileConstructor.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFGraph.WhileConstructor 4 | id: TFGraph.WhileConstructor 5 | langs: 6 | - csharp 7 | name: TFGraph.WhileConstructor 8 | nameWithType: TFGraph.WhileConstructor 9 | fullName: TensorFlow.TFGraph.WhileConstructor 10 | type: Delegate 11 | assemblies: 12 | - TensorFlowSharp 13 | namespace: TensorFlow 14 | summary: Signature of the method that will be invoked by the TFGraph.While method to construct a while loop 15 | remarks: "

\n The method should build up the condition on the conditionGraph and the body of the while \n loop in the provided bodyGraph. It should set the condOutput to the value used as the\n condition output and the array of values in bodyOutputs to the final outputs, as well as the\n name to be used; if no name is set, one will be assigned.\n

\n

\n The conditionGraph represents the while condition and the inputs are the current values of the\n input variables (condInputs). The output should be a scalar boolean.\n

\n

\n The loop body graph is in bodyGraph. The inputs are the current values of the loop\n variables, and the outputs are the updated values of the loop variables.\n

\n

\n You can use the passed status to record any problems with it.\n
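As an illustrative sketch only: the exact TFGraph.While overload used here is an assumption, while the delegate shape follows the signature documented below. A constructor that counts up to ten could look roughly like this:

```csharp
using (var graph = new TFGraph ())
{
    var start = graph.Const (0);

    var results = graph.While (
        new [] { start },
        (TFGraph conditionGraph, TFOutput [] condInputs, out TFOutput condOutput,
         TFGraph bodyGraph, TFOutput [] bodyInputs, TFOutput [] bodyOutputs, out string name) =>
        {
            // Condition graph: keep iterating while the counter is below ten (a scalar boolean).
            condOutput = conditionGraph.Less (condInputs [0], conditionGraph.Const (10));

            // Body graph: produce the updated loop variable for the next iteration.
            bodyOutputs [0] = bodyGraph.Add (bodyInputs [0], bodyGraph.Const (1));

            name = "CounterLoop";
        });
}
```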

" 16 | syntax: 17 | content: public delegate void TFGraph.WhileConstructor(TFGraph conditionGraph, TFOutput[] condInputs, out TFOutput condOutput, TFGraph bodyGraph, TFOutput[] bodyInputs, TFOutput[] bodyOutputs, out string name); 18 | inheritance: 19 | - System.Delegate 20 | references: 21 | - uid: System.Delegate 22 | parent: System 23 | isExternal: true 24 | name: Delegate 25 | nameWithType: Delegate 26 | fullName: System.Delegate 27 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFScope.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFScope 4 | id: TFScope 5 | children: 6 | - TensorFlow.TFScope.Dispose 7 | langs: 8 | - csharp 9 | name: TFScope 10 | nameWithType: TFScope 11 | fullName: TensorFlow.TFScope 12 | type: Class 13 | assemblies: 14 | - TensorFlowSharp 15 | namespace: TensorFlow 16 | summary: TFGraph name scope handle 17 | remarks: >- 18 | Instances of this class when disposed restore the CurrentNameScope to the 19 | value they had when the TFGraph.WithScope method was called. 20 | syntax: 21 | content: 'public class TFScope : IDisposable' 22 | inheritance: 23 | - System.Object 24 | implements: 25 | - System.IDisposable 26 | inheritedMembers: [] 27 | - uid: TensorFlow.TFScope.Dispose 28 | id: Dispose 29 | parent: TensorFlow.TFScope 30 | langs: 31 | - csharp 32 | name: Dispose() 33 | nameWithType: TFScope.Dispose() 34 | fullName: TFScope.Dispose() 35 | type: Method 36 | assemblies: 37 | - TensorFlowSharp 38 | namespace: TensorFlow 39 | summary: Pops the name space to the previous namescope in use. 40 | remarks: >- 41 | Call when you are finished using the 42 | to restore the previous name scope in use in the . 43 | syntax: 44 | content: public void Dispose (); 45 | parameters: [] 46 | overload: TensorFlow.TFScope.Dispose* 47 | exceptions: [] 48 | references: 49 | - uid: System.Object 50 | parent: System 51 | isExternal: true 52 | name: Object 53 | nameWithType: Object 54 | fullName: System.Object 55 | - uid: TensorFlow.TFScope.Dispose 56 | parent: TensorFlow.TFScope 57 | isExternal: false 58 | name: Dispose() 59 | nameWithType: TFScope.Dispose() 60 | fullName: TFScope.Dispose() 61 | - uid: TensorFlow.TFScope.Dispose* 62 | parent: TensorFlow.TFScope 63 | isExternal: false 64 | name: Dispose 65 | nameWithType: TFScope.Dispose 66 | fullName: TFScope.Dispose 67 | - uid: System.IDisposable 68 | parent: System 69 | isExternal: true 70 | name: IDisposable 71 | nameWithType: IDisposable 72 | fullName: System.IDisposable 73 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFSession.PartialRunToken.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFSession.PartialRunToken 4 | id: TFSession.PartialRunToken 5 | children: 6 | - TensorFlow.TFSession.PartialRunToken.#ctor 7 | - TensorFlow.TFSession.PartialRunToken.System#IDisposable#Dispose 8 | langs: 9 | - csharp 10 | name: TFSession.PartialRunToken 11 | nameWithType: TFSession.PartialRunToken 12 | fullName: TensorFlow.TFSession.PartialRunToken 13 | type: Class 14 | assemblies: 15 | - TensorFlowSharp 16 | namespace: TensorFlow 17 | summary: >- 18 | Token returned from using one of the Partial Run Setup methods from , 19 | and use this token subsequently for other invocations. 
20 | remarks: "Calling Dispose on this object will release the resources associated with setting up \n a partial run." 21 | syntax: 22 | content: 'public class TFSession.PartialRunToken : IDisposable' 23 | inheritance: 24 | - System.Object 25 | implements: 26 | - System.IDisposable 27 | inheritedMembers: [] 28 | - uid: TensorFlow.TFSession.PartialRunToken.#ctor 29 | id: '#ctor' 30 | parent: TensorFlow.TFSession.PartialRunToken 31 | langs: 32 | - csharp 33 | name: TFSession.PartialRunToken() 34 | nameWithType: TFSession.PartialRunToken.TFSession.PartialRunToken() 35 | fullName: TFSession.PartialRunToken.TFSession.PartialRunToken() 36 | type: Constructor 37 | assemblies: 38 | - TensorFlowSharp 39 | namespace: TensorFlow 40 | syntax: 41 | content: public PartialRunToken (); 42 | parameters: [] 43 | overload: TensorFlow.TFSession.PartialRunToken.#ctor* 44 | exceptions: [] 45 | - uid: TensorFlow.TFSession.PartialRunToken.System#IDisposable#Dispose 46 | id: System#IDisposable#Dispose 47 | isEii: true 48 | parent: TensorFlow.TFSession.PartialRunToken 49 | langs: 50 | - csharp 51 | name: IDisposable.Dispose() 52 | nameWithType: TFSession.PartialRunToken.IDisposable.Dispose() 53 | fullName: TFSession.PartialRunToken.IDisposable.Dispose() 54 | type: Method 55 | assemblies: 56 | - TensorFlowSharp 57 | namespace: TensorFlow 58 | syntax: 59 | content: void IDisposable.Dispose (); 60 | parameters: [] 61 | overload: TensorFlow.TFSession.PartialRunToken.System#IDisposable#Dispose* 62 | exceptions: [] 63 | references: 64 | - uid: System.Object 65 | parent: System 66 | isExternal: true 67 | name: Object 68 | nameWithType: Object 69 | fullName: System.Object 70 | - uid: TensorFlow.TFSession.PartialRunToken.#ctor 71 | parent: TensorFlow.TFSession.PartialRunToken 72 | isExternal: false 73 | name: TFSession.PartialRunToken() 74 | nameWithType: TFSession.PartialRunToken.TFSession.PartialRunToken() 75 | fullName: TFSession.PartialRunToken.TFSession.PartialRunToken() 76 | - uid: TensorFlow.TFSession.PartialRunToken.System#IDisposable#Dispose 77 | parent: TensorFlow.TFSession.PartialRunToken 78 | isExternal: false 79 | name: IDisposable.Dispose() 80 | nameWithType: TFSession.PartialRunToken.IDisposable.Dispose() 81 | fullName: TFSession.PartialRunToken.IDisposable.Dispose() 82 | - uid: TensorFlow.TFSession.PartialRunToken.#ctor* 83 | parent: TensorFlow.TFSession.PartialRunToken 84 | isExternal: false 85 | name: TFSession.PartialRunToken 86 | nameWithType: TFSession.PartialRunToken.TFSession.PartialRunToken 87 | fullName: TFSession.PartialRunToken.TFSession.PartialRunToken 88 | - uid: TensorFlow.TFSession.PartialRunToken.System#IDisposable#Dispose* 89 | parent: TensorFlow.TFSession.PartialRunToken 90 | isExternal: false 91 | name: System.IDisposable.Dispose 92 | nameWithType: TFSession.PartialRunToken.System.IDisposable.Dispose 93 | fullName: TFSession.PartialRunToken.System.IDisposable.Dispose 94 | - uid: System.IDisposable 95 | parent: System 96 | isExternal: true 97 | name: IDisposable 98 | nameWithType: IDisposable 99 | fullName: System.IDisposable 100 | -------------------------------------------------------------------------------- /docfx/api/TensorFlow/TensorFlow.TFTensor.Deallocator.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:ManagedReference 2 | items: 3 | - uid: TensorFlow.TFTensor.Deallocator 4 | id: TFTensor.Deallocator 5 | langs: 6 | - csharp 7 | name: TFTensor.Deallocator 8 | nameWithType: TFTensor.Deallocator 9 | fullName: 
TensorFlow.TFTensor.Deallocator 10 | type: Delegate 11 | assemblies: 12 | - TensorFlowSharp 13 | namespace: TensorFlow 14 | summary: Signature that methods must conform to to be used to release memory that was passed to a manually allocated TFTensor 15 | syntax: 16 | content: public delegate void TFTensor.Deallocator(IntPtr data, IntPtr size, IntPtr deallocatorData); 17 | inheritance: 18 | - System.Delegate 19 | references: 20 | - uid: System.Delegate 21 | parent: System 22 | isExternal: true 23 | name: Delegate 24 | nameWithType: Delegate 25 | fullName: System.Delegate 26 | -------------------------------------------------------------------------------- /docfx/api/index.md: -------------------------------------------------------------------------------- 1 | 2 | Welcome to the TensorFlowSharp API documentation. 3 | 4 | Expand the node on the left to explore the .NET API for TensorFlow. 5 | -------------------------------------------------------------------------------- /docfx/api/toc.yml: -------------------------------------------------------------------------------- 1 | ### YamlMime:TableOfContent 2 | - uid: TensorFlow 3 | name: TensorFlow 4 | items: 5 | - uid: TensorFlow.Adagrad 6 | name: Adagrad 7 | - uid: TensorFlow.AdaptiveOptimizer 8 | name: AdaptiveOptimizer 9 | - uid: TensorFlow.DeviceAttributes 10 | name: DeviceAttributes 11 | - uid: TensorFlow.DeviceType 12 | name: DeviceType 13 | - uid: TensorFlow.MonoPInvokeCallbackAttribute 14 | name: MonoPInvokeCallbackAttribute 15 | - uid: TensorFlow.Optimizer 16 | name: Optimizer 17 | - uid: TensorFlow.PaddingFIFOQueue 18 | name: PaddingFIFOQueue 19 | - uid: TensorFlow.QueueBase 20 | name: QueueBase 21 | - uid: TensorFlow.RMSProp 22 | name: RMSProp 23 | - uid: TensorFlow.SGD 24 | name: SGD 25 | - uid: TensorFlow.TFAttributeMetadata 26 | name: TFAttributeMetadata 27 | - uid: TensorFlow.TFAttributeType 28 | name: TFAttributeType 29 | - uid: TensorFlow.TFBuffer 30 | name: TFBuffer 31 | - uid: TensorFlow.TFBuffer.BufferReleaseFunc 32 | name: TFBuffer.BufferReleaseFunc 33 | - uid: TensorFlow.TFCode 34 | name: TFCode 35 | - uid: TensorFlow.TFCore 36 | name: TFCore 37 | - uid: TensorFlow.TFDataType 38 | name: TFDataType 39 | - uid: TensorFlow.TFDependencies 40 | name: TFDependencies 41 | - uid: TensorFlow.TFDevice 42 | name: TFDevice 43 | - uid: TensorFlow.TFDisposable 44 | name: TFDisposable 45 | - uid: TensorFlow.TFDisposableThreadSafe 46 | name: TFDisposableThreadSafe 47 | - uid: TensorFlow.TFException 48 | name: TFException 49 | - uid: TensorFlow.TFFunction 50 | name: TFFunction 51 | - uid: TensorFlow.TFGraph 52 | name: TFGraph 53 | - uid: TensorFlow.TFGraph.WhileConstructor 54 | name: TFGraph.WhileConstructor 55 | - uid: TensorFlow.TFImportGraphDefOptions 56 | name: TFImportGraphDefOptions 57 | - uid: TensorFlow.TFInput 58 | name: TFInput 59 | - uid: TensorFlow.TFLibrary 60 | name: TFLibrary 61 | - uid: TensorFlow.TFOperation 62 | name: TFOperation 63 | - uid: TensorFlow.TFOperationDesc 64 | name: TFOperationDesc 65 | - uid: TensorFlow.TFOutput 66 | name: TFOutput 67 | - uid: TensorFlow.TFScope 68 | name: TFScope 69 | - uid: TensorFlow.TFSession 70 | name: TFSession 71 | - uid: TensorFlow.TFSession.PartialRunToken 72 | name: TFSession.PartialRunToken 73 | - uid: TensorFlow.TFSession.Runner 74 | name: TFSession.Runner 75 | - uid: TensorFlow.TFSessionOptions 76 | name: TFSessionOptions 77 | - uid: TensorFlow.TFShape 78 | name: TFShape 79 | - uid: TensorFlow.TFStatus 80 | name: TFStatus 81 | - uid: TensorFlow.TFTensor 82 | name: TFTensor 83 | - 
uid: TensorFlow.TFTensor.Deallocator 84 | name: TFTensor.Deallocator 85 | - uid: TensorFlow.Variable 86 | name: Variable 87 | -------------------------------------------------------------------------------- /docfx/articles/intro.md: -------------------------------------------------------------------------------- 1 | # Add your introductions here! 2 | -------------------------------------------------------------------------------- /docfx/articles/toc.yml: -------------------------------------------------------------------------------- 1 | - name: Introduction 2 | href: intro.md 3 | - name: Getting Started 4 | href: start.md 5 | -------------------------------------------------------------------------------- /docfx/docfx.json: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": [ 3 | { 4 | "src": [ 5 | { 6 | "files": [ 7 | "TensorFlowSharp/TensorFlowSharp.csproj" 8 | ], 9 | "exclude": [ 10 | "**/obj/**", 11 | "**/bin/**", 12 | "_site/**" 13 | ] 14 | } 15 | ], 16 | "dest": "api" 17 | } 18 | ], 19 | "build": { 20 | "content": [ 21 | { 22 | "files": [ 23 | "api/**.yml", 24 | "api/index.md" 25 | ] 26 | }, 27 | { 28 | "files": [ 29 | "articles/**.md", 30 | "articles/**/toc.yml", 31 | "toc.yml", 32 | "*.md" 33 | ], 34 | "exclude": [ 35 | "obj/**", 36 | "_site/**" 37 | ] 38 | } 39 | ], 40 | "resource": [ 41 | { 42 | "files": [ 43 | "images/**" 44 | ], 45 | "exclude": [ 46 | "obj/**", 47 | "_site/**" 48 | ] 49 | } 50 | ], 51 | "overwrite": [ 52 | { 53 | "files": [ 54 | "apidoc/**.md" 55 | ], 56 | "exclude": [ 57 | "obj/**", 58 | "_site/**" 59 | ] 60 | } 61 | ], 62 | "dest": "../docs", 63 | "globalMetadataFiles": [], 64 | "fileMetadataFiles": [], 65 | "template": [ 66 | "default" 67 | ], 68 | "postProcessors": [], 69 | "noLangKeyword": false, 70 | "keepFileLink": false 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /docfx/index.md: -------------------------------------------------------------------------------- 1 | 2 | TensorFlowSharp are .NET bindings to the TensorFlow library published 3 | here: 4 | 5 | https://github.com/tensorflow/tensorflow 6 | 7 | This surfaces the C API as a strongly-typed C# API. 8 | 9 | ## Getting Started 10 | 11 | Check the [GitHub project 12 | page](https://github.com/migueldeicaza/TensorFlowSharp) for 13 | TensorFlowSharp. 
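A minimal example of driving the API, adapted from the test code elsewhere in this repository, looks roughly like this:

```csharp
using System;
using TensorFlow;

class Hello
{
    static void Main ()
    {
        using (var graph = new TFGraph ())
        using (var session = new TFSession (graph))
        {
            // Build a tiny graph that adds two constants, then run it.
            var sum = graph.Add (graph.Const (2), graph.Const (3));
            var output = session.GetRunner ().Fetch (sum).Run ();
            Console.WriteLine (output [0].GetValue ());   // prints 5
        }
    }
}
```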
14 | 15 | ## API documentation 16 | 17 | The [API Documentation](api/TensorFlow.html) -------------------------------------------------------------------------------- /docfx/toc.yml: -------------------------------------------------------------------------------- 1 | - name: Articles 2 | href: articles/ 3 | - name: Api Documentation 4 | href: api/ 5 | homepage: api/index.md 6 | -------------------------------------------------------------------------------- /docs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/favicon.ico -------------------------------------------------------------------------------- /docs/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /docs/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /docs/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /docs/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /docs/logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 4 | 7 | 8 | Created by Docfx 9 | 10 | 12 | 15 | 21 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /docs/search-stopwords.json: -------------------------------------------------------------------------------- 1 | [ 2 | "a", 3 | "able", 4 | "about", 5 | "across", 6 | "after", 7 | "all", 8 | "almost", 9 | "also", 10 | "am", 11 | "among", 12 | "an", 13 | "and", 14 | "any", 15 | "are", 16 | "as", 17 | "at", 18 | "be", 19 | "because", 20 | "been", 21 | "but", 22 | "by", 23 | "can", 24 | "cannot", 25 | "could", 26 | "dear", 27 | "did", 28 | "do", 29 | "does", 30 | "either", 31 | "else", 32 | "ever", 33 | "every", 34 | "for", 35 | "from", 36 | "get", 37 | "got", 38 | "had", 39 | "has", 40 | "have", 41 | "he", 42 | "her", 43 | "hers", 44 | "him", 45 | "his", 46 | "how", 47 | "however", 48 | "i", 49 | "if", 50 | "in", 51 | "into", 52 | "is", 53 | "it", 54 | "its", 55 | "just", 56 | "least", 57 | "let", 58 | "like", 59 | "likely", 60 | "may", 61 | "me", 62 | "might", 63 | "most", 64 | "must", 65 | "my", 66 | "neither", 67 | "no", 68 | "nor", 69 | "not", 70 | "of", 71 | "off", 72 | "often", 73 | "on", 74 | "only", 75 | "or", 76 | "other", 77 | "our", 78 | "own", 79 | "rather", 80 | "said", 81 
| "say", 82 | "says", 83 | "she", 84 | "should", 85 | "since", 86 | "so", 87 | "some", 88 | "than", 89 | "that", 90 | "the", 91 | "their", 92 | "them", 93 | "then", 94 | "there", 95 | "these", 96 | "they", 97 | "this", 98 | "tis", 99 | "to", 100 | "too", 101 | "twas", 102 | "us", 103 | "wants", 104 | "was", 105 | "we", 106 | "were", 107 | "what", 108 | "when", 109 | "where", 110 | "which", 111 | "while", 112 | "who", 113 | "whom", 114 | "why", 115 | "will", 116 | "with", 117 | "would", 118 | "yet", 119 | "you", 120 | "your" 121 | ] 122 | -------------------------------------------------------------------------------- /docs/styles/main.css: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/migueldeicaza/TensorFlowSharp/600d278ded9e7e723309cff27f69cd4597c2a077/docs/styles/main.css -------------------------------------------------------------------------------- /docs/styles/main.js: -------------------------------------------------------------------------------- 1 | // Copyright (c) Microsoft. All rights reserved. Licensed under the MIT license. See LICENSE file in the project root for full license information. 2 | -------------------------------------------------------------------------------- /docs/styles/search-worker.js: -------------------------------------------------------------------------------- 1 | (function () { 2 | importScripts('lunr.min.js'); 3 | 4 | var lunrIndex = lunr(function () { 5 | this.pipeline.remove(lunr.stopWordFilter); 6 | this.ref('href'); 7 | this.field('title', { boost: 50 }); 8 | this.field('keywords', { boost: 20 }); 9 | }); 10 | lunr.tokenizer.seperator = /[\s\-\.]+/; 11 | 12 | var stopWordsRequest = new XMLHttpRequest(); 13 | stopWordsRequest.open('GET', '../search-stopwords.json'); 14 | stopWordsRequest.onload = function () { 15 | if (this.status != 200) { 16 | return; 17 | } 18 | var stopWords = JSON.parse(this.responseText); 19 | var docfxStopWordFilter = lunr.generateStopWordFilter(stopWords); 20 | lunr.Pipeline.registerFunction(docfxStopWordFilter, 'docfxStopWordFilter'); 21 | lunrIndex.pipeline.add(docfxStopWordFilter); 22 | } 23 | stopWordsRequest.send(); 24 | 25 | var searchData = {}; 26 | var searchDataRequest = new XMLHttpRequest(); 27 | 28 | searchDataRequest.open('GET', '../index.json'); 29 | searchDataRequest.onload = function () { 30 | if (this.status != 200) { 31 | return; 32 | } 33 | searchData = JSON.parse(this.responseText); 34 | for (var prop in searchData) { 35 | if (searchData.hasOwnProperty(prop)) { 36 | lunrIndex.add(searchData[prop]); 37 | } 38 | } 39 | postMessage({ e: 'index-ready' }); 40 | } 41 | searchDataRequest.send(); 42 | 43 | onmessage = function (oEvent) { 44 | var q = oEvent.data.q; 45 | var hits = lunrIndex.search(q); 46 | var results = []; 47 | hits.forEach(function (hit) { 48 | var item = searchData[hit.ref]; 49 | results.push({ 'href': item.href, 'title': item.title, 'keywords': item.keywords }); 50 | }); 51 | postMessage({ e: 'query-ready', q: q, d: results }); 52 | } 53 | })(); 54 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/Adagrad.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | TensorFlow.AdaptiveOptimizer 10 | 11 | 12 | 13 | 14 | Adaptive stochastic gradient descent optimizer. 15 | 16 | To be added. 
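A hypothetical usage sketch follows; the argument order and the Minimize helper are assumptions inferred from the constructor documentation just below and are not verified against the source:

```csharp
// Given an existing TFGraph 'graph' and a scalar TFOutput 'loss' built on it:
var optimizer = new Adagrad (graph, 0.01f /* learning rate */, 0.9f /* decay */,
                             0.1f /* initial accumulator value */);
TFOperation [] trainStep = optimizer.Minimize (loss);   // assumed helper on the Optimizer base class
// Add 'trainStep' as targets of a session run once per training iteration.
```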
17 | 18 | 19 | 20 | 21 | 22 | Constructor 23 | 24 | 1.0.0.0 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | The graph object. 35 | The learning rate for the SGD update. 36 | Learning rate decay over each update. 37 | A floating point value. Starting value for the accumulators, must be positive. 38 | Name the optimizer. All the variable that are created in this class will be created under this scope. 39 | 40 | Construct Adagrad optimizer. 41 | 42 | To be added. 43 | 44 | 45 | 46 | 47 | 48 | Method 49 | 50 | 1.0.0.0 51 | 52 | 53 | TensorFlow.TFOperation[] 54 | 55 | 56 | 57 | 58 | 59 | System.Runtime.CompilerServices.TupleElementNames(Mono.Cecil.CustomAttributeArgument[]) 60 | 61 | 62 | 63 | 64 | 65 | To be added. 66 | To be added. 67 | To be added. 68 | To be added. 69 | 70 | 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/AdaptiveOptimizer.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | TensorFlow.Optimizer 10 | 11 | 12 | 13 | 14 | The base class for all the adaptive optimizers. 15 | 16 | To be added. 17 | 18 | 19 | 20 | 21 | 22 | Constructor 23 | 24 | 1.0.0.0 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | The graph object. 35 | The learning rate for the SGD update. 36 | Learning rate decay over each update. 37 | A floating point value. Starting value for the accumulators, must be positive. 38 | Name the optimizer. All the variable that are created in this class will be created under this scope. 39 | 40 | Construct Adagrad optimizer. 41 | 42 | To be added. 43 | 44 | 45 | 46 | 47 | 48 | Field 49 | 50 | 1.0.0.0 51 | 52 | 53 | TensorFlow.TFOutput 54 | 55 | 56 | 57 | Constant value used for avoiding division overflow. 58 | 59 | To be added. 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/DeviceAttributes.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Object 10 | 11 | 12 | 13 | 14 | Describes the device attributes 15 | 16 | To be added. 17 | 18 | 19 | 20 | 21 | 22 | Property 23 | 24 | 1.0.0.0 25 | 26 | 27 | TensorFlow.DeviceType 28 | 29 | 30 | 31 | Gets the type of the device. 32 | 33 | The type of the device. 34 | To be added. 35 | 36 | 37 | 38 | 39 | 40 | Property 41 | 42 | 1.0.0.0 43 | 44 | 45 | System.Int64 46 | 47 | 48 | 49 | The amount of memory associated with a given device. 50 | 51 | The memory limit bytes. 52 | To be added. 53 | 54 | 55 | 56 | 57 | 58 | Property 59 | 60 | 1.0.0.0 61 | 62 | 63 | System.String 64 | 65 | 66 | 67 | The full name of the device (e.g. /job:worker/replica:0/...) 68 | 69 | To be added. 70 | To be added. 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/DeviceType.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Enum 10 | 11 | 12 | 13 | Device type 14 | 15 | To be added. 
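A short sketch that surfaces these values at runtime; it assumes TFSession.ListDevices and the DeviceAttributes properties documented above behave as described:

```csharp
using System;
using TensorFlow;

using (var graph = new TFGraph ())
using (var session = new TFSession (graph))
{
    foreach (var device in session.ListDevices ())
    {
        // DeviceType is CPU, GPU or TPU; MemoryLimitBytes is the memory available on that device.
        Console.WriteLine ($"{device.Name}: {device.DeviceType}, {device.MemoryLimitBytes} bytes");
    }
}
```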
16 | 17 | 18 | 19 | 20 | 21 | Field 22 | 23 | 1.0.0.0 24 | 25 | 26 | TensorFlow.DeviceType 27 | 28 | 29 | 30 | The device is the Central Processing Unit (CPU) 31 | 32 | 33 | 34 | 35 | 36 | 37 | Field 38 | 39 | 1.0.0.0 40 | 41 | 42 | TensorFlow.DeviceType 43 | 44 | 45 | 46 | The device is a Graphics Processing Unit (GPU) 47 | 48 | 49 | 50 | 51 | 52 | 53 | Field 54 | 55 | 1.0.0.0 56 | 57 | 58 | TensorFlow.DeviceType 59 | 60 | 61 | 62 | The device is a Tensor Processing Unit (TPU) 63 | 64 | 65 | 66 | 67 | 68 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/MonoPInvokeCallbackAttribute.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Attribute 10 | 11 | 12 | 13 | 14 | This attribute can be applied to callback functions that will be invoked 15 | from unmanaged code to managed code. 16 | 17 | 18 | 19 | [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))] 20 | internal static void MyFreeFunc (IntPtr data, IntPtr length){..} 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | Constructor 29 | 30 | 1.0.0.0 31 | 32 | 33 | 34 | 35 | 36 | T. 37 | 38 | Use this constructor to annotate the type of the callback function that 39 | will be invoked from unmanaged code. 40 | 41 | To be added. 42 | 43 | 44 | 45 | 46 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFBuffer+BufferReleaseFunc.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Delegate 10 | 11 | 12 | 13 | 14 | 15 | 16 | System.Void 17 | 18 | 19 | To be added. 20 | To be added. 21 | 22 | Signature of the method that is invoked to release the data. 23 | 24 | 25 | Methods of this signature are invoked with the data pointer and the 26 | lenght pointer when then TFBuffer no longer needs to hold on to the 27 | data. If you are using this on platforms with static compilation 28 | like iOS, you need to annotate your callback with the MonoPInvokeCallbackAttribute, 29 | like this: 30 | 31 | 32 | [TensorFlow.MonoPInvokeCallback (typeof (BufferReleaseFunc))] 33 | internal static void MyFreeFunc (IntPtr data, IntPtr length){..} 34 | 35 | 36 | 37 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFDependencies.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Object 10 | 11 | 12 | 13 | System.IDisposable 14 | 15 | 16 | 17 | 18 | TFGraph variable dependencies handle. 19 | 20 | 21 | Instances of this class, when disposed, restore 22 | to the value it had before the method 23 | was called. 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | Method 32 | 33 | 1.0.0.0 34 | 35 | 36 | System.Void 37 | 38 | 39 | 40 | 41 | Pops the variable dependencies to the previous dependencies in use. 42 | 43 | Call when you are finished using the 44 | to restore the previous variable dependencies in use in the . 
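A minimal sketch of the intended pattern, assuming TFGraph.WithDependencies hands back the TFDependencies handle described above:

```csharp
using (var graph = new TFGraph ())
{
    var counter = graph.VariableV2 (TFShape.Scalar, TFDataType.Double);
    var init = graph.Assign (counter, graph.Const (0.0));

    using (graph.WithDependencies (init.Operation))
    {
        // Nodes created here will only run after 'init' has executed.
        var next = graph.Add (counter, graph.Const (1.0));
    }
    // Disposing the handle restores the previous (empty) control-dependency set.
}
```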
45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFDevice.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Object 10 | 11 | 12 | 13 | System.IDisposable 14 | 15 | 16 | 17 | 18 | Class to unset device name in the graph within using block. 19 | 20 | To be added. 21 | 22 | 23 | 24 | 25 | 26 | Method 27 | 28 | 1.0.0.0 29 | 30 | 31 | System.Void 32 | 33 | 34 | 35 | 36 | Pops the device name back to previous device name in use. 37 | 38 | Call when you are finished using the 39 | to restore to the default device to be used in the . 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFDisposableThreadSafe.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | TensorFlow.TFDisposable 10 | 11 | 12 | 13 | 14 | ase class for many TensorFlow data types that provides a common idiom to dispose and 15 | release resources associated with the native data types and whose unmanaged resource 16 | disposing can be called from a background thread (the finalizer). Users do not 17 | need to deal with this class. 18 | 19 | 20 | Some object deletion APIs in TensorFlow can be invoked from a background thread, 21 | so the release methods are suitable to be invoked from the Finalizer thread, in 22 | those scenarios, subclass from this class rather than the TFDisposable class. 23 | 24 | 25 | 26 | 27 | 28 | 29 | Constructor 30 | 31 | 1.0.0.0 32 | 33 | 34 | 35 | 36 | Initializes a new instance of the class. 37 | 38 | To be added. 39 | 40 | 41 | 42 | 43 | 44 | Constructor 45 | 46 | 1.0.0.0 47 | 48 | 49 | 50 | 51 | 52 | To be added. 53 | 54 | Initializes a new instance of the class 55 | from the handle that it will wrap. 56 | 57 | To be added. 58 | 59 | 60 | 61 | 62 | 63 | Method 64 | 65 | 1.0.0.0 66 | 67 | 68 | System.Void 69 | 70 | 71 | 72 | 73 | 74 | If set to true disposing. 75 | 76 | Dispose the object, unlike the default implementat in TFDisposable, 77 | this will release the unmanaged resources from a background thread. 78 | 79 | To be added. 80 | 81 | 82 | 83 | 84 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFException.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Exception 10 | 11 | 12 | 13 | 14 | TensorFlow Exception 15 | 16 | To be added. 17 | 18 | 19 | 20 | 21 | 22 | Constructor 23 | 24 | 1.0.0.0 25 | 26 | 27 | 28 | 29 | 30 | Message. 31 | 32 | Initializes a new instance of the class with a message. 33 | 34 | To be added. 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFGraph+WhileConstructor.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Delegate 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | System.Void 22 | 23 | 24 | To be added. 25 | To be added. 26 | To be added. 27 | To be added. 28 | To be added. 29 | To be added. 30 | To be added. 
31 | 32 | Signature of the method that will be invoked by the TFGraph.While method to construct a while loop 33 | 34 | 35 | 36 | The method should build up the condition on the conditionGraph and the body of the while 37 | loop in the provided bodyGraph. It should set the condOutput to the value used as the 38 | condition output and the array of values in bodyOutputs to the final outputs as well as the 39 | name to be used, if not set, one will be assigned. 40 | 41 | 42 | The conditionGraph represents the while condition and the inputs are the current values of the 43 | input variables (condInputs). The output should be a scalar boolean. 44 | 45 | 46 | The loop body graph is in bodyGraph, The inputs are the current values of the loop 47 | variables. The outputs are the updated values of the loop variables. 48 | 49 | 50 | You can use the passed status record problems with it. 51 | 52 | 53 | 54 | 55 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFInput.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.ValueType 10 | 11 | 12 | 13 | 14 | Represents a specific input of an operation. 15 | 16 | To be added. 17 | 18 | 19 | 20 | 21 | 22 | Method 23 | 24 | 1.0.0.0 25 | 26 | 27 | TensorFlow.TFOutput 28 | 29 | 30 | 31 | 32 | 33 | To be added. 34 | To be added. 35 | To be added. 36 | To be added. 37 | 38 | 39 | 40 | 41 | 42 | Field 43 | 44 | 1.0.0.0 45 | 46 | 47 | System.Int32 48 | 49 | 50 | 51 | The index of the output within the Operation 52 | 53 | To be added. 54 | 55 | 56 | 57 | 58 | 59 | Property 60 | 61 | 1.0.0.0 62 | 63 | 64 | TensorFlow.TFDataType 65 | 66 | 67 | To be added. 68 | To be added. 69 | To be added. 70 | 71 | 72 | 73 | 74 | 75 | Field 76 | 77 | 1.0.0.0 78 | 79 | 80 | System.IntPtr 81 | 82 | 83 | 84 | The operation that this input is for 85 | 86 | To be added. 87 | 88 | 89 | 90 | 91 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFLibrary.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | TensorFlow.TFDisposable 10 | 11 | 12 | 13 | 14 | Represents a dynamically loaded library of TensorFlow operations, use to load and consume TensorFlow operations from an external library. 15 | 16 | 17 | Use the static method to load a dynamic library. 18 | Once that function returns 19 | 20 | 21 | 22 | 23 | 24 | 25 | Method 26 | 27 | 1.0.0.0 28 | 29 | 30 | TensorFlow.TFLibrary 31 | 32 | 33 | 34 | 35 | 36 | 37 | Name of the library to load, this is a platform specific name. 38 | Status buffer, if specified a status code will be left here, if not specified, a exception is raised if there is an error. 39 | 40 | Load the library specified by and register the operations and 41 | kernels present in that library. 42 | 43 | Handle to the loaded library. 44 | 45 | The provided is passed to the operating system dynamic loader 46 | and it will load the library using the operating system defined search paths and rules to load this. 
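A short sketch of the call; the library file name is a placeholder, and the TFStatus members used here are assumed to behave as their names suggest:

```csharp
using System;
using TensorFlow;

var status = new TFStatus ();
TFLibrary customOps = TFLibrary.FromFile ("libmy_custom_ops.so", status);
if (!status.Ok)
    Console.WriteLine ("Failed to load the custom kernels: " + status.StatusMessage);
```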
47 | 48 | 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFScope.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Object 10 | 11 | 12 | 13 | System.IDisposable 14 | 15 | 16 | 17 | 18 | TFGraph name scope handle 19 | 20 | 21 | Instances of this class when disposed restore the CurrentNameScope to the 22 | value they had when the TFGraph.WithScope method was called. 23 | 24 | 25 | 26 | 27 | 28 | 29 | Method 30 | 31 | 1.0.0.0 32 | 33 | 34 | System.Void 35 | 36 | 37 | 38 | 39 | Pops the name space to the previous namescope in use. 40 | 41 | Call when you are finished using the 42 | to restore the previous name scope in use in the . 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFSession+PartialRunToken.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Object 10 | 11 | 12 | 13 | System.IDisposable 14 | 15 | 16 | 17 | 18 | Token returned from using one of the Partial Run Setup methods from , 19 | and use this token subsequently for other invocations. 20 | 21 | 22 | Calling Dispose on this object will release the resources associated with setting up 23 | a partial run. 24 | 25 | 26 | 27 | 28 | 29 | 30 | Constructor 31 | 32 | 1.0.0.0 33 | 34 | 35 | 36 | To be added. 37 | To be added. 38 | 39 | 40 | 41 | 42 | 43 | Method 44 | 45 | 1.0.0.0 46 | 47 | 48 | System.Void 49 | 50 | 51 | 52 | To be added. 53 | To be added. 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /ecmadocs/en/TensorFlow/TFTensor+Deallocator.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | TensorFlowSharp 6 | 1.0.0.0 7 | 8 | 9 | System.Delegate 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | System.Void 18 | 19 | 20 | To be added. 21 | To be added. 22 | To be added. 23 | 24 | Signature that methods must conform to to be used to release memory that was passed to a manually allocated TFTensor 25 | 26 | To be added. 27 | 28 | 29 | -------------------------------------------------------------------------------- /ecmadocs/en/ns-TensorFlow.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | .NET Bindings for TensorFlow. 4 | 5 | 6 | This is the API documentation for the TensorFlowSharp project. 
7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/BitwiseOperationTests.cs: -------------------------------------------------------------------------------- 1 | using TensorFlow; 2 | using Xunit; 3 | 4 | namespace TensorFlowSharp.Tests.CSharp 5 | { 6 | public class BitwiseOperationTests 7 | { 8 | [Theory] 9 | [InlineData(2, 3, 2)] 10 | [InlineData(3, 0, 0)] 11 | [InlineData(1, 3, 1)] 12 | public void Should_EvaluateBitwiseAnd(int aValue, int bValue, int expected) 13 | { 14 | using (var graph = new TFGraph()) 15 | using (var session = new TFSession(graph)) 16 | { 17 | TFOutput a = graph.Placeholder(TFDataType.Int32); 18 | TFOutput b = graph.Placeholder(TFDataType.Int32); 19 | 20 | TFOutput y = graph.BitwiseAnd(a, b); 21 | 22 | TFTensor[] result = session.Run(new[] { a, b }, new TFTensor[] { aValue, bValue }, new[] { y }); 23 | 24 | Assert.Equal(expected, (int)result[0].GetValue()); 25 | } 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/CondTests.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using TensorFlow; 3 | using Xunit; 4 | 5 | namespace TensorFlowSharp.Tests.CSharp 6 | { 7 | public class CondTests 8 | { 9 | [Theory] 10 | [InlineData (false)] 11 | [InlineData (true)] 12 | public void Should_ExecuteOnlyOne (bool flag) 13 | { 14 | using (var graph = new TFGraph ()) { 15 | 16 | var W = graph.VariableV2 (TFShape.Scalar, TFDataType.Double, operName: "W"); 17 | 18 | var b = graph.VariableV2 (TFShape.Scalar, TFDataType.Double, operName: "b"); 19 | 20 | var pred = graph.Const (flag); 21 | 22 | var init = graph.Cond (pred, 23 | () => graph.Assign (W, graph.Const (1.0)), 24 | () => graph.Assign (b, graph.Const (-0.3))); 25 | 26 | using (var sess = new TFSession (graph)) { 27 | 28 | Assert.Throws (() => sess.GetRunner ().Fetch (W).Run ()); // ok 29 | Assert.Throws (() => sess.GetRunner ().Fetch (b).Run ()); // ok 30 | 31 | var r1 = sess.GetRunner ().AddTarget (init.Operation).Run (); 32 | 33 | if (flag) { 34 | var rW = sess.GetRunner ().Fetch (W).Run (); 35 | Assert.Throws (() => sess.GetRunner ().Fetch (b).Run ()); 36 | Assert.Equal (1.0, (double)rW [0].GetValue ()); 37 | } else { 38 | Assert.Throws (() => sess.GetRunner ().Fetch (W).Run ()); 39 | var rb = sess.GetRunner ().Fetch (b).Run (); 40 | Assert.Equal (-0.3, (double)rb [0].GetValue ()); 41 | } 42 | 43 | } 44 | } 45 | } 46 | 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/GradientTests.cs: -------------------------------------------------------------------------------- 1 | using TensorFlow; 2 | using Xunit; 3 | 4 | namespace TensorFlowSharp.Tests.CSharp 5 | { 6 | public class GradientTests 7 | { 8 | private const float _tolerance = 0.000001f; 9 | [Fact] 10 | public void ShouldAddGradients () 11 | { 12 | using (var graph = new TFGraph ()) 13 | using (var session = new TFSession (graph)) { 14 | var x = graph.Const (3.0); 15 | 16 | var y1 = graph.Square (x, "Square1"); 17 | var y2 = graph.Square (y1, "Square2"); 18 | 19 | var y3 = graph.Square (y2, "Square3"); 20 | var g = graph.AddGradients (new TFOutput [] { y1, y3 }, new [] { x }); 21 | 22 | var r = session.Run (new TFOutput [] { }, new TFTensor [] { }, g); 23 | var dy = (double)r [0].GetValue (); 24 | Assert.Equal (17502.0, dy); 25 | } 26 | 
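// The expected value can be derived by hand: with x = 3, y1 = x^2 and y3 = ((x^2)^2)^2 = x^8,
// so d(y1 + y3)/dx = 2x + 8x^7 = 2*3 + 8*2187 = 6 + 17496 = 17502, the value asserted above.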
} 27 | 28 | 29 | [Fact] 30 | public void ComputeGradientMSE() 31 | { 32 | using (var graph = new TFGraph()) 33 | using (var session = new TFSession(graph)) 34 | { 35 | var X = graph.Const(5.5f); 36 | var Y = graph.Const(2.09f); 37 | 38 | var W = graph.Const(0.1078f); 39 | var b = graph.Const(0.1021f); 40 | var pred = graph.Add(graph.Mul(X, W, "x_w"), b); 41 | 42 | var cost = graph.Div(graph.ReduceSum(graph.Pow(graph.Sub(pred, Y), graph.Const(2f))), graph.Mul(graph.Const(2f), graph.Const((float)17), "2_n_samples")); 43 | 44 | var g = graph.AddGradients(new TFOutput[] { cost }, new[] { W }); 45 | 46 | var r = session.Run(new TFOutput[] { }, new TFTensor[] { }, new TFOutput[] { cost, g[0] }); 47 | var d = (float)r[0].GetValue(); 48 | Assert.InRange(d, 0.057236027 - _tolerance, 0.057236027 + _tolerance); 49 | d = (float)r[1].GetValue(); 50 | Assert.InRange(d, -0.4513235 - _tolerance, -0.4513235 + _tolerance); 51 | } 52 | } 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/PaddingFIFOQueueTests.cs: -------------------------------------------------------------------------------- 1 | using System.Collections.Generic; 2 | using System.Linq; 3 | using TensorFlow; 4 | using Xunit; 5 | 6 | namespace TensorFlowSharp.Tests.CSharp 7 | { 8 | public class PaddingFIFOQueueTests 9 | { 10 | [Fact] 11 | public void Should_EnqueueAndDequeue_ScalarValues() 12 | { 13 | using (var graph = new TFGraph()) 14 | using (var session = new TFSession(graph)) 15 | { 16 | int[] numbersToEnqueue = new int[] { 5, 8, 9 }; 17 | 18 | TFOutput a = graph.Placeholder(TFDataType.Int32); 19 | TFOutput b = graph.Placeholder(TFDataType.Int32); 20 | TFOutput c = graph.Placeholder(TFDataType.Int32); 21 | var queue = new PaddingFIFOQueue(session, new[] { TFDataType.Int32 }, new[] { TFShape.Scalar }); 22 | queue.EnqueueExecute(new[] { a }, new[] { (TFTensor)numbersToEnqueue[0] }); 23 | queue.EnqueueExecute(new[] { b }, new[] { (TFTensor)numbersToEnqueue[1] }); 24 | queue.EnqueueExecute(new[] { c }, new[] { (TFTensor)numbersToEnqueue[2] }); 25 | int size = queue.GetSizeExecute(); 26 | Assert.Equal(numbersToEnqueue.Length, size); 27 | 28 | List dequeuedNumbers = new List(); 29 | dequeuedNumbers.Add(queue.DequeueExecute().Single()); 30 | dequeuedNumbers.Add(queue.DequeueExecute().Single()); 31 | dequeuedNumbers.Add(queue.DequeueExecute().Single()); 32 | 33 | Assert.Equal(numbersToEnqueue, dequeuedNumbers.ToArray()); 34 | } 35 | } 36 | } 37 | } -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/PartialRunTests.cs: -------------------------------------------------------------------------------- 1 | using TensorFlow; 2 | using Xunit; 3 | 4 | namespace TensorFlowSharp.Tests.CSharp 5 | { 6 | public class PartialRunTests 7 | { 8 | [Fact] 9 | public void Should_RunPartialRun() 10 | { 11 | using (var graph = new TFGraph()) 12 | using (var session = new TFSession(graph)) 13 | { 14 | float aValue = 1; 15 | float bValue = 2; 16 | 17 | var a = graph.Placeholder(TFDataType.Float); 18 | var b = graph.Placeholder(TFDataType.Float); 19 | var c = graph.Placeholder(TFDataType.Float); 20 | 21 | var r1 = graph.Add(a, b); 22 | var r2 = graph.Mul(r1, c); 23 | 24 | var h = session.PartialRunSetup(new[] { a, b, c }, new[] { r1, r2 }, new[] { r1.Operation, r2.Operation }); 25 | var res = session.PartialRun(h, new[] { a, b }, new TFTensor[] { aValue, bValue }, new TFOutput[] { r1 }, new[] { r1.Operation }); // 1+2=3 
26 | var calculated = (float)res[0].GetValue(); 27 | Assert.Equal(3, calculated); 28 | 29 | float temp = calculated * 17; // 3*17=51 30 | res = session.PartialRun(h, new[] { c }, new TFTensor[] { temp }, new[] { r2 }, new[] { r2.Operation }); // 51*3=153 31 | calculated = (float)res[0].GetValue(); 32 | Assert.Equal(153, calculated); 33 | } 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("TensorFlowSharp.Tests.CSharp")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("TensorFlowSharp.Tests.CSharp")] 13 | [assembly: AssemblyCopyright("Copyright © 2017")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 20 | [assembly: ComVisible(false)] 21 | 22 | // The following GUID is for the ID of the typelib if this project is exposed to COM 23 | [assembly: Guid("6504a704-575c-48d0-a4d2-422a7010936b")] 24 | 25 | // Version information for an assembly consists of the following four values: 26 | // 27 | // Major Version 28 | // Minor Version 29 | // Build Number 30 | // Revision 31 | // 32 | // You can specify all the values or you can default the Build and Revision Numbers 33 | // by using the '*' as shown below: 34 | // [assembly: AssemblyVersion("1.0.*")] 35 | [assembly: AssemblyVersion("1.0.0.0")] 36 | [assembly: AssemblyFileVersion("1.0.0.0")] 37 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/SessionTests.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Linq; 3 | using TensorFlow; 4 | using Xunit; 5 | 6 | namespace TensorFlowSharp.Tests.CSharp 7 | { 8 | public class SessionTests 9 | { 10 | [Fact] 11 | public void Should_ListDeviceReturnDevices () 12 | { 13 | using (var graph = new TFGraph ()) 14 | using (var session = new TFSession (graph)) { 15 | var devices = session.ListDevices (); 16 | 17 | Assert.True(devices.Any()); 18 | } 19 | } 20 | 21 | [Theory] 22 | [InlineData("Placeholder")] 23 | [InlineData("Placeholder:0")] 24 | public void ParseOutput_ThrowsForMissingOp (string name) 25 | { 26 | using (var graph = new TFGraph ()) 27 | using (var session = new TFSession (graph)) 28 | { 29 | var runner = session.GetRunner(); 30 | Assert.Throws(() => runner.AddInput(name, new TFTensor(1))); 31 | } 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/ShapeTests.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using TensorFlow; 4 | using Xunit; 5 | 6 | namespace TensorFlowSharp.Tests.CSharp 7 | { 8 
| public class ShapeTests 9 | { 10 | [Fact] 11 | public void Should_ShapeAutomaticallyConvertToTensor () 12 | { 13 | using (var graph = new TFGraph ()) 14 | using (var session = new TFSession (graph)) { 15 | 16 | var x = graph.Const (new TFShape(2, 3)); 17 | 18 | TFTensor [] result = session.Run (new TFOutput [] { }, new TFTensor [] { }, new TFOutput [] { x }); 19 | 20 | int[] actual = (int[])result [0].GetValue (); 21 | Assert.Equal (new [] { 2, 3 }, actual); 22 | } 23 | } 24 | 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Adagrad/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1061, b: 0.1023 2 | loss: 0.1411, W: 0.1143, b: 0.1060 3 | loss: 0.0540, W: 0.1197, b: 0.1082 4 | loss: 0.1528, W: 0.1270, b: 0.1117 5 | loss: 0.0145, W: 0.1293, b: 0.1128 6 | loss: 0.0250, W: 0.1311, b: 0.1142 7 | loss: 0.1141, W: 0.1378, b: 0.1170 8 | loss: 0.0779, W: 0.1410, b: 0.1193 9 | loss: 0.0528, W: 0.1442, b: 0.1212 10 | loss: 0.0182, W: 0.1447, b: 0.1223 11 | loss: 0.0836, W: 0.1482, b: 0.1245 12 | loss: 0.0892, W: 0.1529, b: 0.1268 13 | loss: 0.0149, W: 0.1539, b: 0.1277 14 | loss: 0.0702, W: 0.1569, b: 0.1297 15 | loss: 0.0579, W: 0.1588, b: 0.1315 16 | loss: 0.0525, W: 0.1616, b: 0.1331 17 | loss: 0.0130, W: 0.1621, b: 0.1340 18 | loss: 0.0313, W: 0.1629, b: 0.1352 19 | loss: 0.1071, W: 0.1647, b: 0.1375 20 | loss: 0.0322, W: 0.1660, b: 0.1387 21 | loss: 0.1104, W: 0.1688, b: 0.1410 22 | loss: 0.0043, W: 0.1693, b: 0.1414 23 | loss: 0.0155, W: 0.1700, b: 0.1422 24 | loss: 0.0717, W: 0.1730, b: 0.1440 25 | loss: 0.0562, W: 0.1747, b: 0.1455 26 | loss: 0.0329, W: 0.1763, b: 0.1467 27 | loss: 0.0141, W: 0.1766, b: 0.1475 28 | loss: 0.0606, W: 0.1786, b: 0.1491 29 | loss: 0.0568, W: 0.1813, b: 0.1506 30 | loss: 0.0085, W: 0.1819, b: 0.1511 31 | loss: 0.0496, W: 0.1837, b: 0.1525 32 | loss: 0.0444, W: 0.1850, b: 0.1538 33 | loss: 0.0338, W: 0.1867, b: 0.1549 34 | loss: 0.0094, W: 0.1871, b: 0.1555 35 | loss: 0.0253, W: 0.1876, b: 0.1565 36 | loss: 0.0930, W: 0.1890, b: 0.1583 37 | loss: 0.0234, W: 0.1898, b: 0.1593 38 | loss: 0.0908, W: 0.1918, b: 0.1610 39 | loss: 0.0012, W: 0.1921, b: 0.1612 40 | loss: 0.0110, W: 0.1925, b: 0.1618 41 | loss: 0.0514, W: 0.1947, b: 0.1632 42 | loss: 0.0445, W: 0.1959, b: 0.1644 43 | loss: 0.0227, W: 0.1970, b: 0.1652 44 | loss: 0.0116, W: 0.1972, b: 0.1659 45 | loss: 0.0476, W: 0.1987, b: 0.1671 46 | loss: 0.0392, W: 0.2007, b: 0.1682 47 | loss: 0.0051, W: 0.2010, b: 0.1686 48 | loss: 0.0374, W: 0.2024, b: 0.1697 49 | loss: 0.0360, W: 0.2034, b: 0.1708 50 | loss: 0.0230, W: 0.2047, b: 0.1716 51 | loss: 0.0072, W: 0.2049, b: 0.1721 52 | loss: 0.0213, W: 0.2054, b: 0.1729 53 | loss: 0.0834, W: 0.2065, b: 0.1745 54 | loss: 0.0179, W: 0.2072, b: 0.1752 55 | loss: 0.0776, W: 0.2088, b: 0.1767 56 | loss: 0.0001, W: 0.2089, b: 0.1768 57 | loss: 0.0081, W: 0.2092, b: 0.1772 58 | loss: 0.0384, W: 0.2109, b: 0.1783 59 | loss: 0.0365, W: 0.2119, b: 0.1793 60 | loss: 0.0162, W: 0.2128, b: 0.1800 61 | loss: 0.0099, W: 0.2130, b: 0.1805 62 | loss: 0.0387, W: 0.2141, b: 0.1816 63 | loss: 0.0278, W: 0.2157, b: 0.1824 64 | loss: 0.0030, W: 0.2159, b: 0.1827 65 | loss: 0.0291, W: 0.2171, b: 0.1836 66 | loss: 0.0299, W: 0.2179, b: 0.1845 67 | loss: 0.0159, W: 0.2189, b: 0.1852 68 | loss: 0.0056, W: 0.2191, b: 0.1856 69 | loss: 0.0184, W: 0.2194, b: 0.1863 70 | loss: 0.0761, W: 0.2204, b: 0.1877 71 | loss: 0.0140, W: 0.2210, 
b: 0.1883 72 | loss: 0.0679, W: 0.2224, b: 0.1896 73 | loss: 0.0000, W: 0.2224, b: 0.1896 74 | loss: 0.0061, W: 0.2227, b: 0.1900 75 | loss: 0.0293, W: 0.2240, b: 0.1908 76 | loss: 0.0306, W: 0.2249, b: 0.1917 77 | loss: 0.0117, W: 0.2256, b: 0.1923 78 | loss: 0.0086, W: 0.2257, b: 0.1927 79 | loss: 0.0321, W: 0.2267, b: 0.1936 80 | loss: 0.0200, W: 0.2279, b: 0.1943 81 | loss: 0.0018, W: 0.2281, b: 0.1946 82 | loss: 0.0230, W: 0.2291, b: 0.1953 83 | loss: 0.0254, W: 0.2298, b: 0.1961 84 | loss: 0.0111, W: 0.2306, b: 0.1966 85 | loss: 0.0044, W: 0.2307, b: 0.1969 86 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Adagrad/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(cost, name = "AdagradOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(5): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/AdagradTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | X = tf.placeholder(tf.float32) 17 | Y = tf.placeholder(tf.float32) 18 | 19 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 20 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | 22 | pred = tf.add(tf.multiply(X,W), b) 23 | 24 | global_step = tf.Variable(0, trainable=False) 25 | learning_rate = 0.01 26 | decay_rate = 0.5 27 | decayed_learning_rate = learning_rate * (1. / (1. 
+ decay_rate * tf.cast(global_step, tf.float32))) 28 | 29 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 30 | optimizer = tf.train.AdagradOptimizer(decayed_learning_rate).minimize(cost, global_step=global_step, name = "AdagradOptimizer") 31 | 32 | init = tf.global_variables_initializer() 33 | with tf.Session() as session: 34 | session.run(init) 35 | for e in range(5): 36 | for i in range(n_samples): 37 | _, cost_v, W_v, b_v, pred_v, lr_v, step_v = session.run([optimizer, cost, W, b, pred, decayed_learning_rate, global_step], feed_dict = {X: train_x[i], Y: train_y[i]}) 38 | print(f"step: {step_v:d}, loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}, lr: {lr_v:.8f}") 39 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Adam/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1000, b: 0.1000 2 | loss: 0.1380, W: 0.1100, b: 0.1100 3 | loss: 0.0507, W: 0.1195, b: 0.1198 4 | loss: 0.1415, W: 0.1292, b: 0.1295 5 | loss: 0.0103, W: 0.1388, b: 0.1393 6 | loss: 0.0192, W: 0.1478, b: 0.1484 7 | loss: 0.0830, W: 0.1564, b: 0.1572 8 | loss: 0.0583, W: 0.1654, b: 0.1663 9 | loss: 0.0312, W: 0.1745, b: 0.1754 10 | loss: 0.0120, W: 0.1836, b: 0.1845 11 | loss: 0.0484, W: 0.1919, b: 0.1932 12 | loss: 0.0355, W: 0.2005, b: 0.2020 13 | loss: 0.0031, W: 0.2095, b: 0.2108 14 | loss: 0.0262, W: 0.2178, b: 0.2191 15 | loss: 0.0245, W: 0.2262, b: 0.2274 16 | loss: 0.0083, W: 0.2345, b: 0.2356 17 | loss: 0.0027, W: 0.2427, b: 0.2436 18 | loss: 0.0114, W: 0.2502, b: 0.2511 19 | loss: 0.0552, W: 0.2573, b: 0.2584 20 | loss: 0.0040, W: 0.2646, b: 0.2662 21 | loss: 0.0353, W: 0.2714, b: 0.2736 22 | loss: 0.0079, W: 0.2786, b: 0.2813 23 | loss: 0.0003, W: 0.2846, b: 0.2877 24 | loss: 0.0016, W: 0.2901, b: 0.2937 25 | loss: 0.0065, W: 0.2955, b: 0.2993 26 | loss: 0.0001, W: 0.3008, b: 0.3049 27 | loss: 0.0018, W: 0.3056, b: 0.3100 28 | loss: 0.0032, W: 0.3100, b: 0.3148 29 | loss: 0.0018, W: 0.3144, b: 0.3196 30 | loss: 0.0039, W: 0.3179, b: 0.3236 31 | loss: 0.0000, W: 0.3209, b: 0.3269 32 | loss: 0.0020, W: 0.3235, b: 0.3300 33 | loss: 0.0051, W: 0.3262, b: 0.3330 34 | loss: 0.0001, W: 0.3280, b: 0.3353 35 | loss: 0.0022, W: 0.3296, b: 0.3373 36 | loss: 0.0273, W: 0.3313, b: 0.3395 37 | loss: 0.0002, W: 0.3334, b: 0.3425 38 | loss: 0.0104, W: 0.3353, b: 0.3451 39 | loss: 0.0291, W: 0.3377, b: 0.3482 40 | loss: 0.0010, W: 0.3387, b: 0.3498 41 | loss: 0.0027, W: 0.3395, b: 0.3511 42 | loss: 0.0006, W: 0.3396, b: 0.3519 43 | loss: 0.0048, W: 0.3399, b: 0.3528 44 | loss: 0.0005, W: 0.3397, b: 0.3531 45 | loss: 0.0002, W: 0.3395, b: 0.3536 46 | loss: 0.0090, W: 0.3394, b: 0.3541 47 | loss: 0.0074, W: 0.3383, b: 0.3539 48 | loss: 0.0006, W: 0.3368, b: 0.3531 49 | loss: 0.0009, W: 0.3352, b: 0.3523 50 | loss: 0.0076, W: 0.3339, b: 0.3517 51 | loss: 0.0002, W: 0.3319, b: 0.3505 52 | loss: 0.0020, W: 0.3300, b: 0.3494 53 | loss: 0.0274, W: 0.3285, b: 0.3487 54 | loss: 0.0001, W: 0.3279, b: 0.3492 55 | loss: 0.0122, W: 0.3273, b: 0.3497 56 | loss: 0.0253, W: 0.3276, b: 0.3509 57 | loss: 0.0006, W: 0.3266, b: 0.3508 58 | loss: 0.0008, W: 0.3256, b: 0.3505 59 | loss: 0.0017, W: 0.3244, b: 0.3501 60 | loss: 0.0022, W: 0.3236, b: 0.3500 61 | loss: 0.0009, W: 0.3225, b: 0.3496 62 | loss: 0.0013, W: 0.3215, b: 0.3494 63 | loss: 0.0036, W: 0.3209, b: 0.3495 64 | loss: 0.0046, W: 
0.3196, b: 0.3491 65 | loss: 0.0000, W: 0.3181, b: 0.3483 66 | loss: 0.0023, W: 0.3166, b: 0.3475 67 | loss: 0.0033, W: 0.3157, b: 0.3472 68 | loss: 0.0000, W: 0.3142, b: 0.3465 69 | loss: 0.0030, W: 0.3128, b: 0.3458 70 | loss: 0.0320, W: 0.3118, b: 0.3456 71 | loss: 0.0000, W: 0.3118, b: 0.3469 72 | loss: 0.0165, W: 0.3119, b: 0.3481 73 | loss: 0.0200, W: 0.3130, b: 0.3502 74 | loss: 0.0002, W: 0.3128, b: 0.3510 75 | loss: 0.0001, W: 0.3126, b: 0.3516 76 | loss: 0.0029, W: 0.3122, b: 0.3520 77 | loss: 0.0011, W: 0.3124, b: 0.3529 78 | loss: 0.0011, W: 0.3122, b: 0.3534 79 | loss: 0.0022, W: 0.3121, b: 0.3541 80 | loss: 0.0020, W: 0.3124, b: 0.3552 81 | loss: 0.0039, W: 0.3121, b: 0.3557 82 | loss: 0.0001, W: 0.3114, b: 0.3557 83 | loss: 0.0028, W: 0.3109, b: 0.3558 84 | loss: 0.0026, W: 0.3108, b: 0.3563 85 | loss: 0.0000, W: 0.3101, b: 0.3564 86 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Adam/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=1e-7).minimize(cost, name = "AdamOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(5): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/AdamTimeDecay/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1000, b: 0.1000 2 | loss: 0.1380, W: 0.1100, b: 0.1100 3 | loss: 0.0523, W: 0.1163, b: 0.1165 4 | loss: 0.1496, W: 0.1212, b: 0.1214 5 | loss: 0.0145, W: 0.1250, b: 0.1253 6 | loss: 0.0244, W: 0.1281, b: 0.1284 7 | loss: 0.1128, W: 0.1305, b: 0.1309 8 | loss: 0.0793, W: 0.1328, b: 0.1333 9 | loss: 0.0553, W: 0.1348, b: 0.1353 10 | loss: 0.0182, W: 0.1367, b: 0.1372 11 | loss: 0.0865, W: 0.1382, b: 0.1389 12 | loss: 0.0971, W: 0.1397, b: 0.1404 13 | loss: 0.0169, W: 0.1412, b: 0.1419 14 | loss: 0.0774, W: 0.1424, b: 0.1432 15 | loss: 0.0630, W: 0.1436, b: 0.1444 16 | loss: 0.0621, W: 0.1448, b: 0.1456 17 | loss: 0.0145, W: 0.1458, b: 0.1467 18 | loss: 0.0335, W: 0.1468, b: 0.1477 19 | loss: 0.1132, W: 0.1477, b: 0.1486 20 | loss: 0.0371, W: 0.1485, b: 0.1495 21 | loss: 0.1221, W: 0.1493, b: 0.1504 22 | 
loss: 0.0074, W: 0.1501, b: 0.1513 23 | loss: 0.0185, W: 0.1508, b: 0.1521 24 | loss: 0.0883, W: 0.1514, b: 0.1528 25 | loss: 0.0664, W: 0.1521, b: 0.1535 26 | loss: 0.0435, W: 0.1528, b: 0.1542 27 | loss: 0.0158, W: 0.1534, b: 0.1549 28 | loss: 0.0741, W: 0.1540, b: 0.1555 29 | loss: 0.0792, W: 0.1545, b: 0.1561 30 | loss: 0.0132, W: 0.1551, b: 0.1567 31 | loss: 0.0663, W: 0.1557, b: 0.1573 32 | loss: 0.0559, W: 0.1562, b: 0.1579 33 | loss: 0.0519, W: 0.1568, b: 0.1584 34 | loss: 0.0126, W: 0.1573, b: 0.1589 35 | loss: 0.0306, W: 0.1578, b: 0.1594 36 | loss: 0.1066, W: 0.1582, b: 0.1599 37 | loss: 0.0329, W: 0.1586, b: 0.1604 38 | loss: 0.1132, W: 0.1590, b: 0.1609 39 | loss: 0.0054, W: 0.1595, b: 0.1613 40 | loss: 0.0163, W: 0.1599, b: 0.1618 41 | loss: 0.0788, W: 0.1602, b: 0.1622 42 | loss: 0.0610, W: 0.1606, b: 0.1626 43 | loss: 0.0386, W: 0.1610, b: 0.1630 44 | loss: 0.0147, W: 0.1614, b: 0.1634 45 | loss: 0.0683, W: 0.1617, b: 0.1638 46 | loss: 0.0708, W: 0.1621, b: 0.1642 47 | loss: 0.0114, W: 0.1625, b: 0.1646 48 | loss: 0.0607, W: 0.1628, b: 0.1649 49 | loss: 0.0522, W: 0.1631, b: 0.1653 50 | loss: 0.0466, W: 0.1635, b: 0.1657 51 | loss: 0.0115, W: 0.1638, b: 0.1660 52 | loss: 0.0289, W: 0.1641, b: 0.1663 53 | loss: 0.1028, W: 0.1644, b: 0.1667 54 | loss: 0.0304, W: 0.1647, b: 0.1670 55 | loss: 0.1079, W: 0.1650, b: 0.1673 56 | loss: 0.0043, W: 0.1653, b: 0.1676 57 | loss: 0.0150, W: 0.1656, b: 0.1679 58 | loss: 0.0731, W: 0.1658, b: 0.1682 59 | loss: 0.0577, W: 0.1661, b: 0.1685 60 | loss: 0.0355, W: 0.1664, b: 0.1688 61 | loss: 0.0140, W: 0.1666, b: 0.1691 62 | loss: 0.0646, W: 0.1669, b: 0.1694 63 | loss: 0.0655, W: 0.1671, b: 0.1696 64 | loss: 0.0103, W: 0.1674, b: 0.1699 65 | loss: 0.0571, W: 0.1677, b: 0.1702 66 | loss: 0.0497, W: 0.1679, b: 0.1704 67 | loss: 0.0431, W: 0.1682, b: 0.1707 68 | loss: 0.0108, W: 0.1684, b: 0.1710 69 | loss: 0.0278, W: 0.1686, b: 0.1712 70 | loss: 0.1002, W: 0.1689, b: 0.1715 71 | loss: 0.0287, W: 0.1691, b: 0.1717 72 | loss: 0.1042, W: 0.1693, b: 0.1719 73 | loss: 0.0035, W: 0.1695, b: 0.1722 74 | loss: 0.0141, W: 0.1697, b: 0.1724 75 | loss: 0.0690, W: 0.1699, b: 0.1726 76 | loss: 0.0553, W: 0.1701, b: 0.1729 77 | loss: 0.0333, W: 0.1703, b: 0.1731 78 | loss: 0.0135, W: 0.1705, b: 0.1733 79 | loss: 0.0619, W: 0.1707, b: 0.1735 80 | loss: 0.0616, W: 0.1709, b: 0.1738 81 | loss: 0.0094, W: 0.1711, b: 0.1740 82 | loss: 0.0544, W: 0.1713, b: 0.1742 83 | loss: 0.0479, W: 0.1715, b: 0.1744 84 | loss: 0.0405, W: 0.1717, b: 0.1746 85 | loss: 0.0103, W: 0.1719, b: 0.1748 86 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/AdamTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
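# For reference, the schedule defined below is inverse time decay,
#   lr_t = learning_rate / (1 + decay_rate * global_step)
# so with learning_rate = 0.01 and decay_rate = 0.5 the effective Adam step size
# shrinks as global_step grows.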
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | X = tf.placeholder(tf.float32) 17 | Y = tf.placeholder(tf.float32) 18 | 19 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 20 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | 22 | pred = tf.add(tf.multiply(X,W), b) 23 | 24 | global_step = tf.Variable(0, trainable=False) 25 | learning_rate = 0.01 26 | decay_rate = 0.5 27 | decayed_learning_rate = learning_rate * (1. / (1. + decay_rate * tf.cast(global_step, tf.float32))) 28 | 29 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 30 | optimizer = tf.train.AdamOptimizer(decayed_learning_rate, epsilon=1e-7).minimize(cost, global_step=global_step, name = "AdamOptimizer") 31 | 32 | init = tf.global_variables_initializer() 33 | with tf.Session() as session: 34 | session.run(init) 35 | for e in range(5): 36 | for i in range(n_samples): 37 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 38 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 39 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Momentum/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1025, b: 0.1007 2 | loss: 0.1434, W: 0.1104, b: 0.1027 3 | loss: 0.0560, W: 0.1220, b: 0.1053 4 | loss: 0.1510, W: 0.1414, b: 0.1090 5 | loss: 0.0108, W: 0.1613, b: 0.1126 6 | loss: 0.0183, W: 0.1812, b: 0.1164 7 | loss: 0.0642, W: 0.2075, b: 0.1206 8 | loss: 0.0418, W: 0.2356, b: 0.1251 9 | loss: 0.0112, W: 0.2636, b: 0.1296 10 | loss: 0.0080, W: 0.2895, b: 0.1339 11 | loss: 0.0126, W: 0.3155, b: 0.1381 12 | loss: 0.0002, W: 0.3384, b: 0.1419 13 | loss: 0.0025, W: 0.3581, b: 0.1451 14 | loss: 0.0003, W: 0.3754, b: 0.1480 15 | loss: 0.0007, W: 0.3914, b: 0.1506 16 | loss: 0.0207, W: 0.4012, b: 0.1525 17 | loss: 0.0003, W: 0.4099, b: 0.1541 18 | loss: 0.0011, W: 0.4181, b: 0.1557 19 | loss: 0.0172, W: 0.4275, b: 0.1576 20 | loss: 0.0052, W: 0.4345, b: 0.1591 21 | loss: 0.0004, W: 0.4413, b: 0.1604 22 | loss: 0.0684, W: 0.4412, b: 0.1608 23 | loss: 0.0054, W: 0.4401, b: 0.1608 24 | loss: 0.0355, W: 0.4328, b: 0.1602 25 | loss: 0.0017, W: 0.4253, b: 0.1596 26 | loss: 0.0216, W: 0.4148, b: 0.1584 27 | loss: 0.0008, W: 0.4055, b: 0.1575 28 | loss: 0.0010, W: 0.3963, b: 0.1566 29 | loss: 0.0276, W: 0.3820, b: 0.1552 30 | loss: 0.0084, W: 0.3674, b: 0.1536 31 | loss: 0.0010, W: 0.3534, b: 0.1521 32 | loss: 0.0021, W: 0.3416, b: 0.1509 33 | loss: 0.0042, W: 0.3290, b: 0.1496 34 | loss: 0.0005, W: 0.3179, b: 0.1484 35 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/Momentum/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
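# For reference, tf.train.MomentumOptimizer keeps a velocity accumulator per
# variable; a sketch of the classical update it applies is:
#   accum = momentum * accum + grad
#   var   = var - learning_rate * accum
# with momentum = 0.9 and learning_rate = 0.01 as configured below.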
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9).minimize(cost, name = "MomentumOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(2): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumNesterov/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1047, b: 0.1014 2 | loss: 0.1421, W: 0.1175, b: 0.1045 3 | loss: 0.0528, W: 0.1321, b: 0.1076 4 | loss: 0.1418, W: 0.1579, b: 0.1121 5 | loss: 0.0070, W: 0.1770, b: 0.1156 6 | loss: 0.0152, W: 0.1958, b: 0.1192 7 | loss: 0.0522, W: 0.2257, b: 0.1236 8 | loss: 0.0341, W: 0.2531, b: 0.1281 9 | loss: 0.0068, W: 0.2784, b: 0.1320 10 | loss: 0.0069, W: 0.3004, b: 0.1359 11 | loss: 0.0098, W: 0.3241, b: 0.1398 12 | loss: 0.0009, W: 0.3413, b: 0.1428 13 | loss: 0.0028, W: 0.3559, b: 0.1452 14 | loss: 0.0002, W: 0.3691, b: 0.1475 15 | loss: 0.0010, W: 0.3825, b: 0.1497 16 | loss: 0.0168, W: 0.3862, b: 0.1508 17 | loss: 0.0001, W: 0.3931, b: 0.1522 18 | loss: 0.0018, W: 0.4003, b: 0.1537 19 | loss: 0.0210, W: 0.4105, b: 0.1559 20 | loss: 0.0031, W: 0.4157, b: 0.1570 21 | loss: 0.0017, W: 0.4232, b: 0.1585 22 | loss: 0.0574, W: 0.4182, b: 0.1582 23 | loss: 0.0032, W: 0.4174, b: 0.1582 24 | loss: 0.0224, W: 0.4077, b: 0.1575 25 | loss: 0.0002, W: 0.4031, b: 0.1571 26 | loss: 0.0139, W: 0.3933, b: 0.1561 27 | loss: 0.0013, W: 0.3877, b: 0.1558 28 | loss: 0.0001, W: 0.3821, b: 0.1554 29 | loss: 0.0195, W: 0.3674, b: 0.1541 30 | loss: 0.0061, W: 0.3561, b: 0.1529 31 | loss: 0.0003, W: 0.3463, b: 0.1519 32 | loss: 0.0028, W: 0.3399, b: 0.1514 33 | loss: 0.0039, W: 0.3295, b: 0.1504 34 | loss: 0.0005, W: 0.3223, b: 0.1498 35 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumNesterov/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
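# For reference, use_nesterov=True switches to the Nesterov-style update; a rough
# sketch (same accumulator definition as plain momentum) is:
#   accum = momentum * accum + grad
#   var   = var - learning_rate * (grad + momentum * accum)
# i.e. the step is taken as if the velocity update had already been applied.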
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True).minimize(cost, name = "MomentumOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(2): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumNesterovTimeDecay/expected.txt: -------------------------------------------------------------------------------- 1 | step: 1, loss: 0.0474, W: 0.1047, b: 0.1014, lr: 0.01000000 2 | step: 2, loss: 0.1421, W: 0.1132, b: 0.1035, lr: 0.00666667 3 | step: 3, loss: 0.0547, W: 0.1206, b: 0.1050, lr: 0.00500000 4 | step: 4, loss: 0.1523, W: 0.1312, b: 0.1069, lr: 0.00400000 5 | step: 5, loss: 0.0135, W: 0.1382, b: 0.1081, lr: 0.00333333 6 | step: 6, loss: 0.0232, W: 0.1440, b: 0.1092, lr: 0.00285714 7 | step: 7, loss: 0.1005, W: 0.1532, b: 0.1105, lr: 0.00250000 8 | step: 8, loss: 0.0696, W: 0.1608, b: 0.1117, lr: 0.00222222 9 | step: 9, loss: 0.0422, W: 0.1679, b: 0.1128, lr: 0.00200000 10 | step: 10, loss: 0.0163, W: 0.1732, b: 0.1137, lr: 0.00181818 11 | step: 11, loss: 0.0656, W: 0.1794, b: 0.1147, lr: 0.00166667 12 | step: 12, loss: 0.0588, W: 0.1863, b: 0.1156, lr: 0.00153846 13 | step: 13, loss: 0.0087, W: 0.1914, b: 0.1164, lr: 0.00142857 14 | step: 14, loss: 0.0465, W: 0.1970, b: 0.1172, lr: 0.00133333 15 | step: 15, loss: 0.0416, W: 0.2020, b: 0.1179, lr: 0.00125000 16 | step: 16, loss: 0.0265, W: 0.2069, b: 0.1186, lr: 0.00117647 17 | step: 17, loss: 0.0086, W: 0.2108, b: 0.1192, lr: 0.00111111 18 | step: 18, loss: 0.0230, W: 0.2144, b: 0.1198, lr: 0.00105263 19 | step: 19, loss: 0.0847, W: 0.2181, b: 0.1204, lr: 0.00100000 20 | step: 20, loss: 0.0174, W: 0.2214, b: 0.1209, lr: 0.00095238 21 | step: 21, loss: 0.0737, W: 0.2251, b: 0.1215, lr: 0.00090909 22 | step: 22, loss: 0.0000, W: 0.2278, b: 0.1220, lr: 0.00086957 23 | step: 23, loss: 0.0074, W: 0.2303, b: 0.1224, lr: 0.00083333 24 | step: 24, loss: 0.0289, W: 0.2332, b: 0.1228, lr: 0.00080000 25 | step: 25, loss: 0.0313, W: 0.2360, b: 0.1232, lr: 0.00076923 26 | step: 26, loss: 0.0112, W: 0.2384, b: 0.1236, lr: 0.00074074 27 | step: 27, loss: 0.0099, W: 0.2405, b: 0.1240, lr: 0.00071429 28 | step: 28, loss: 0.0300, W: 0.2428, b: 0.1243, lr: 0.00068966 29 | step: 29, loss: 0.0153, W: 0.2452, b: 0.1246, lr: 0.00066667 30 | step: 30, loss: 0.0015, W: 0.2470, b: 0.1249, lr: 0.00064516 31 | step: 31, loss: 0.0190, W: 0.2491, b: 0.1252, lr: 0.00062500 32 | step: 32, 
loss: 0.0231, W: 0.2510, b: 0.1255, lr: 0.00060606 33 | step: 33, loss: 0.0070, W: 0.2528, b: 0.1258, lr: 0.00058824 34 | step: 34, loss: 0.0045, W: 0.2543, b: 0.1260, lr: 0.00057143 35 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumNesterovTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | # 4 | # NOTE: This script is not used to generate the expected.txt file in this case 5 | # because of the tf.train.MomentumOptimizer implemention difference with decay. 6 | # The expected.txt is actually the output from the test itself. 7 | import tensorflow as tf 8 | 9 | # Training data 10 | train_x =[ 11 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 12 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 13 | ] 14 | train_y = [ 15 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 16 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 17 | ] 18 | n_samples = len(train_x) 19 | learning_rate = 0.01 20 | X = tf.placeholder(tf.float32) 21 | Y = tf.placeholder(tf.float32) 22 | 23 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 24 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 25 | 26 | pred = tf.add(tf.multiply(X,W), b) 27 | 28 | global_step = tf.Variable(0, trainable=False) 29 | learning_rate = 0.01 30 | decay_rate = 0.5 31 | decayed_learning_rate = learning_rate * (1. / (1. + decay_rate * tf.cast(global_step, tf.float32))) 32 | 33 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 34 | optimizer = tf.train.MomentumOptimizer(decayed_learning_rate, 0.9, use_nesterov=True).minimize(cost, global_step=global_step, name = "MomentumOptimizer") 35 | 36 | init = tf.global_variables_initializer() 37 | with tf.Session() as session: 38 | session.run(init) 39 | for e in range(2): 40 | for i in range(n_samples): 41 | _, cost_v, W_v, b_v, pred_v, lr_v, step_v = session.run([optimizer, cost, W, b, pred, decayed_learning_rate, global_step], feed_dict = {X: train_x[i], Y: train_y[i]}) 42 | print(f"step: {step_v:d}, loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}, lr: {lr_v:.8f}") 43 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumTimeDecay/expected.txt: -------------------------------------------------------------------------------- 1 | step: 1, loss: 0.0474, W: 0.1025, b: 0.1007, lr: 0.01000000 2 | step: 2, loss: 0.1434, W: 0.1078, b: 0.1021, lr: 0.00666667 3 | step: 3, loss: 0.0573, W: 0.1136, b: 0.1034, lr: 0.00500000 4 | step: 4, loss: 0.1589, W: 0.1214, b: 0.1048, lr: 0.00400000 5 | step: 5, loss: 0.0164, W: 0.1284, b: 0.1061, lr: 0.00333333 6 | step: 6, loss: 0.0255, W: 0.1344, b: 0.1072, lr: 0.00285714 7 | step: 7, loss: 0.1113, W: 0.1419, b: 0.1084, lr: 0.00250000 8 | step: 8, loss: 0.0763, W: 0.1492, b: 0.1095, lr: 0.00222222 9 | step: 9, loss: 0.0488, W: 0.1562, b: 0.1106, lr: 0.00200000 10 | step: 10, loss: 0.0175, W: 0.1622, b: 0.1116, lr: 0.00181818 11 | step: 11, loss: 0.0728, W: 0.1682, b: 0.1125, lr: 0.00166667 12 | step: 12, loss: 0.0695, W: 0.1747, b: 0.1135, lr: 0.00153846 13 | step: 13, loss: 0.0109, W: 0.1804, b: 0.1143, lr: 0.00142857 14 | step: 14, loss: 0.0534, W: 0.1860, b: 0.1151, lr: 0.00133333 
15 | step: 15, loss: 0.0462, W: 0.1913, b: 0.1159, lr: 0.00125000 16 | step: 16, loss: 0.0325, W: 0.1964, b: 0.1166, lr: 0.00117647 17 | step: 17, loss: 0.0097, W: 0.2009, b: 0.1173, lr: 0.00111111 18 | step: 18, loss: 0.0249, W: 0.2049, b: 0.1179, lr: 0.00105263 19 | step: 19, loss: 0.0891, W: 0.2088, b: 0.1185, lr: 0.00100000 20 | step: 20, loss: 0.0199, W: 0.2124, b: 0.1191, lr: 0.00095238 21 | step: 21, loss: 0.0797, W: 0.2161, b: 0.1197, lr: 0.00090909 22 | step: 22, loss: 0.0002, W: 0.2193, b: 0.1202, lr: 0.00086957 23 | step: 23, loss: 0.0085, W: 0.2221, b: 0.1206, lr: 0.00083333 24 | step: 24, loss: 0.0339, W: 0.2251, b: 0.1211, lr: 0.00080000 25 | step: 25, loss: 0.0345, W: 0.2279, b: 0.1215, lr: 0.00076923 26 | step: 26, loss: 0.0135, W: 0.2306, b: 0.1220, lr: 0.00074074 27 | step: 27, loss: 0.0106, W: 0.2331, b: 0.1223, lr: 0.00071429 28 | step: 28, loss: 0.0333, W: 0.2354, b: 0.1227, lr: 0.00068966 29 | step: 29, loss: 0.0189, W: 0.2379, b: 0.1231, lr: 0.00066667 30 | step: 30, loss: 0.0020, W: 0.2400, b: 0.1234, lr: 0.00064516 31 | step: 31, loss: 0.0218, W: 0.2422, b: 0.1237, lr: 0.00062500 32 | step: 32, loss: 0.0253, W: 0.2442, b: 0.1240, lr: 0.00060606 33 | step: 33, loss: 0.0090, W: 0.2462, b: 0.1243, lr: 0.00058824 34 | step: 34, loss: 0.0050, W: 0.2480, b: 0.1246, lr: 0.00057143 35 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/MomentumTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | # 4 | # NOTE: This script is not used to generate the expected.txt file in this case 5 | # because of the tf.train.MomentumOptimizer implemention difference with decay. 6 | # The expected.txt is actually the output from the test itself. 7 | import tensorflow as tf 8 | 9 | # Training data 10 | train_x =[ 11 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 12 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 13 | ] 14 | train_y = [ 15 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 16 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 17 | ] 18 | n_samples = len(train_x) 19 | X = tf.placeholder(tf.float32) 20 | Y = tf.placeholder(tf.float32) 21 | 22 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 23 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 24 | 25 | pred = tf.add(tf.multiply(X,W), b) 26 | 27 | global_step = tf.Variable(0, trainable=False) 28 | learning_rate = 0.01 29 | decay_rate = 0.5 30 | decayed_learning_rate = learning_rate * (1. / (1. 
+ decay_rate * tf.cast(global_step, tf.float32))) 31 | 32 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 33 | optimizer = tf.train.MomentumOptimizer(decayed_learning_rate, 0.9).minimize(cost, global_step=global_step, name = "MomentumOptimizer") 34 | 35 | init = tf.global_variables_initializer() 36 | with tf.Session() as session: 37 | session.run(init) 38 | for e in range(2): 39 | for i in range(n_samples): 40 | _, cost_v, W_v, b_v, pred_v, lr_v, step_v = session.run([optimizer, cost, W, b, pred, decayed_learning_rate, global_step], feed_dict = {X: train_x[i], Y: train_y[i]}) 41 | print(f"step: {step_v:d}, loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}, lr: {lr_v:.8f}") 42 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/RMSProp/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1026, b: 0.1008 2 | loss: 0.1434, W: 0.1088, b: 0.1022 3 | loss: 0.0568, W: 0.1139, b: 0.1032 4 | loss: 0.1587, W: 0.1242, b: 0.1049 5 | loss: 0.0156, W: 0.1277, b: 0.1054 6 | loss: 0.0257, W: 0.1305, b: 0.1062 7 | loss: 0.1157, W: 0.1440, b: 0.1079 8 | loss: 0.0751, W: 0.1511, b: 0.1093 9 | loss: 0.0478, W: 0.1581, b: 0.1105 10 | loss: 0.0173, W: 0.1594, b: 0.1112 11 | loss: 0.0746, W: 0.1682, b: 0.1129 12 | loss: 0.0695, W: 0.1807, b: 0.1146 13 | loss: 0.0097, W: 0.1832, b: 0.1152 14 | loss: 0.0515, W: 0.1917, b: 0.1168 15 | loss: 0.0437, W: 0.1975, b: 0.1184 16 | loss: 0.0289, W: 0.2053, b: 0.1197 17 | loss: 0.0087, W: 0.2068, b: 0.1205 18 | loss: 0.0237, W: 0.2096, b: 0.1219 19 | loss: 0.0866, W: 0.2170, b: 0.1246 20 | loss: 0.0175, W: 0.2212, b: 0.1258 21 | loss: 0.0734, W: 0.2319, b: 0.1286 22 | loss: 0.0001, W: 0.2316, b: 0.1285 23 | loss: 0.0068, W: 0.2338, b: 0.1294 24 | loss: 0.0266, W: 0.2441, b: 0.1314 25 | loss: 0.0269, W: 0.2509, b: 0.1334 26 | loss: 0.0071, W: 0.2553, b: 0.1345 27 | loss: 0.0084, W: 0.2568, b: 0.1358 28 | loss: 0.0229, W: 0.2647, b: 0.1380 29 | loss: 0.0065, W: 0.2714, b: 0.1392 30 | loss: 0.0001, W: 0.2719, b: 0.1394 31 | loss: 0.0102, W: 0.2787, b: 0.1411 32 | loss: 0.0145, W: 0.2846, b: 0.1433 33 | loss: 0.0007, W: 0.2869, b: 0.1438 34 | loss: 0.0021, W: 0.2882, b: 0.1447 35 | loss: 0.0107, W: 0.2916, b: 0.1469 36 | loss: 0.0520, W: 0.3017, b: 0.1518 37 | loss: 0.0023, W: 0.3045, b: 0.1529 38 | loss: 0.0291, W: 0.3162, b: 0.1570 39 | loss: 0.0126, W: 0.3081, b: 0.1542 40 | loss: 0.0005, W: 0.3092, b: 0.1548 41 | loss: 0.0010, W: 0.3128, b: 0.1557 42 | loss: 0.0075, W: 0.3192, b: 0.1582 43 | loss: 0.0001, W: 0.3184, b: 0.1579 44 | loss: 0.0041, W: 0.3202, b: 0.1600 45 | loss: 0.0050, W: 0.3270, b: 0.1624 46 | loss: 0.0015, W: 0.3211, b: 0.1610 47 | loss: 0.0014, W: 0.3182, b: 0.1596 48 | loss: 0.0012, W: 0.3224, b: 0.1610 49 | loss: 0.0056, W: 0.3291, b: 0.1641 50 | loss: 0.0022, W: 0.3220, b: 0.1620 51 | loss: 0.0006, W: 0.3233, b: 0.1632 52 | loss: 0.0065, W: 0.3280, b: 0.1671 53 | loss: 0.0389, W: 0.3426, b: 0.1767 54 | loss: 0.0000, W: 0.3431, b: 0.1769 55 | loss: 0.0149, W: 0.3568, b: 0.1834 56 | loss: 0.0272, W: 0.3398, b: 0.1745 57 | loss: 0.0000, W: 0.3396, b: 0.1744 58 | loss: 0.0005, W: 0.3360, b: 0.1730 59 | loss: 0.0035, W: 0.3423, b: 0.1767 60 | loss: 0.0018, W: 0.3366, b: 0.1740 61 | loss: 0.0030, W: 0.3388, b: 0.1777 62 | loss: 0.0020, W: 0.3449, b: 0.1810 63 | loss: 0.0056, W: 0.3304, b: 0.1754 64 | loss: 0.0023, 
W: 0.3256, b: 0.1716 65 | loss: 0.0005, W: 0.3290, b: 0.1734 66 | loss: 0.0044, W: 0.3366, b: 0.1791 67 | loss: 0.0038, W: 0.3253, b: 0.1736 68 | loss: 0.0004, W: 0.3266, b: 0.1755 69 | loss: 0.0059, W: 0.3321, b: 0.1828 70 | loss: 0.0366, W: 0.3485, b: 0.1992 71 | loss: 0.0000, W: 0.3480, b: 0.1988 72 | loss: 0.0127, W: 0.3625, b: 0.2090 73 | loss: 0.0310, W: 0.3430, b: 0.1942 74 | loss: 0.0001, W: 0.3424, b: 0.1934 75 | loss: 0.0009, W: 0.3372, b: 0.1906 76 | loss: 0.0030, W: 0.3434, b: 0.1959 77 | loss: 0.0022, W: 0.3368, b: 0.1912 78 | loss: 0.0026, W: 0.3390, b: 0.1966 79 | loss: 0.0017, W: 0.3449, b: 0.2012 80 | loss: 0.0062, W: 0.3292, b: 0.1925 81 | loss: 0.0025, W: 0.3241, b: 0.1867 82 | loss: 0.0005, W: 0.3276, b: 0.1893 83 | loss: 0.0042, W: 0.3351, b: 0.1973 84 | loss: 0.0039, W: 0.3234, b: 0.1895 85 | loss: 0.0003, W: 0.3246, b: 0.1919 86 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/RMSProp/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.RMSPropOptimizer(learning_rate, momentum=0.0,epsilon=1e-7).minimize(cost, name = "RMSPropOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(5): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/RMSPropTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
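# For reference, RMSProp (here with momentum=0.0) scales each step by a running RMS
# of recent gradients; a sketch assuming TF's default decay of 0.9 is:
#   ms  = 0.9 * ms + 0.1 * grad**2
#   var = var - decayed_learning_rate * grad / sqrt(ms + epsilon)
# where decayed_learning_rate below additionally shrinks as 1 / (1 + 0.5 * global_step).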
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | X = tf.placeholder(tf.float32) 17 | Y = tf.placeholder(tf.float32) 18 | 19 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 20 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | 22 | pred = tf.add(tf.multiply(X,W), b) 23 | 24 | global_step = tf.Variable(0, trainable=False) 25 | learning_rate = 0.01 26 | decay_rate = 0.5 27 | decayed_learning_rate = learning_rate * (1. / (1. + decay_rate * tf.cast(global_step, tf.float32))) 28 | 29 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 30 | optimizer = tf.train.RMSPropOptimizer(decayed_learning_rate, momentum=0.0,epsilon=1e-7).minimize(cost, global_step=global_step, name = "AdagradOptimizer") 31 | 32 | init = tf.global_variables_initializer() 33 | with tf.Session() as session: 34 | session.run(init) 35 | for e in range(5): 36 | for i in range(n_samples): 37 | _, cost_v, W_v, b_v, pred_v, lr_v, step_v = session.run([optimizer, cost, W, b, pred, decayed_learning_rate, global_step], feed_dict = {X: train_x[i], Y: train_y[i]}) 38 | print(f"step: {step_v:d}, loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}, lr: {lr_v:.8f}") 39 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGD/expected.txt: -------------------------------------------------------------------------------- 1 | loss: 0.0474, W: 0.1025, b: 0.1007 2 | loss: 0.1434, W: 0.1082, b: 0.1020 3 | loss: 0.0571, W: 0.1127, b: 0.1029 4 | loss: 0.1598, W: 0.1219, b: 0.1042 5 | loss: 0.0163, W: 0.1249, b: 0.1047 6 | loss: 0.0264, W: 0.1272, b: 0.1052 7 | loss: 0.1196, W: 0.1388, b: 0.1064 8 | loss: 0.0783, W: 0.1448, b: 0.1074 9 | loss: 0.0515, W: 0.1507, b: 0.1082 10 | loss: 0.0182, W: 0.1517, b: 0.1086 11 | loss: 0.0801, W: 0.1585, b: 0.1096 12 | loss: 0.0796, W: 0.1690, b: 0.1106 13 | loss: 0.0121, W: 0.1710, b: 0.1109 14 | loss: 0.0598, W: 0.1777, b: 0.1118 15 | loss: 0.0500, W: 0.1820, b: 0.1125 16 | loss: 0.0382, W: 0.1882, b: 0.1132 17 | loss: 0.0107, W: 0.1893, b: 0.1136 18 | loss: 0.0272, W: 0.1912, b: 0.1141 19 | loss: 0.0958, W: 0.1959, b: 0.1152 20 | loss: 0.0237, W: 0.1988, b: 0.1157 21 | loss: 0.0891, W: 0.2056, b: 0.1167 22 | loss: 0.0007, W: 0.2063, b: 0.1168 23 | loss: 0.0105, W: 0.2077, b: 0.1172 24 | loss: 0.0436, W: 0.2147, b: 0.1179 25 | loss: 0.0389, W: 0.2189, b: 0.1186 26 | loss: 0.0165, W: 0.2223, b: 0.1190 27 | loss: 0.0113, W: 0.2231, b: 0.1194 28 | loss: 0.0380, W: 0.2278, b: 0.1201 29 | loss: 0.0231, W: 0.2334, b: 0.1206 30 | loss: 0.0025, W: 0.2343, b: 0.1207 31 | loss: 0.0243, W: 0.2386, b: 0.1213 32 | loss: 0.0265, W: 0.2417, b: 0.1218 33 | loss: 0.0098, W: 0.2449, b: 0.1222 34 | loss: 0.0052, W: 0.2456, b: 0.1224 35 | loss: 0.0173, W: 0.2471, b: 0.1229 36 | loss: 0.0706, W: 0.2511, b: 0.1238 37 | loss: 0.0101, W: 0.2530, b: 0.1241 38 | loss: 0.0550, W: 0.2584, b: 0.1249 39 | loss: 0.0014, W: 0.2575, b: 0.1248 40 | loss: 0.0041, W: 0.2585, b: 0.1250 41 | loss: 0.0150, W: 0.2626, b: 0.1254 42 | loss: 0.0211, W: 0.2656, b: 0.1259 43 | loss: 0.0044, W: 0.2674, b: 0.1262 44 | loss: 0.0078, W: 0.2680, b: 0.1265 45 | loss: 
0.0194, W: 0.2714, b: 0.1270 46 | loss: 0.0049, W: 0.2740, b: 0.1272 47 | loss: 0.0001, W: 0.2742, b: 0.1272 48 | loss: 0.0100, W: 0.2770, b: 0.1276 49 | loss: 0.0155, W: 0.2794, b: 0.1280 50 | loss: 0.0015, W: 0.2806, b: 0.1281 51 | loss: 0.0027, W: 0.2811, b: 0.1283 52 | loss: 0.0122, W: 0.2824, b: 0.1287 53 | loss: 0.0567, W: 0.2860, b: 0.1295 54 | loss: 0.0044, W: 0.2872, b: 0.1297 55 | loss: 0.0378, W: 0.2917, b: 0.1304 56 | loss: 0.0062, W: 0.2898, b: 0.1301 57 | loss: 0.0016, W: 0.2904, b: 0.1303 58 | loss: 0.0046, W: 0.2927, b: 0.1305 59 | loss: 0.0127, W: 0.2951, b: 0.1309 60 | loss: 0.0007, W: 0.2958, b: 0.1310 61 | loss: 0.0059, W: 0.2964, b: 0.1312 62 | loss: 0.0109, W: 0.2989, b: 0.1316 63 | loss: 0.0003, W: 0.2996, b: 0.1317 64 | loss: 0.0002, W: 0.2993, b: 0.1316 65 | loss: 0.0042, W: 0.3011, b: 0.1319 66 | loss: 0.0101, W: 0.3031, b: 0.1322 67 | loss: 0.0000, W: 0.3031, b: 0.1322 68 | loss: 0.0015, W: 0.3035, b: 0.1323 69 | loss: 0.0094, W: 0.3046, b: 0.1327 70 | loss: 0.0487, W: 0.3079, b: 0.1334 71 | loss: 0.0020, W: 0.3088, b: 0.1336 72 | loss: 0.0285, W: 0.3126, b: 0.1342 73 | loss: 0.0108, W: 0.3102, b: 0.1338 74 | loss: 0.0006, W: 0.3105, b: 0.1339 75 | loss: 0.0011, W: 0.3117, b: 0.1340 76 | loss: 0.0084, W: 0.3136, b: 0.1343 77 | loss: 0.0000, W: 0.3137, b: 0.1343 78 | loss: 0.0049, W: 0.3142, b: 0.1346 79 | loss: 0.0068, W: 0.3162, b: 0.1348 80 | loss: 0.0002, W: 0.3157, b: 0.1348 81 | loss: 0.0008, W: 0.3152, b: 0.1347 82 | loss: 0.0018, W: 0.3163, b: 0.1348 83 | loss: 0.0073, W: 0.3180, b: 0.1351 84 | loss: 0.0006, W: 0.3172, b: 0.1351 85 | loss: 0.0010, W: 0.3175, b: 0.1352 86 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGD/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
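# For reference, GradientDescentOptimizer applies the plain SGD step
#   W = W - learning_rate * d(cost)/dW,   b = b - learning_rate * d(cost)/db
# and for the scalar feeds used below, cost = (pred - Y)**2 / (2 * n_samples), so
#   d(cost)/dW = (pred - Y) * X / n_samples   and   d(cost)/db = (pred - Y) / n_samples.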
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | learning_rate = 0.01 17 | X = tf.placeholder(tf.float32) 18 | Y = tf.placeholder(tf.float32) 19 | 20 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 22 | 23 | pred = tf.add(tf.multiply(X,W), b) 24 | 25 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 26 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost, name = "SGDOptimizer") 27 | 28 | init = tf.global_variables_initializer() 29 | with tf.Session() as session: 30 | session.run(init) 31 | for e in range(5): 32 | for i in range(n_samples): 33 | _, cost_v, W_v, b_v, pred_v = session.run([optimizer, cost, W, b, pred], feed_dict = {X: train_x[i], Y: train_y[i]}) 34 | print(f"loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}") 35 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGDMnist/expected.txt: -------------------------------------------------------------------------------- 1 | Epoch: 0, loss(Cross-Entropy): 0.4056, Accuracy:0.8808 2 | Epoch: 1, loss(Cross-Entropy): 0.1885, Accuracy:0.9433 3 | Epoch: 2, loss(Cross-Entropy): 0.1397, Accuracy:0.9581 4 | Epoch: 3, loss(Cross-Entropy): 0.1103, Accuracy:0.9666 5 | Epoch: 4, loss(Cross-Entropy): 0.0892, Accuracy:0.9734 -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGDMnist/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 3 | # 4 | # NOTE: This script is not used to generate the expected.txt file in this case 5 | # because of the tf.train.MomentumOptimizer implemention difference with decay. 6 | # The expected.txt is actually the output from the test itself. 
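# For reference, the model below is a 784-500-100-10 sigmoid MLP trained with
# softmax cross-entropy; the bound initB = 4*sqrt(6)/sqrt(fan_in + fan_out) is the
# sigmoid-scaled Glorot/Xavier range, so each weight matrix starts uniform in
# [-initB, +initB].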
7 | 8 | import tensorflow as tf 9 | from keras.utils.np_utils import to_categorical 10 | import math 11 | import tensorflow as tf 12 | mnist = tf.keras.datasets.mnist 13 | 14 | (x_train, y_train),(x_test, y_test) = mnist.load_data() 15 | x_train, x_test = x_train / 255.0, x_test / 255.0 16 | 17 | x_train = x_train.reshape((-1,784)) 18 | x_test = x_test.reshape((-1,784)) 19 | 20 | y_train = to_categorical(y_train, num_classes=10) 21 | y_test = to_categorical(y_test, num_classes=10) 22 | 23 | n_samples = len(x_train) 24 | learning_rate = 0.1 25 | X = tf.placeholder(tf.float32, shape=[None, 784]) 26 | Y = tf.placeholder(tf.float32, shape=[None, 10]) 27 | 28 | tf.set_random_seed(1) 29 | initB = 4 * math.sqrt(6) / math.sqrt(784 + 500) 30 | W1 = tf.Variable(tf.random_uniform([x_train.shape[1], 500], minval=-initB, maxval=initB)) 31 | b1 = tf.Variable(tf.constant(0., shape=[500], dtype=tf.float32)) 32 | layer1 = tf.nn.sigmoid(tf.add(tf.matmul(X,W1), b1)) 33 | 34 | initB = 4 * math.sqrt(6) / math.sqrt(500 + 100) 35 | W2 = tf.Variable(tf.random_uniform([500, 100], minval=-initB, maxval=initB)) 36 | b2 = tf.Variable(tf.constant(0., shape=[100], dtype=tf.float32)) 37 | layer2 = tf.nn.sigmoid(tf.add(tf.matmul(layer1,W2), b2)) 38 | 39 | initB = 4 * math.sqrt(6) / math.sqrt(100 + 10) 40 | W3 = tf.Variable(tf.random_uniform([100, 10], minval=-initB, maxval=initB)) 41 | b3 = tf.Variable(tf.constant(0., shape=[10], dtype=tf.float32)) 42 | layer3 = tf.add(tf.matmul(layer2,W3), b3) 43 | 44 | cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=layer3)) 45 | optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(cost, name = "SGDOptimizer") 46 | 47 | prediction = tf.nn.softmax(layer3, name = "Prediction") 48 | accuracy = tf.reduce_mean( tf.cast(tf.equal( tf.argmax(prediction,1), tf.argmax(Y, 1)), tf.float32), name = "Accuracy") 49 | 50 | init = tf.global_variables_initializer() 51 | with tf.Session() as sess: 52 | sess.run(init) 53 | batch_size =100 54 | total_batch = int(x_train.shape[0] / batch_size) 55 | for epoch in range(5): 56 | avg_loss = 0 57 | avg_acc = 0 58 | for batch_idx in range(0, x_train.shape[0], batch_size): 59 | X_batch = x_train[batch_idx:batch_idx+batch_size] 60 | Y_batch = y_train[batch_idx:batch_idx+batch_size] 61 | _, loss_val, acc = sess.run([optimizer, cost, accuracy], feed_dict={X: X_batch, Y: Y_batch}) 62 | avg_loss += loss_val / total_batch 63 | avg_acc += acc / total_batch 64 | print('Epoch: ', '%04d' % (epoch+1), 'cost (cross-entropy) = %.4f , acc = %.4f' % (avg_loss, avg_acc)) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGDMnistGPU/expected.txt: -------------------------------------------------------------------------------- 1 | Epoch: 0, loss(Cross-Entropy): 0.7236, Accuracy:0.7956 2 | Epoch: 1, loss(Cross-Entropy): 0.2806, Accuracy:0.9170 3 | Epoch: 2, loss(Cross-Entropy): 0.2276, Accuracy:0.9331 4 | Epoch: 3, loss(Cross-Entropy): 0.1957, Accuracy:0.9425 5 | Epoch: 4, loss(Cross-Entropy): 0.1730, Accuracy:0.9492 -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestData/SGDTimeDecay/optimizer_lr_test.py: -------------------------------------------------------------------------------- 1 | # This script is used to create data file (expected.txt) 2 | # which is used to compare the output from TensorFlowSharp optimizer tests. 
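# For reference, this is the same linear-regression setup as TestData/SGD but with
# the learning rate decayed as lr_t = 0.01 / (1 + 0.5 * global_step); assuming the
# step counter is incremented once per minimize() call, the applied rates run
# 0.01, 0.006667, 0.005, 0.004, ... as recorded in the *TimeDecay expected.txt files.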
3 | 4 | import tensorflow as tf 5 | 6 | # Training data 7 | train_x =[ 8 | 3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 9 | 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1 10 | ] 11 | train_y = [ 12 | 1.7, 2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 13 | 2.827,3.465,1.65,2.904,2.42,2.94,1.3 14 | ] 15 | n_samples = len(train_x) 16 | X = tf.placeholder(tf.float32) 17 | Y = tf.placeholder(tf.float32) 18 | 19 | W = tf.Variable(tf.constant(0.1), dtype=tf.float32) 20 | b = tf.Variable(tf.constant(0.1), dtype=tf.float32) 21 | 22 | pred = tf.add(tf.multiply(X,W), b) 23 | 24 | global_step = tf.Variable(0, trainable=False) 25 | learning_rate = 0.01 26 | decay_rate = 0.5 27 | decayed_learning_rate = learning_rate * (1. / (1. + decay_rate * tf.cast(global_step, tf.float32))) 28 | 29 | cost = tf.divide(tf.reduce_sum(tf.pow(tf.subtract(pred, Y), 2.0)), tf.multiply(2.0, n_samples)) 30 | optimizer = tf.train.GradientDescentOptimizer(decayed_learning_rate).minimize(cost, global_step=global_step, name = "SGDOptimizer") 31 | 32 | init = tf.global_variables_initializer() 33 | with tf.Session() as session: 34 | session.run(init) 35 | for e in range(5): 36 | for i in range(n_samples): 37 | _, cost_v, W_v, b_v, pred_v, lr_v, step_v = session.run([optimizer, cost, W, b, pred, decayed_learning_rate, global_step], feed_dict = {X: train_x[i], Y: train_y[i]}) 38 | print(f"step: {step_v:d}, loss: {cost_v:.4f}, W: {W_v:.4f}, b: {b_v:.4f}, lr: {lr_v:.8f}") 39 | #print("Prediction: %f == Actual: %f" % (pred_v, train_y[i])) -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/TestUtils.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections.Generic; 3 | using System.Linq; 4 | using System.Text; 5 | using System.Threading.Tasks; 6 | using Xunit; 7 | 8 | namespace TensorFlowSharp.Tests.CSharp 9 | { 10 | public static class TestUtils 11 | { 12 | public static void MatrixEqual(double[,] expected, double[,] actual, int precision) 13 | { 14 | for (int i = 0; i < expected.GetLength(0); i++) 15 | for (int j = 0; j < expected.GetLength(1); j++) 16 | Assert.Equal(expected[i, j], actual[i, j], precision: precision); 17 | } 18 | 19 | public static void MatrixEqual(Array expected, Array actual, int precision) 20 | { 21 | Assert.Equal(expected.Length, actual.Length); 22 | Assert.Equal(expected.Rank, actual.Rank); 23 | Assert.Equal(expected.GetType(), actual.GetType()); 24 | 25 | var ei = expected.GetEnumerator(); 26 | var ai = actual.GetEnumerator(); 27 | 28 | var expectedType = expected.GetType().GetElementType(); 29 | 30 | if (expectedType == typeof(double)) 31 | { 32 | while (ei.MoveNext() && ai.MoveNext()) 33 | Assert.Equal((double)ei.Current, (double)ai.Current, precision: 8); 34 | } 35 | else if (expectedType == typeof(float)) 36 | { 37 | while (ei.MoveNext() && ai.MoveNext()) 38 | Assert.Equal((float)ei.Current, (float)ai.Current, precision: 8); 39 | } 40 | else 41 | { 42 | while (ei.MoveNext() && ai.MoveNext()) 43 | Assert.True(Object.Equals(ei.Current, ai.Current)); 44 | } 45 | } 46 | 47 | public static void MatrixEqual (object expected, object actual, int precision) 48 | { 49 | if (expected is Array) { 50 | MatrixEqual (expected as Array, actual as Array, precision); 51 | return; 52 | } 53 | var expectedType = expected.GetType (); 54 | 55 | if (expectedType == typeof (double)) { 56 | Assert.Equal ((double)expected, (double)actual, precision: precision); 57 | } else if 
(expectedType == typeof (float)) { 58 | Assert.Equal ((float)expected, (float)actual, precision: precision); 59 | } else if (expectedType == typeof (int)) { 60 | Assert.Equal ((int)expected, (int)actual); 61 | } else { 62 | Assert.True (Object.Equals (expected, actual)); 63 | } 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/VariableTests.cs: -------------------------------------------------------------------------------- 1 | using System; 2 | using System.Collections; 3 | using System.Collections.Generic; 4 | using TensorFlow; 5 | using Xunit; 6 | 7 | 8 | namespace TensorFlowSharp.Tests.CSharp 9 | { 10 | public class VariableTests 11 | { 12 | [Fact] 13 | public void ShouldNotShareVariablesSameTypeNoName() 14 | { 15 | using (var graph = new TFGraph()) 16 | { 17 | var v1 = graph.Variable(graph.Const(0.5f)); 18 | var v2 = graph.Variable(graph.Const(0.6f)); 19 | 20 | using (var session = new TFSession(graph)) 21 | { 22 | session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run(); 23 | var result = session.GetRunner().Fetch(v1.Read, v2.Read).Run(); 24 | Assert.NotEqual(result[0].GetValue(), result[1].GetValue()); 25 | } 26 | } 27 | } 28 | [Fact] 29 | public void ShouldNotShareVariablesSameType() 30 | { 31 | using (var graph = new TFGraph()) 32 | { 33 | var v1 = graph.Variable(graph.Const(0.5f), operName: "v1"); 34 | var v2 = graph.Variable(graph.Const(0.6f), operName: "v2"); 35 | 36 | using (var session = new TFSession(graph)) 37 | { 38 | session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run(); 39 | var result = session.GetRunner().Fetch(v1.Read, v2.Read).Run(); 40 | Assert.NotEqual(result[0].GetValue(), result[1].GetValue()); 41 | } 42 | } 43 | } 44 | 45 | [Fact] 46 | public void ShouldNotShareVariablesDifferentTypeNoName() 47 | { 48 | using (var graph = new TFGraph()) 49 | { 50 | var v1 = graph.Variable(graph.Const(0.5f)); 51 | var v2 = graph.Variable(graph.Const(0L)); 52 | 53 | using (var session = new TFSession(graph)) 54 | { 55 | session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run(); 56 | var result = session.GetRunner().Fetch(v1.Read, v2.Read).Run(); 57 | Assert.NotEqual(result[0].TensorType, result[1].TensorType); 58 | } 59 | } 60 | } 61 | [Fact] 62 | public void ShouldNotShareVariablesDifferentType() 63 | { 64 | using (var graph = new TFGraph()) 65 | { 66 | var v1 = graph.Variable(graph.Const(0.5f), operName: "v1"); 67 | var v2 = graph.Variable(graph.Const(0L), operName: "v2"); 68 | 69 | using (var session = new TFSession(graph)) 70 | { 71 | session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run(); 72 | var result = session.GetRunner().Fetch(v1.Read, v2.Read).Run(); 73 | Assert.NotEqual(result[0].TensorType, result[1].TensorType); 74 | } 75 | } 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests.CSharp/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests/ArithmeticOperationTests.fs: -------------------------------------------------------------------------------- 1 | namespace TensorFlowSharp.Tests 2 | 3 | open TensorFlow 4 | open Xunit 5 | 6 | module ArithmeticOperationTests = 7 | 8 | [] 9 | [] 10 | [] 11 | let 
Should_EvaluateMultiplyExpression_ForFloatDataType(aValue:float32, bValue:float32, expected:float32) = 12 | use graph = new TFGraph() 13 | use session = new TFSession(graph) 14 | 15 | let a = graph.Placeholder(TFDataType.Float) // create symbolic variable a 16 | let b = graph.Placeholder(TFDataType.Float) // create symbolic variable b 17 | 18 | let y = graph.Mul(a, b) // multiply symbolic variables 19 | 20 | // evaluate expression with parameters for a and b 21 | let mul = 22 | session.Run([| a; b |], 23 | [| new TFTensor(aValue); new TFTensor(bValue) |], 24 | [| y |]) 25 | 26 | let mulTensor = mul.[0] 27 | let mulValue = mulTensor.GetValue() :?> float32 28 | 29 | Assert.Equal(expected, mulValue) 30 | 31 | 32 | [] 33 | let Should_EvaluateAddExpression_ForFloatDataType() = 34 | use graph = new TFGraph() 35 | use session = new TFSession(graph) 36 | 37 | let a = graph.Placeholder(TFDataType.Float) // create symbolic variable a 38 | let b = graph.Placeholder(TFDataType.Float) // create symbolic variable b 39 | 40 | let y = graph.Add(a, b) // add symbolic variables 41 | 42 | let aValue = Array2D.init 40 20 (fun i j -> float32 i) 43 | let bValue = Array2D.init 40 20 (fun i j -> float32 (i + j)) 44 | let sValue = Array2D.init 40 20 (fun i j -> float32 (2*i + j)) 45 | 46 | 47 | // evaluate expression with parameters for a and b 48 | let add = 49 | session.Run([| a; b |], 50 | [| new TFTensor(aValue); new TFTensor(bValue) |], 51 | [| y |]) 52 | 53 | let addTensor = add.[0] 54 | let addValue = addTensor.GetValue() :?> float32[,] 55 | let it = sValue = addValue : bool 56 | Assert.True(it) 57 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests/AssemblyInfo.fs: -------------------------------------------------------------------------------- 1 | namespace TensorFlowSharp.Tests.AssemblyInfo 2 | 3 | open System.Reflection 4 | open System.Runtime.CompilerServices 5 | open System.Runtime.InteropServices 6 | 7 | // General Information about an assembly is controlled through the following 8 | // set of attributes. Change these attribute values to modify the information 9 | // associated with an assembly. 10 | [] 11 | [] 12 | [] 13 | [] 14 | [] 15 | [] 16 | [] 17 | [] 18 | 19 | // Setting ComVisible to false makes the types in this assembly not visible 20 | // to COM components. If you need to access a type in this assembly from 21 | // COM, set the ComVisible attribute to true on that type. 
22 | [] 23 | 24 | // The following GUID is for the ID of the typelib if this project is exposed to COM 25 | [] 26 | 27 | // Version information for an assembly consists of the following four values: 28 | // 29 | // Major Version 30 | // Minor Version 31 | // Build Number 32 | // Revision 33 | // 34 | // You can specify all the values or you can default the Build and Revision Numbers 35 | // by using the '*' as shown below: 36 | // [] 37 | [] 38 | [] 39 | 40 | do 41 | () -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests/NeuralNetOperationTests.fs: -------------------------------------------------------------------------------- 1 | namespace TensorFlowSharp.Tests 2 | 3 | open TensorFlow 4 | open Xunit 5 | 6 | module NeuralNetOperationTests = 7 | 8 | [] 9 | [] 10 | [] 11 | [] 12 | let Should_ApplyDropout_ForFloatDataType(keep_prob:float32) = 13 | use graph = new TFGraph() 14 | use session = new TFSession(graph) 15 | 16 | let inputs = Array2D.init 4000 200 (fun i j -> float32(i+j+1)) 17 | 18 | let a = graph.Placeholder(TFDataType.Float, new TFShape(4000L, 200L)) // create symbolic variable x 19 | let b = graph.Placeholder(TFDataType.Float, new TFShape(4000L, 200L)) // create symbolic variable keep_prob 20 | 21 | let y = graph.Dropout(a, b) // apply dropout to symbolic variables 22 | 23 | // evaluate expression with parameters for a and b 24 | let res = 25 | session.Run([| a; b |], 26 | [| new TFTensor(inputs); new TFTensor(keep_prob) |], 27 | [| y |]) 28 | 29 | let resTensor = res.[0] 30 | let resValue = resTensor.GetValue() :?> float32[,] 31 | 32 | let countZeros arr = arr |> Seq.cast |> Seq.filter (fun x -> x = 0.0f) |> Seq.length 33 | let numberOfZeros = countZeros resValue |> float32 34 | let totalLength = resValue |> Seq.cast |> Seq.length |> float32 35 | let actualRatio = numberOfZeros / totalLength 36 | let expected = 1.0f - keep_prob : float32 37 | 38 | Assert.True(System.Math.Abs(expected - actualRatio) < 0.05f) 39 | -------------------------------------------------------------------------------- /tests/TensorFlowSharp.Tests/packages.config: -------------------------------------------------------------------------------- 1 |  2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | --------------------------------------------------------------------------------